[official-gcc.git] / gcc / tree-vect-data-refs.c
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "predict.h"
31 #include "memmodel.h"
32 #include "tm_p.h"
33 #include "ssa.h"
34 #include "optabs-tree.h"
35 #include "cgraph.h"
36 #include "dumpfile.h"
37 #include "alias.h"
38 #include "fold-const.h"
39 #include "stor-layout.h"
40 #include "tree-eh.h"
41 #include "gimplify.h"
42 #include "gimple-iterator.h"
43 #include "gimplify-me.h"
44 #include "tree-ssa-loop-ivopts.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop.h"
47 #include "cfgloop.h"
48 #include "tree-scalar-evolution.h"
49 #include "tree-vectorizer.h"
50 #include "expr.h"
51 #include "builtins.h"
52 #include "params.h"
53 #include "tree-cfg.h"
54 #include "tree-hash-traits.h"
55 #include "vec-perm-indices.h"
57 /* Return true if load- or store-lanes optab OPTAB is implemented for
58 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
60 static bool
61 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
62 tree vectype, unsigned HOST_WIDE_INT count)
64 machine_mode mode;
65 scalar_int_mode array_mode;
66 bool limit_p;
68 mode = TYPE_MODE (vectype);
69 limit_p = !targetm.array_mode_supported_p (mode, count);
70 if (!int_mode_for_size (count * GET_MODE_BITSIZE (mode),
71 limit_p).exists (&array_mode))
73 if (dump_enabled_p ())
74 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
75 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
76 GET_MODE_NAME (mode), count);
77 return false;
80 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
82 if (dump_enabled_p ())
83 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
84 "cannot use %s<%s><%s>\n", name,
85 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
86 return false;
89 if (dump_enabled_p ())
90 dump_printf_loc (MSG_NOTE, vect_location,
91 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
92 GET_MODE_NAME (mode));
94 return true;
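/* Illustrative example (not from the original source): on a hypothetical
   target with 128-bit V4SI vectors, checking load-lanes support for a
   group of three vectors amounts to

     vect_lanes_optab_supported_p ("vec_load_lanes", vec_load_lanes_optab,
                                   vectype, 3);

   which first looks for an integer mode wide enough for 3 * 128 bits to
   serve as the array mode and then requires a vec_load_lanes handler
   converting that array mode to V4SImode.  If either piece is missing the
   function returns false with a MSG_MISSED_OPTIMIZATION note.  */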
98 /* Return the smallest scalar part of STMT.
99 This is used to determine the vectype of the stmt. We generally set the
100 vectype according to the type of the result (lhs). For stmts whose
101 result-type is different than the type of the arguments (e.g., demotion,
102 promotion), vectype will be reset appropriately (later). Note that we have
103 to visit the smallest datatype in this function, because that determines the
104 VF. If the smallest datatype in the loop is present only as the rhs of a
105 promotion operation - we'd miss it.
106 Such a case, where a variable of this datatype does not appear in the lhs
107 anywhere in the loop, can only occur if it's an invariant: e.g.:
108 'int_x = (int) short_inv', which we'd expect to have been optimized away by
109 invariant motion. However, we cannot rely on invariant motion to always
110 take invariants out of the loop, and so in the case of promotion we also
111 have to check the rhs.
112 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
113 types. */
115 tree
116 vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit,
117 HOST_WIDE_INT *rhs_size_unit)
119 tree scalar_type = gimple_expr_type (stmt);
120 HOST_WIDE_INT lhs, rhs;
122 /* During the analysis phase, this function is called on arbitrary
123 statements that might not have scalar results. */
124 if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (scalar_type)))
125 return scalar_type;
127 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
129 if (is_gimple_assign (stmt)
130 && (gimple_assign_cast_p (stmt)
131 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
132 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
133 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
135 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
137 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
138 if (rhs < lhs)
139 scalar_type = rhs_type;
142 *lhs_size_unit = lhs;
143 *rhs_size_unit = rhs;
144 return scalar_type;
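/* Worked example (illustrative): for the widening statement

     int_x = (int) short_y;

   the lhs has size 4 and the rhs has size 2, so the function returns the
   short type and sets *LHS_SIZE_UNIT = 4 and *RHS_SIZE_UNIT = 2; the
   caller then bases the vectorization factor on the 2-byte element.  */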
148 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
149 tested at run-time. Return TRUE if DDR was successfully inserted.
150 Return false if versioning is not supported. */
152 static bool
153 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
155 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
157 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
158 return false;
160 if (!runtime_alias_check_p (ddr, loop,
161 optimize_loop_nest_for_speed_p (loop)))
162 return false;
164 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
165 return true;
169 /* A subroutine of vect_analyze_data_ref_dependence. Handle
170 DDR_COULD_BE_INDEPENDENT_P ddr DDR that has a known set of dependence
171 distances. These distances are conservatively correct but they don't
172 reflect a guaranteed dependence.
174 Return true if this function does all the work necessary to avoid
175 an alias or false if the caller should use the dependence distances
176 to limit the vectorization factor in the usual way. LOOP_DEPTH is
177 the depth of the loop described by LOOP_VINFO and the other arguments
178 are as for vect_analyze_data_ref_dependence. */
180 static bool
181 vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
182 loop_vec_info loop_vinfo,
183 int loop_depth, unsigned int *max_vf)
185 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
186 lambda_vector dist_v;
187 unsigned int i;
188 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
190 int dist = dist_v[loop_depth];
191 if (dist != 0 && !(dist > 0 && DDR_REVERSED_P (ddr)))
193 /* If the user asserted safelen >= DIST consecutive iterations
194 can be executed concurrently, assume independence.
196 ??? An alternative would be to add the alias check even
197 in this case, and vectorize the fallback loop with the
198 maximum VF set to safelen. However, if the user has
199 explicitly given a length, it's less likely that that
200 would be a win. */
201 if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
203 if ((unsigned int) loop->safelen < *max_vf)
204 *max_vf = loop->safelen;
205 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
206 continue;
209 /* For dependence distances of 2 or more, we have the option
210 of limiting VF or checking for an alias at runtime.
211 Prefer to check at runtime if we can, to avoid limiting
212 the VF unnecessarily when the bases are in fact independent.
214 Note that the alias checks will be removed if the VF ends up
215 being small enough. */
216 return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
219 return true;
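/* Illustrative note (assumed example): for two accesses p[i] and q[i + 4]
   whose bases may or may not overlap, the conservative distance vector is
   4.  If the user wrote safelen(8) on the loop, that distance is covered
   by the safelen guarantee, independence is assumed and *MAX_VF is merely
   capped at 8.  Without such a guarantee the function queues a runtime
   alias check via vect_mark_for_runtime_alias_test instead of limiting
   the vectorization factor.  */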
223 /* Function vect_analyze_data_ref_dependence.
225 Return TRUE if there (might) exist a dependence between a memory-reference
226 DRA and a memory-reference DRB. When versioning for alias may check a
227 dependence at run-time, return FALSE. Adjust *MAX_VF according to
228 the data dependence. */
230 static bool
231 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
232 loop_vec_info loop_vinfo,
233 unsigned int *max_vf)
235 unsigned int i;
236 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
237 struct data_reference *dra = DDR_A (ddr);
238 struct data_reference *drb = DDR_B (ddr);
239 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
240 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
241 lambda_vector dist_v;
242 unsigned int loop_depth;
244 /* In loop analysis all data references should be vectorizable. */
245 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
246 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
247 gcc_unreachable ();
249 /* Independent data accesses. */
250 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
251 return false;
253 if (dra == drb
254 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
255 return false;
257 /* We do not have to consider dependences between accesses that belong
258 to the same group. */
259 if (GROUP_FIRST_ELEMENT (stmtinfo_a)
260 && GROUP_FIRST_ELEMENT (stmtinfo_a) == GROUP_FIRST_ELEMENT (stmtinfo_b))
261 return false;
263 /* Even if we have an anti-dependence then, as the vectorized loop covers at
264 least two scalar iterations, there is always also a true dependence.
265 As the vectorizer does not re-order loads and stores we can ignore
266 the anti-dependence if TBAA can disambiguate both DRs similar to the
267 case with known negative distance anti-dependences (positive
268 distance anti-dependences would violate TBAA constraints). */
269 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
270 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
271 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
272 get_alias_set (DR_REF (drb))))
273 return false;
275 /* Unknown data dependence. */
276 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
278 /* If user asserted safelen consecutive iterations can be
279 executed concurrently, assume independence. */
280 if (loop->safelen >= 2)
282 if ((unsigned int) loop->safelen < *max_vf)
283 *max_vf = loop->safelen;
284 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
285 return false;
288 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
289 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
291 if (dump_enabled_p ())
293 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
294 "versioning for alias not supported for: "
295 "can't determine dependence between ");
296 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
297 DR_REF (dra));
298 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
299 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
300 DR_REF (drb));
301 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
303 return true;
306 if (dump_enabled_p ())
308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
309 "versioning for alias required: "
310 "can't determine dependence between ");
311 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
312 DR_REF (dra));
313 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
314 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
315 DR_REF (drb));
316 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
319 /* Add to list of ddrs that need to be tested at run-time. */
320 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
323 /* Known data dependence. */
324 if (DDR_NUM_DIST_VECTS (ddr) == 0)
326 /* If user asserted safelen consecutive iterations can be
327 executed concurrently, assume independence. */
328 if (loop->safelen >= 2)
330 if ((unsigned int) loop->safelen < *max_vf)
331 *max_vf = loop->safelen;
332 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
333 return false;
336 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
337 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
339 if (dump_enabled_p ())
341 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
342 "versioning for alias not supported for: "
343 "bad dist vector for ");
344 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
345 DR_REF (dra));
346 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
347 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
348 DR_REF (drb));
349 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
351 return true;
354 if (dump_enabled_p ())
356 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
357 "versioning for alias required: "
358 "bad dist vector for ");
359 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
360 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
361 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
362 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
364 /* Add to list of ddrs that need to be tested at run-time. */
365 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
368 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
370 if (DDR_COULD_BE_INDEPENDENT_P (ddr)
371 && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo,
372 loop_depth, max_vf))
373 return false;
375 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
377 int dist = dist_v[loop_depth];
379 if (dump_enabled_p ())
380 dump_printf_loc (MSG_NOTE, vect_location,
381 "dependence distance = %d.\n", dist);
383 if (dist == 0)
385 if (dump_enabled_p ())
387 dump_printf_loc (MSG_NOTE, vect_location,
388 "dependence distance == 0 between ");
389 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
390 dump_printf (MSG_NOTE, " and ");
391 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
392 dump_printf (MSG_NOTE, "\n");
395 /* When we perform grouped accesses and perform implicit CSE
396 by detecting equal accesses and doing disambiguation with
397 runtime alias tests like for
398 .. = a[i];
399 .. = a[i+1];
400 a[i] = ..;
401 a[i+1] = ..;
402 *p = ..;
403 .. = a[i];
404 .. = a[i+1];
405 where we will end up loading { a[i], a[i+1] } once, make
406 sure that inserting group loads before the first load and
407 stores after the last store will do the right thing.
408 Similar for groups like
409 a[i] = ...;
410 ... = a[i];
411 a[i+1] = ...;
412 where loads from the group interleave with the store. */
413 if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
414 || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
416 gimple *earlier_stmt;
417 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
418 if (DR_IS_WRITE
419 (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
421 if (dump_enabled_p ())
422 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
423 "READ_WRITE dependence in interleaving."
424 "\n");
425 return true;
429 continue;
432 if (dist > 0 && DDR_REVERSED_P (ddr))
434 /* If DDR_REVERSED_P the order of the data-refs in DDR was
435 reversed (to make distance vector positive), and the actual
436 distance is negative. */
437 if (dump_enabled_p ())
438 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
439 "dependence distance negative.\n");
440 /* Record a negative dependence distance to later limit the
441 amount of stmt copying / unrolling we can perform.
442 Only need to handle read-after-write dependence. */
443 if (DR_IS_READ (drb)
444 && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
445 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
446 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
447 continue;
450 unsigned int abs_dist = abs (dist);
451 if (abs_dist >= 2 && abs_dist < *max_vf)
453 /* The dependence distance requires reduction of the maximal
454 vectorization factor. */
455 *max_vf = abs (dist);
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_NOTE, vect_location,
458 "adjusting maximal vectorization factor to %i\n",
459 *max_vf);
462 if (abs_dist >= *max_vf)
464 /* Dependence distance does not create dependence, as far as
465 vectorization is concerned, in this case. */
466 if (dump_enabled_p ())
467 dump_printf_loc (MSG_NOTE, vect_location,
468 "dependence distance >= VF.\n");
469 continue;
472 if (dump_enabled_p ())
474 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
475 "not vectorized, possible dependence "
476 "between data-refs ");
477 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
478 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
479 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
480 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
483 return true;
486 return false;
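/* Worked example (illustrative): for

     for (i = 0; i < n; i++)
       a[i + 3] = a[i] + 1;

   the dependence distance is 3, so the loop above reduces *MAX_VF to 3;
   vectorizing with a factor of at most 3 stays legal because each vector
   iteration then only reads elements written in an earlier vector
   iteration.  A distance >= the incoming *MAX_VF is simply skipped
   ("dependence distance >= VF").  */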
489 /* Function vect_analyze_data_ref_dependences.
491 Examine all the data references in the loop, and make sure there do not
492 exist any data dependences between them. Set *MAX_VF according to
493 the maximum vectorization factor the data dependences allow. */
495 bool
496 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
497 unsigned int *max_vf)
499 unsigned int i;
500 struct data_dependence_relation *ddr;
502 if (dump_enabled_p ())
503 dump_printf_loc (MSG_NOTE, vect_location,
504 "=== vect_analyze_data_ref_dependences ===\n");
506 LOOP_VINFO_DDRS (loop_vinfo)
507 .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
508 * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
509 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
510 /* We need read-read dependences to compute STMT_VINFO_SAME_ALIGN_REFS. */
511 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
512 &LOOP_VINFO_DDRS (loop_vinfo),
513 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
514 return false;
516 /* For epilogues we either have no aliases or alias versioning
517 was applied to the original loop. Therefore we may just get max_vf
518 from the VF of the original loop. */
519 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
520 *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo);
521 else
522 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
523 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
524 return false;
526 return true;
530 /* Function vect_slp_analyze_data_ref_dependence.
532 Return TRUE if there (might) exist a dependence between a memory-reference
533 DRA and a memory-reference DRB. Unlike the loop analysis above, no
534 run-time alias check is emitted and there is no vectorization factor
535 to adjust. */
537 static bool
538 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
540 struct data_reference *dra = DDR_A (ddr);
541 struct data_reference *drb = DDR_B (ddr);
543 /* We need to check dependences of statements marked as unvectorizable
544 as well, they still can prohibit vectorization. */
546 /* Independent data accesses. */
547 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
548 return false;
550 if (dra == drb)
551 return false;
553 /* Read-read is OK. */
554 if (DR_IS_READ (dra) && DR_IS_READ (drb))
555 return false;
557 /* If dra and drb are part of the same interleaving chain consider
558 them independent. */
559 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
560 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
561 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
562 return false;
564 /* Unknown data dependence. */
565 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
567 if (dump_enabled_p ())
569 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
570 "can't determine dependence between ");
571 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
572 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
573 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
574 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
577 else if (dump_enabled_p ())
579 dump_printf_loc (MSG_NOTE, vect_location,
580 "determined dependence between ");
581 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
582 dump_printf (MSG_NOTE, " and ");
583 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
584 dump_printf (MSG_NOTE, "\n");
587 return true;
591 /* Analyze dependences involved in the transform of SLP NODE. STORES
592 contain the vector of scalar stores of this instance if we are
593 disambiguating the loads. */
595 static bool
596 vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
597 vec<gimple *> stores, gimple *last_store)
599 /* This walks over all stmts involved in the SLP load/store done
600 in NODE verifying we can sink them up to the last stmt in the
601 group. */
602 gimple *last_access = vect_find_last_scalar_stmt_in_slp (node);
603 for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
605 gimple *access = SLP_TREE_SCALAR_STMTS (node)[k];
606 if (access == last_access)
607 continue;
608 data_reference *dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (access));
609 for (gimple_stmt_iterator gsi = gsi_for_stmt (access);
610 gsi_stmt (gsi) != last_access; gsi_next (&gsi))
612 gimple *stmt = gsi_stmt (gsi);
613 if (! gimple_vuse (stmt)
614 || (DR_IS_READ (dr_a) && ! gimple_vdef (stmt)))
615 continue;
617 /* If we couldn't record a (single) data reference for this
618 stmt we have to give up. */
619 /* ??? Here and below if dependence analysis fails we can resort
620 to the alias oracle which can handle more kinds of stmts. */
621 data_reference *dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
622 if (!dr_b)
623 return false;
625 bool dependent = false;
626 /* If we run into a store of this same instance (we've just
627 marked those) then delay dependence checking until we run
628 into the last store because this is where it will have
629 been sunk to (and we verify if we can do that as well). */
630 if (gimple_visited_p (stmt))
632 if (stmt != last_store)
633 continue;
634 unsigned i;
635 gimple *store;
636 FOR_EACH_VEC_ELT (stores, i, store)
638 data_reference *store_dr
639 = STMT_VINFO_DATA_REF (vinfo_for_stmt (store));
640 ddr_p ddr = initialize_data_dependence_relation
641 (dr_a, store_dr, vNULL);
642 dependent = vect_slp_analyze_data_ref_dependence (ddr);
643 free_dependence_relation (ddr);
644 if (dependent)
645 break;
648 else
650 ddr_p ddr = initialize_data_dependence_relation (dr_a,
651 dr_b, vNULL);
652 dependent = vect_slp_analyze_data_ref_dependence (ddr);
653 free_dependence_relation (ddr);
655 if (dependent)
656 return false;
659 return true;
663 /* Function vect_slp_analyze_instance_dependence.
665 Examine all the data references in the SLP instance INSTANCE, and make
666 sure there do not exist any data dependences between them that would
667 prevent the SLP transform. */
669 bool
670 vect_slp_analyze_instance_dependence (slp_instance instance)
672 if (dump_enabled_p ())
673 dump_printf_loc (MSG_NOTE, vect_location,
674 "=== vect_slp_analyze_instance_dependence ===\n");
676 /* The stores of this instance are at the root of the SLP tree. */
677 slp_tree store = SLP_INSTANCE_TREE (instance);
678 if (! STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (store)[0])))
679 store = NULL;
681 /* Verify we can sink stores to the vectorized stmt insert location. */
682 gimple *last_store = NULL;
683 if (store)
685 if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL))
686 return false;
688 /* Mark stores in this instance and remember the last one. */
689 last_store = vect_find_last_scalar_stmt_in_slp (store);
690 for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
691 gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], true);
694 bool res = true;
696 /* Verify we can sink loads to the vectorized stmt insert location,
697 special-casing stores of this instance. */
698 slp_tree load;
699 unsigned int i;
700 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load)
701 if (! vect_slp_analyze_node_dependences (instance, load,
702 store
703 ? SLP_TREE_SCALAR_STMTS (store)
704 : vNULL, last_store))
706 res = false;
707 break;
710 /* Unset the visited flag. */
711 if (store)
712 for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
713 gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], false);
715 return res;
718 /* Record in VINFO the base alignment guarantee given by DRB. STMT is
719 the statement that contains DRB, which is useful for recording in the
720 dump file. */
722 static void
723 vect_record_base_alignment (vec_info *vinfo, gimple *stmt,
724 innermost_loop_behavior *drb)
726 bool existed;
727 innermost_loop_behavior *&entry
728 = vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
729 if (!existed || entry->base_alignment < drb->base_alignment)
731 entry = drb;
732 if (dump_enabled_p ())
734 dump_printf_loc (MSG_NOTE, vect_location,
735 "recording new base alignment for ");
736 dump_generic_expr (MSG_NOTE, TDF_SLIM, drb->base_address);
737 dump_printf (MSG_NOTE, "\n");
738 dump_printf_loc (MSG_NOTE, vect_location,
739 " alignment: %d\n", drb->base_alignment);
740 dump_printf_loc (MSG_NOTE, vect_location,
741 " misalignment: %d\n", drb->base_misalignment);
742 dump_printf_loc (MSG_NOTE, vect_location,
743 " based on: ");
744 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
749 /* If the region we're going to vectorize is reached, all unconditional
750 data references occur at least once. We can therefore pool the base
751 alignment guarantees from each unconditional reference. Do this by
752 going through all the data references in VINFO and checking whether
753 the containing statement makes the reference unconditionally. If so,
754 record the alignment of the base address in VINFO so that it can be
755 used for all other references with the same base. */
757 void
758 vect_record_base_alignments (vec_info *vinfo)
760 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
761 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
762 data_reference *dr;
763 unsigned int i;
764 FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr)
765 if (!DR_IS_CONDITIONAL_IN_STMT (dr))
767 gimple *stmt = DR_STMT (dr);
768 vect_record_base_alignment (vinfo, stmt, &DR_INNERMOST (dr));
770 /* If DR is nested in the loop that is being vectorized, we can also
771 record the alignment of the base wrt the outer loop. */
772 if (loop && nested_in_vect_loop_p (loop, stmt))
774 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
775 vect_record_base_alignment
776 (vinfo, stmt, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
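/* Illustrative note (assumed example): if an unconditional access through a
   double-typed reference and a conditional access through a char-typed
   reference share the same base address, the 8-byte base alignment proven
   for the double reference is pooled here and can later be reused when
   computing the misalignment of the char reference, which on its own would
   only guarantee byte alignment.  */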
781 /* Return the target alignment for the vectorized form of DR. */
783 static unsigned int
784 vect_calculate_target_alignment (struct data_reference *dr)
786 gimple *stmt = DR_STMT (dr);
787 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
788 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
789 return targetm.vectorize.preferred_vector_alignment (vectype);
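/* The preferred_vector_alignment hook typically just returns
   TYPE_ALIGN (vectype), e.g. 128 bits for a 128-bit vector type, but a
   target may ask for more (say, a cache-line boundary), which is why the
   code below consistently uses DR_TARGET_ALIGNMENT rather than the type
   alignment.  */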
792 /* Function vect_compute_data_ref_alignment
794 Compute the misalignment of the data reference DR.
796 Output:
797 1. If during the misalignment computation it is found that the data reference
798 cannot be vectorized then false is returned.
799 2. DR_MISALIGNMENT (DR) is defined.
801 FOR NOW: No analysis is actually performed. Misalignment is calculated
802 only for trivial cases. TODO. */
804 bool
805 vect_compute_data_ref_alignment (struct data_reference *dr)
807 gimple *stmt = DR_STMT (dr);
808 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
809 vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
810 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
811 struct loop *loop = NULL;
812 tree ref = DR_REF (dr);
813 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
815 if (dump_enabled_p ())
816 dump_printf_loc (MSG_NOTE, vect_location,
817 "vect_compute_data_ref_alignment:\n");
819 if (loop_vinfo)
820 loop = LOOP_VINFO_LOOP (loop_vinfo);
822 /* Initialize misalignment to unknown. */
823 SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN);
825 innermost_loop_behavior *drb = vect_dr_behavior (dr);
826 bool step_preserves_misalignment_p;
828 unsigned HOST_WIDE_INT vector_alignment
829 = vect_calculate_target_alignment (dr) / BITS_PER_UNIT;
830 DR_TARGET_ALIGNMENT (dr) = vector_alignment;
832 /* No step for BB vectorization. */
833 if (!loop)
835 gcc_assert (integer_zerop (drb->step));
836 step_preserves_misalignment_p = true;
839 /* In case the dataref is in an inner-loop of the loop that is being
840 vectorized (LOOP), we use the base and misalignment information
841 relative to the outer-loop (LOOP). This is ok only if the misalignment
842 stays the same throughout the execution of the inner-loop, which is why
843 we have to check that the stride of the dataref in the inner-loop evenly
844 divides by the vector alignment. */
845 else if (nested_in_vect_loop_p (loop, stmt))
847 step_preserves_misalignment_p
848 = (DR_STEP_ALIGNMENT (dr) % vector_alignment) == 0;
850 if (dump_enabled_p ())
852 if (step_preserves_misalignment_p)
853 dump_printf_loc (MSG_NOTE, vect_location,
854 "inner step divides the vector alignment.\n");
855 else
856 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
857 "inner step doesn't divide the vector"
858 " alignment.\n");
862 /* Similarly we can only use base and misalignment information relative to
863 an innermost loop if the misalignment stays the same throughout the
864 execution of the loop. As above, this is the case if the stride of
865 the dataref evenly divides by the alignment. */
866 else
868 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
869 step_preserves_misalignment_p
870 = multiple_p (DR_STEP_ALIGNMENT (dr) * vf, vector_alignment);
872 if (!step_preserves_misalignment_p && dump_enabled_p ())
873 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
874 "step doesn't divide the vector alignment.\n");
877 unsigned int base_alignment = drb->base_alignment;
878 unsigned int base_misalignment = drb->base_misalignment;
880 /* Calculate the maximum of the pooled base address alignment and the
881 alignment that we can compute for DR itself. */
882 innermost_loop_behavior **entry = base_alignments->get (drb->base_address);
883 if (entry && base_alignment < (*entry)->base_alignment)
885 base_alignment = (*entry)->base_alignment;
886 base_misalignment = (*entry)->base_misalignment;
889 if (drb->offset_alignment < vector_alignment
890 || !step_preserves_misalignment_p
891 /* We need to know whether the step wrt the vectorized loop is
892 negative when computing the starting misalignment below. */
893 || TREE_CODE (drb->step) != INTEGER_CST)
895 if (dump_enabled_p ())
897 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
898 "Unknown alignment for access: ");
899 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
900 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
902 return true;
905 if (base_alignment < vector_alignment)
907 tree base = drb->base_address;
908 if (TREE_CODE (base) == ADDR_EXPR)
909 base = TREE_OPERAND (base, 0);
910 if (!vect_can_force_dr_alignment_p (base,
911 vector_alignment * BITS_PER_UNIT))
913 if (dump_enabled_p ())
915 dump_printf_loc (MSG_NOTE, vect_location,
916 "can't force alignment of ref: ");
917 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
918 dump_printf (MSG_NOTE, "\n");
920 return true;
923 /* Force the alignment of the decl.
924 NOTE: This is the only change to the code we make during
925 the analysis phase, before deciding to vectorize the loop. */
926 if (dump_enabled_p ())
928 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
929 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
930 dump_printf (MSG_NOTE, "\n");
933 DR_VECT_AUX (dr)->base_decl = base;
934 DR_VECT_AUX (dr)->base_misaligned = true;
935 base_misalignment = 0;
937 poly_int64 misalignment
938 = base_misalignment + wi::to_poly_offset (drb->init).force_shwi ();
940 /* If this is a backward running DR then the first access in the larger
941 vectype is actually N-1 elements before the address in the DR.
942 Adjust misalign accordingly. */
943 if (tree_int_cst_sgn (drb->step) < 0)
944 /* PLUS because STEP is negative. */
945 misalignment += ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
946 * TREE_INT_CST_LOW (drb->step));
948 unsigned int const_misalignment;
949 if (!known_misalignment (misalignment, vector_alignment,
950 &const_misalignment))
952 if (dump_enabled_p ())
954 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
955 "Non-constant misalignment for access: ");
956 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
957 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
959 return true;
962 SET_DR_MISALIGNMENT (dr, const_misalignment);
964 if (dump_enabled_p ())
966 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
967 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
968 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
969 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
972 return true;
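/* Worked example (illustrative): with a 16-byte DR_TARGET_ALIGNMENT, a
   base whose pooled base_misalignment is 0 and DRB->init equal to 12, the
   computation above yields DR_MISALIGNMENT (DR) == 12, so peeling or
   versioning may be needed.  For a backward (negative step) V4SI access
   the misalignment is first adjusted by (4 - 1) * step before the modulo
   is taken.  */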
975 /* Function vect_update_misalignment_for_peel.
976 Sets DR's misalignment
977 - to 0 if it has the same alignment as DR_PEEL,
978 - to the misalignment computed using NPEEL if DR's misalignment is known,
979 - to -1 (unknown) otherwise.
981 DR - the data reference whose misalignment is to be adjusted.
982 DR_PEEL - the data reference whose misalignment is being made
983 zero in the vector loop by the peel.
984 NPEEL - the number of iterations in the peel loop if the misalignment
985 of DR_PEEL is known at compile time. */
987 static void
988 vect_update_misalignment_for_peel (struct data_reference *dr,
989 struct data_reference *dr_peel, int npeel)
991 unsigned int i;
992 vec<dr_p> same_aligned_drs;
993 struct data_reference *current_dr;
994 int dr_size = vect_get_scalar_dr_size (dr);
995 int dr_peel_size = vect_get_scalar_dr_size (dr_peel);
996 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
997 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
999 /* For interleaved data accesses the step in the loop must be multiplied by
1000 the size of the interleaving group. */
1001 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1002 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
1003 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
1004 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
1006 /* It can be assumed that the data refs with the same alignment as dr_peel
1007 are aligned in the vector loop. */
1008 same_aligned_drs
1009 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
1010 FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
1012 if (current_dr != dr)
1013 continue;
1014 gcc_assert (!known_alignment_for_access_p (dr)
1015 || !known_alignment_for_access_p (dr_peel)
1016 || (DR_MISALIGNMENT (dr) / dr_size
1017 == DR_MISALIGNMENT (dr_peel) / dr_peel_size));
1018 SET_DR_MISALIGNMENT (dr, 0);
1019 return;
1022 if (known_alignment_for_access_p (dr)
1023 && known_alignment_for_access_p (dr_peel))
1025 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
1026 int misal = DR_MISALIGNMENT (dr);
1027 misal += negative ? -npeel * dr_size : npeel * dr_size;
1028 misal &= DR_TARGET_ALIGNMENT (dr) - 1;
1029 SET_DR_MISALIGNMENT (dr, misal);
1030 return;
1033 if (dump_enabled_p ())
1034 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
1035 "to unknown (-1).\n");
1036 SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN);
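/* Worked example (illustrative): assume DR has misalignment 4, a 4-byte
   scalar size, a forward step and a 16-byte DR_TARGET_ALIGNMENT, and the
   peel removes NPEEL = 3 scalar iterations.  The second case above then
   computes misal = (4 + 3 * 4) & 15 = 0, i.e. DR becomes aligned in the
   vector loop; with NPEEL = 1 it would instead become 8.  */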
1040 /* Function verify_data_ref_alignment
1042 Return TRUE if DR can be handled with respect to alignment. */
1044 static bool
1045 verify_data_ref_alignment (data_reference_p dr)
1047 enum dr_alignment_support supportable_dr_alignment
1048 = vect_supportable_dr_alignment (dr, false);
1049 if (!supportable_dr_alignment)
1051 if (dump_enabled_p ())
1053 if (DR_IS_READ (dr))
1054 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1055 "not vectorized: unsupported unaligned load.");
1056 else
1057 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1058 "not vectorized: unsupported unaligned "
1059 "store.");
1061 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
1062 DR_REF (dr));
1063 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1065 return false;
1068 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
1069 dump_printf_loc (MSG_NOTE, vect_location,
1070 "Vectorizing an unaligned access.\n");
1072 return true;
1075 /* Function vect_verify_datarefs_alignment
1077 Return TRUE if all data references in the loop can be
1078 handled with respect to alignment. */
1080 bool
1081 vect_verify_datarefs_alignment (loop_vec_info vinfo)
1083 vec<data_reference_p> datarefs = vinfo->datarefs;
1084 struct data_reference *dr;
1085 unsigned int i;
1087 FOR_EACH_VEC_ELT (datarefs, i, dr)
1089 gimple *stmt = DR_STMT (dr);
1090 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1092 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1093 continue;
1095 /* For interleaving, only the alignment of the first access matters. */
1096 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1097 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1098 continue;
1100 /* Strided accesses perform only component accesses, alignment is
1101 irrelevant for them. */
1102 if (STMT_VINFO_STRIDED_P (stmt_info)
1103 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1104 continue;
1106 if (! verify_data_ref_alignment (dr))
1107 return false;
1110 return true;
1113 Given a memory reference EXP, return whether its alignment is less
1114 than its size. */
1116 static bool
1117 not_size_aligned (tree exp)
1119 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
1120 return true;
1122 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
1123 > get_object_alignment (exp));
1126 /* Function vector_alignment_reachable_p
1128 Return true if vector alignment for DR is reachable by peeling
1129 a few loop iterations. Return false otherwise. */
1131 static bool
1132 vector_alignment_reachable_p (struct data_reference *dr)
1134 gimple *stmt = DR_STMT (dr);
1135 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1136 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1138 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1140 /* For interleaved access we peel only if the number of iterations in
1141 the prolog loop ({VF - misalignment}) is a multiple of the
1142 number of interleaved accesses. */
1143 int elem_size, mis_in_elements;
1145 /* FORNOW: handle only known alignment. */
1146 if (!known_alignment_for_access_p (dr))
1147 return false;
1149 poly_uint64 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1150 poly_uint64 vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
1151 elem_size = vector_element_size (vector_size, nelements);
1152 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1154 if (!multiple_p (nelements - mis_in_elements, GROUP_SIZE (stmt_info)))
1155 return false;
1158 /* If misalignment is known at the compile time then allow peeling
1159 only if natural alignment is reachable through peeling. */
1160 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1162 HOST_WIDE_INT elmsize =
1163 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1164 if (dump_enabled_p ())
1166 dump_printf_loc (MSG_NOTE, vect_location,
1167 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1168 dump_printf (MSG_NOTE,
1169 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
1171 if (DR_MISALIGNMENT (dr) % elmsize)
1173 if (dump_enabled_p ())
1174 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1175 "data size does not divide the misalignment.\n");
1176 return false;
1180 if (!known_alignment_for_access_p (dr))
1182 tree type = TREE_TYPE (DR_REF (dr));
1183 bool is_packed = not_size_aligned (DR_REF (dr));
1184 if (dump_enabled_p ())
1185 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1186 "Unknown misalignment, %snaturally aligned\n",
1187 is_packed ? "not " : "");
1188 return targetm.vectorize.vector_alignment_reachable (type, is_packed);
1191 return true;
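/* Worked example (illustrative): for a non-grouped V4SI access with 4-byte
   elements, a 16-byte target alignment and a known misalignment of 8
   bytes, the misalignment is a multiple of the element size, so natural
   alignment is reachable by peeling two scalar iterations; a misalignment
   of 6 bytes would make the function return false because no whole number
   of scalar iterations can repair it.  */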
1195 /* Calculate the cost of the memory access represented by DR. */
1197 static void
1198 vect_get_data_access_cost (struct data_reference *dr,
1199 unsigned int *inside_cost,
1200 unsigned int *outside_cost,
1201 stmt_vector_for_cost *body_cost_vec)
1203 gimple *stmt = DR_STMT (dr);
1204 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1205 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1206 int ncopies;
1208 if (PURE_SLP_STMT (stmt_info))
1209 ncopies = 1;
1210 else
1211 ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
1213 if (DR_IS_READ (dr))
1214 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1215 NULL, body_cost_vec, false);
1216 else
1217 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1219 if (dump_enabled_p ())
1220 dump_printf_loc (MSG_NOTE, vect_location,
1221 "vect_get_data_access_cost: inside_cost = %d, "
1222 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1226 typedef struct _vect_peel_info
1228 struct data_reference *dr;
1229 int npeel;
1230 unsigned int count;
1231 } *vect_peel_info;
1233 typedef struct _vect_peel_extended_info
1235 struct _vect_peel_info peel_info;
1236 unsigned int inside_cost;
1237 unsigned int outside_cost;
1238 } *vect_peel_extended_info;
1241 /* Peeling hashtable helpers. */
1243 struct peel_info_hasher : free_ptr_hash <_vect_peel_info>
1245 static inline hashval_t hash (const _vect_peel_info *);
1246 static inline bool equal (const _vect_peel_info *, const _vect_peel_info *);
1249 inline hashval_t
1250 peel_info_hasher::hash (const _vect_peel_info *peel_info)
1252 return (hashval_t) peel_info->npeel;
1255 inline bool
1256 peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b)
1258 return (a->npeel == b->npeel);
1262 /* Insert DR into peeling hash table with NPEEL as key. */
1264 static void
1265 vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
1266 loop_vec_info loop_vinfo, struct data_reference *dr,
1267 int npeel)
1269 struct _vect_peel_info elem, *slot;
1270 _vect_peel_info **new_slot;
1271 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1273 elem.npeel = npeel;
1274 slot = peeling_htab->find (&elem);
1275 if (slot)
1276 slot->count++;
1277 else
1279 slot = XNEW (struct _vect_peel_info);
1280 slot->npeel = npeel;
1281 slot->dr = dr;
1282 slot->count = 1;
1283 new_slot = peeling_htab->find_slot (slot, INSERT);
1284 *new_slot = slot;
1287 if (!supportable_dr_alignment
1288 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1289 slot->count += VECT_MAX_COST;
1293 /* Traverse peeling hash table to find peeling option that aligns maximum
1294 number of data accesses. */
1297 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1298 _vect_peel_extended_info *max)
1300 vect_peel_info elem = *slot;
1302 if (elem->count > max->peel_info.count
1303 || (elem->count == max->peel_info.count
1304 && max->peel_info.npeel > elem->npeel))
1306 max->peel_info.npeel = elem->npeel;
1307 max->peel_info.count = elem->count;
1308 max->peel_info.dr = elem->dr;
1311 return 1;
1314 /* Get the costs of peeling NPEEL iterations checking data access costs
1315 for all data refs. If UNKNOWN_MISALIGNMENT is true, we assume DR0's
1316 misalignment will be zero after peeling. */
1318 static void
1319 vect_get_peeling_costs_all_drs (vec<data_reference_p> datarefs,
1320 struct data_reference *dr0,
1321 unsigned int *inside_cost,
1322 unsigned int *outside_cost,
1323 stmt_vector_for_cost *body_cost_vec,
1324 unsigned int npeel,
1325 bool unknown_misalignment)
1327 unsigned i;
1328 data_reference *dr;
1330 FOR_EACH_VEC_ELT (datarefs, i, dr)
1332 gimple *stmt = DR_STMT (dr);
1333 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1334 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1335 continue;
1337 /* For interleaving, only the alignment of the first access
1338 matters. */
1339 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1340 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1341 continue;
1343 /* Strided accesses perform only component accesses, alignment is
1344 irrelevant for them. */
1345 if (STMT_VINFO_STRIDED_P (stmt_info)
1346 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1347 continue;
1349 int save_misalignment;
1350 save_misalignment = DR_MISALIGNMENT (dr);
1351 if (npeel == 0)
1353 else if (unknown_misalignment && dr == dr0)
1354 SET_DR_MISALIGNMENT (dr, 0);
1355 else
1356 vect_update_misalignment_for_peel (dr, dr0, npeel);
1357 vect_get_data_access_cost (dr, inside_cost, outside_cost,
1358 body_cost_vec);
1359 SET_DR_MISALIGNMENT (dr, save_misalignment);
1363 /* Traverse peeling hash table and calculate cost for each peeling option.
1364 Find the one with the lowest cost. */
1367 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1368 _vect_peel_extended_info *min)
1370 vect_peel_info elem = *slot;
1371 int dummy;
1372 unsigned int inside_cost = 0, outside_cost = 0;
1373 gimple *stmt = DR_STMT (elem->dr);
1374 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1375 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1376 stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
1377 epilogue_cost_vec;
1379 prologue_cost_vec.create (2);
1380 body_cost_vec.create (2);
1381 epilogue_cost_vec.create (2);
1383 vect_get_peeling_costs_all_drs (LOOP_VINFO_DATAREFS (loop_vinfo),
1384 elem->dr, &inside_cost, &outside_cost,
1385 &body_cost_vec, elem->npeel, false);
1387 body_cost_vec.release ();
1389 outside_cost += vect_get_known_peeling_cost
1390 (loop_vinfo, elem->npeel, &dummy,
1391 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1392 &prologue_cost_vec, &epilogue_cost_vec);
1394 /* Prologue and epilogue costs are added to the target model later.
1395 These costs depend only on the scalar iteration cost, the
1396 number of peeling iterations finally chosen, and the number of
1397 misaligned statements. So discard the information found here. */
1398 prologue_cost_vec.release ();
1399 epilogue_cost_vec.release ();
1401 if (inside_cost < min->inside_cost
1402 || (inside_cost == min->inside_cost
1403 && outside_cost < min->outside_cost))
1405 min->inside_cost = inside_cost;
1406 min->outside_cost = outside_cost;
1407 min->peel_info.dr = elem->dr;
1408 min->peel_info.npeel = elem->npeel;
1409 min->peel_info.count = elem->count;
1412 return 1;
1416 /* Choose best peeling option by traversing peeling hash table and either
1417 choosing an option with the lowest cost (if cost model is enabled) or the
1418 option that aligns as many accesses as possible. */
1420 static struct _vect_peel_extended_info
1421 vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab,
1422 loop_vec_info loop_vinfo)
1424 struct _vect_peel_extended_info res;
1426 res.peel_info.dr = NULL;
1428 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1430 res.inside_cost = INT_MAX;
1431 res.outside_cost = INT_MAX;
1432 peeling_htab->traverse <_vect_peel_extended_info *,
1433 vect_peeling_hash_get_lowest_cost> (&res);
1435 else
1437 res.peel_info.count = 0;
1438 peeling_htab->traverse <_vect_peel_extended_info *,
1439 vect_peeling_hash_get_most_frequent> (&res);
1440 res.inside_cost = 0;
1441 res.outside_cost = 0;
1444 return res;
1447 /* Return true if the new peeling NPEEL is supported. */
1449 static bool
1450 vect_peeling_supportable (loop_vec_info loop_vinfo, struct data_reference *dr0,
1451 unsigned npeel)
1453 unsigned i;
1454 struct data_reference *dr = NULL;
1455 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1456 gimple *stmt;
1457 stmt_vec_info stmt_info;
1458 enum dr_alignment_support supportable_dr_alignment;
1460 /* Ensure that all data refs can be vectorized after the peel. */
1461 FOR_EACH_VEC_ELT (datarefs, i, dr)
1463 int save_misalignment;
1465 if (dr == dr0)
1466 continue;
1468 stmt = DR_STMT (dr);
1469 stmt_info = vinfo_for_stmt (stmt);
1470 /* For interleaving, only the alignment of the first access
1471 matters. */
1472 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1473 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1474 continue;
1476 /* Strided accesses perform only component accesses, alignment is
1477 irrelevant for them. */
1478 if (STMT_VINFO_STRIDED_P (stmt_info)
1479 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1480 continue;
1482 save_misalignment = DR_MISALIGNMENT (dr);
1483 vect_update_misalignment_for_peel (dr, dr0, npeel);
1484 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1485 SET_DR_MISALIGNMENT (dr, save_misalignment);
1487 if (!supportable_dr_alignment)
1488 return false;
1491 return true;
1494 /* Function vect_enhance_data_refs_alignment
1496 This pass will use loop versioning and loop peeling in order to enhance
1497 the alignment of data references in the loop.
1499 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1500 original loop is to be vectorized. Any other loops that are created by
1501 the transformations performed in this pass - are not supposed to be
1502 vectorized. This restriction will be relaxed.
1504 This pass will require a cost model to guide it whether to apply peeling
1505 or versioning or a combination of the two. For example, the scheme that
1506 Intel uses when given a loop with several memory accesses is as follows:
1507 choose one memory access ('p') whose alignment you want to force by doing
1508 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1509 other accesses are not necessarily aligned, or (2) use loop versioning to
1510 generate one loop in which all accesses are aligned, and another loop in
1511 which only 'p' is necessarily aligned.
1513 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1514 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1515 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1517 Devising a cost model is the most critical aspect of this work. It will
1518 guide us on which access to peel for, whether to use loop versioning, how
1519 many versions to create, etc. The cost model will probably consist of
1520 generic considerations as well as target specific considerations (on
1521 powerpc for example, misaligned stores are more painful than misaligned
1522 loads).
1524 Here are the general steps involved in alignment enhancements:
1526 -- original loop, before alignment analysis:
1527 for (i=0; i<N; i++){
1528 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1529 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1532 -- After vect_compute_data_refs_alignment:
1533 for (i=0; i<N; i++){
1534 x = q[i]; # DR_MISALIGNMENT(q) = 3
1535 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1538 -- Possibility 1: we do loop versioning:
1539 if (p is aligned) {
1540 for (i=0; i<N; i++){ # loop 1A
1541 x = q[i]; # DR_MISALIGNMENT(q) = 3
1542 p[i] = y; # DR_MISALIGNMENT(p) = 0
1545 else {
1546 for (i=0; i<N; i++){ # loop 1B
1547 x = q[i]; # DR_MISALIGNMENT(q) = 3
1548 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1552 -- Possibility 2: we do loop peeling:
1553 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1554 x = q[i];
1555 p[i] = y;
1557 for (i = 3; i < N; i++){ # loop 2A
1558 x = q[i]; # DR_MISALIGNMENT(q) = 0
1559 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1562 -- Possibility 3: combination of loop peeling and versioning:
1563 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1564 x = q[i];
1565 p[i] = y;
1567 if (p is aligned) {
1568 for (i = 3; i<N; i++){ # loop 3A
1569 x = q[i]; # DR_MISALIGNMENT(q) = 0
1570 p[i] = y; # DR_MISALIGNMENT(p) = 0
1573 else {
1574 for (i = 3; i<N; i++){ # loop 3B
1575 x = q[i]; # DR_MISALIGNMENT(q) = 0
1576 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1580 These loops are later passed to loop_transform to be vectorized. The
1581 vectorizer will use the alignment information to guide the transformation
1582 (whether to generate regular loads/stores, or with special handling for
1583 misalignment). */
1585 bool
1586 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1588 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1589 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1590 enum dr_alignment_support supportable_dr_alignment;
1591 struct data_reference *dr0 = NULL, *first_store = NULL;
1592 struct data_reference *dr;
1593 unsigned int i, j;
1594 bool do_peeling = false;
1595 bool do_versioning = false;
1596 bool stat;
1597 gimple *stmt;
1598 stmt_vec_info stmt_info;
1599 unsigned int npeel = 0;
1600 bool one_misalignment_known = false;
1601 bool one_misalignment_unknown = false;
1602 bool one_dr_unsupportable = false;
1603 struct data_reference *unsupportable_dr = NULL;
1604 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1605 unsigned possible_npeel_number = 1;
1606 tree vectype;
1607 unsigned int mis, same_align_drs_max = 0;
1608 hash_table<peel_info_hasher> peeling_htab (1);
1610 if (dump_enabled_p ())
1611 dump_printf_loc (MSG_NOTE, vect_location,
1612 "=== vect_enhance_data_refs_alignment ===\n");
1614 /* Reset data so we can safely be called multiple times. */
1615 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1616 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0;
1618 /* While cost model enhancements are expected in the future, the high level
1619 view of the code at this time is as follows:
1621 A) If there is a misaligned access then see if peeling to align
1622 this access can make all data references satisfy
1623 vect_supportable_dr_alignment. If so, update data structures
1624 as needed and return true.
1626 B) If peeling wasn't possible and there is a data reference with an
1627 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1628 then see if loop versioning checks can be used to make all data
1629 references satisfy vect_supportable_dr_alignment. If so, update
1630 data structures as needed and return true.
1632 C) If neither peeling nor versioning were successful then return false if
1633 any data reference does not satisfy vect_supportable_dr_alignment.
1635 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1637 Note, Possibility 3 above (which is peeling and versioning together) is not
1638 being done at this time. */
1640 /* (1) Peeling to force alignment. */
1642 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1643 Considerations:
1644 + How many accesses will become aligned due to the peeling
1645 - How many accesses will become unaligned due to the peeling,
1646 and the cost of misaligned accesses.
1647 - The cost of peeling (the extra runtime checks, the increase
1648 in code size). */
1650 FOR_EACH_VEC_ELT (datarefs, i, dr)
1652 stmt = DR_STMT (dr);
1653 stmt_info = vinfo_for_stmt (stmt);
1655 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1656 continue;
1658 /* For interleaving, only the alignment of the first access
1659 matters. */
1660 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1661 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1662 continue;
1664 /* For invariant accesses there is nothing to enhance. */
1665 if (integer_zerop (DR_STEP (dr)))
1666 continue;
1668 /* Strided accesses perform only component accesses, alignment is
1669 irrelevant for them. */
1670 if (STMT_VINFO_STRIDED_P (stmt_info)
1671 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1672 continue;
1674 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1675 do_peeling = vector_alignment_reachable_p (dr);
1676 if (do_peeling)
1678 if (known_alignment_for_access_p (dr))
1680 unsigned int npeel_tmp = 0;
1681 bool negative = tree_int_cst_compare (DR_STEP (dr),
1682 size_zero_node) < 0;
1684 vectype = STMT_VINFO_VECTYPE (stmt_info);
1685 unsigned int target_align = DR_TARGET_ALIGNMENT (dr);
1686 unsigned int dr_size = vect_get_scalar_dr_size (dr);
1687 mis = (negative ? DR_MISALIGNMENT (dr) : -DR_MISALIGNMENT (dr));
1688 if (DR_MISALIGNMENT (dr) != 0)
1689 npeel_tmp = (mis & (target_align - 1)) / dr_size;
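/* Worked example (illustrative): with a 16-byte target alignment, 4-byte
   elements and a forward access misaligned by 4 bytes, MIS is -4 and
   (mis & 15) / 4 gives NPEEL_TMP = 3, i.e. three peeled scalar iterations
   bring the access to an alignment boundary.  */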
1691 /* For multiple types, it is possible that the bigger type access
1692 will have more than one peeling option. E.g., a loop with two
1693 types: one of size (vector size / 4), and the other one of
1694 size (vector size / 8). The vectorization factor will be 8. If both
1695 accesses are misaligned by 3, the first one needs one scalar
1696 iteration to be aligned, and the second one needs 5. But the
1697 first one will be aligned also by peeling 5 scalar
1698 iterations, and in that case both accesses will be aligned.
1699 Hence, except for the immediate peeling amount, we also want
1700 to try to add full vector size, while we don't exceed
1701 vectorization factor.
1702 We do this automatically for cost model, since we calculate
1703 cost for every peeling option. */
1704 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1706 poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
1707 ? vf * GROUP_SIZE (stmt_info) : vf);
1708 possible_npeel_number
1709 = vect_get_num_vectors (nscalars, vectype);
1711 /* NPEEL_TMP is 0 when there is no misalignment, but also
1712 allow peeling NELEMENTS. */
1713 if (DR_MISALIGNMENT (dr) == 0)
1714 possible_npeel_number++;
1717 /* Save info about DR in the hash table. Also include peeling
1718 amounts according to the explanation above. */
1719 for (j = 0; j < possible_npeel_number; j++)
1721 vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
1722 dr, npeel_tmp);
1723 npeel_tmp += target_align / dr_size;
1726 one_misalignment_known = true;
1728 else
1730 /* If we don't know any misalignment values, we prefer
1731 peeling for the data-ref that has the maximum number of data-refs
1732 with the same alignment, unless the target prefers to align
1733 stores over loads. */
1734 unsigned same_align_drs
1735 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1736 if (!dr0
1737 || same_align_drs_max < same_align_drs)
1739 same_align_drs_max = same_align_drs;
1740 dr0 = dr;
1742 /* For data-refs with the same number of related
1743 accesses prefer the one where the misalign
1744 computation will be invariant in the outermost loop. */
1745 else if (same_align_drs_max == same_align_drs)
1747 struct loop *ivloop0, *ivloop;
1748 ivloop0 = outermost_invariant_loop_for_expr
1749 (loop, DR_BASE_ADDRESS (dr0));
1750 ivloop = outermost_invariant_loop_for_expr
1751 (loop, DR_BASE_ADDRESS (dr));
1752 if ((ivloop && !ivloop0)
1753 || (ivloop && ivloop0
1754 && flow_loop_nested_p (ivloop, ivloop0)))
1755 dr0 = dr;
1758 one_misalignment_unknown = true;
1760 /* Check for data refs with unsupportable alignment that
1761 can be peeled. */
1762 if (!supportable_dr_alignment)
1764 one_dr_unsupportable = true;
1765 unsupportable_dr = dr;
1768 if (!first_store && DR_IS_WRITE (dr))
1769 first_store = dr;
1772 else
1774 if (!aligned_access_p (dr))
1776 if (dump_enabled_p ())
1777 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1778 "vector alignment may not be reachable\n");
1779 break;
1784 /* Check if we can possibly peel the loop. */
1785 if (!vect_can_advance_ivs_p (loop_vinfo)
1786 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))
1787 || loop->inner)
1788 do_peeling = false;
1790 struct _vect_peel_extended_info peel_for_known_alignment;
1791 struct _vect_peel_extended_info peel_for_unknown_alignment;
1792 struct _vect_peel_extended_info best_peel;
1794 peel_for_unknown_alignment.inside_cost = INT_MAX;
1795 peel_for_unknown_alignment.outside_cost = INT_MAX;
1796 peel_for_unknown_alignment.peel_info.count = 0;
1798 if (do_peeling
1799 && one_misalignment_unknown)
1801 /* Check whether the target prefers aligning stores over loads, i.e.,
1802 whether misaligned stores are more expensive than misaligned loads
1803 (taking drs with the same alignment into account). */
1804 unsigned int load_inside_cost = 0;
1805 unsigned int load_outside_cost = 0;
1806 unsigned int store_inside_cost = 0;
1807 unsigned int store_outside_cost = 0;
1808 unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
1810 stmt_vector_for_cost dummy;
1811 dummy.create (2);
1812 vect_get_peeling_costs_all_drs (datarefs, dr0,
1813 &load_inside_cost,
1814 &load_outside_cost,
1815 &dummy, estimated_npeels, true);
1816 dummy.release ();
1818 if (first_store)
1820 dummy.create (2);
1821 vect_get_peeling_costs_all_drs (datarefs, first_store,
1822 &store_inside_cost,
1823 &store_outside_cost,
1824 &dummy, estimated_npeels, true);
1825 dummy.release ();
1827 else
1829 store_inside_cost = INT_MAX;
1830 store_outside_cost = INT_MAX;
1833 if (load_inside_cost > store_inside_cost
1834 || (load_inside_cost == store_inside_cost
1835 && load_outside_cost > store_outside_cost))
1837 dr0 = first_store;
1838 peel_for_unknown_alignment.inside_cost = store_inside_cost;
1839 peel_for_unknown_alignment.outside_cost = store_outside_cost;
1841 else
1843 peel_for_unknown_alignment.inside_cost = load_inside_cost;
1844 peel_for_unknown_alignment.outside_cost = load_outside_cost;
1847 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
1848 prologue_cost_vec.create (2);
1849 epilogue_cost_vec.create (2);
1851 int dummy2;
1852 peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost
1853 (loop_vinfo, estimated_npeels, &dummy2,
1854 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1855 &prologue_cost_vec, &epilogue_cost_vec);
1857 prologue_cost_vec.release ();
1858 epilogue_cost_vec.release ();
1860 peel_for_unknown_alignment.peel_info.count = 1
1861 + STMT_VINFO_SAME_ALIGN_REFS
1862 (vinfo_for_stmt (DR_STMT (dr0))).length ();
1865 peel_for_unknown_alignment.peel_info.npeel = 0;
1866 peel_for_unknown_alignment.peel_info.dr = dr0;
1868 best_peel = peel_for_unknown_alignment;
1870 peel_for_known_alignment.inside_cost = INT_MAX;
1871 peel_for_known_alignment.outside_cost = INT_MAX;
1872 peel_for_known_alignment.peel_info.count = 0;
1873 peel_for_known_alignment.peel_info.dr = NULL;
1875 if (do_peeling && one_misalignment_known)
1877 /* Peeling is possible, and there is no data access that is unsupported
1878 when misaligned. So we try to choose the best possible peeling from
1879 the hash table. */
1880 peel_for_known_alignment = vect_peeling_hash_choose_best_peeling
1881 (&peeling_htab, loop_vinfo);
1884 /* Compare costs of peeling for known and unknown alignment. */
1885 if (peel_for_known_alignment.peel_info.dr != NULL
1886 && peel_for_unknown_alignment.inside_cost
1887 >= peel_for_known_alignment.inside_cost)
1889 best_peel = peel_for_known_alignment;
1891 /* If the best peeling for known alignment has NPEEL == 0, perform no
1892 peeling at all except if there is an unsupportable dr that we can
1893 align. */
1894 if (best_peel.peel_info.npeel == 0 && !one_dr_unsupportable)
1895 do_peeling = false;
1898 /* If there is an unsupportable data ref, prefer it over all choices so far,
1899 since we would have to discard any chosen peeling anyway unless it
1900 happened to align the unsupportable data ref as well. */
1901 if (one_dr_unsupportable)
1902 dr0 = unsupportable_dr;
1903 else if (do_peeling)
1905 /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
1906 TODO: Use nopeel_outside_cost or get rid of it? */
1907 unsigned nopeel_inside_cost = 0;
1908 unsigned nopeel_outside_cost = 0;
1910 stmt_vector_for_cost dummy;
1911 dummy.create (2);
1912 vect_get_peeling_costs_all_drs (datarefs, NULL, &nopeel_inside_cost,
1913 &nopeel_outside_cost, &dummy, 0, false);
1914 dummy.release ();
1916 /* Add epilogue costs. As we do not peel for alignment here, no prologue
1917 costs will be recorded. */
1918 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
1919 prologue_cost_vec.create (2);
1920 epilogue_cost_vec.create (2);
1922 int dummy2;
1923 nopeel_outside_cost += vect_get_known_peeling_cost
1924 (loop_vinfo, 0, &dummy2,
1925 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1926 &prologue_cost_vec, &epilogue_cost_vec);
1928 prologue_cost_vec.release ();
1929 epilogue_cost_vec.release ();
1931 npeel = best_peel.peel_info.npeel;
1932 dr0 = best_peel.peel_info.dr;
1934 /* If not peeling is no more expensive than the best peeling found
1935 so far, don't perform any peeling. */
1936 if (nopeel_inside_cost <= best_peel.inside_cost)
1937 do_peeling = false;
1940 if (do_peeling)
1942 stmt = DR_STMT (dr0);
1943 stmt_info = vinfo_for_stmt (stmt);
1944 vectype = STMT_VINFO_VECTYPE (stmt_info);
1946 if (known_alignment_for_access_p (dr0))
1948 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1949 size_zero_node) < 0;
1950 if (!npeel)
1952 /* Since it's known at compile time, compute the number of
1953 iterations in the peeled loop (the peeling factor) for use in
1954 updating DR_MISALIGNMENT values. The peeling factor is the
1955 vectorization factor minus the misalignment as an element
1956 count. */
1957 mis = negative ? DR_MISALIGNMENT (dr0) : -DR_MISALIGNMENT (dr0);
1958 unsigned int target_align = DR_TARGET_ALIGNMENT (dr0);
1959 npeel = ((mis & (target_align - 1))
1960 / vect_get_scalar_dr_size (dr0));
1963 /* For interleaved data access every iteration accesses all the
1964 members of the group, therefore we divide the number of iterations
1965 by the group size. */
1966 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1967 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1968 npeel /= GROUP_SIZE (stmt_info);
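/* For instance (numbers purely illustrative): if the element count
   computed above is 8 and the interleaving group has 4 members, every
   scalar iteration covers all 4 members, so only 8 / 4 = 2 iterations
   need to be peeled.  */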
1970 if (dump_enabled_p ())
1971 dump_printf_loc (MSG_NOTE, vect_location,
1972 "Try peeling by %d\n", npeel);
1975 /* Ensure that all datarefs can be vectorized after the peel. */
1976 if (!vect_peeling_supportable (loop_vinfo, dr0, npeel))
1977 do_peeling = false;
1979 /* Check if all datarefs are supportable and log. */
1980 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1982 stat = vect_verify_datarefs_alignment (loop_vinfo);
1983 if (!stat)
1984 do_peeling = false;
1985 else
1986 return stat;
1989 /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
1990 if (do_peeling)
1992 unsigned max_allowed_peel
1993 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1994 if (max_allowed_peel != (unsigned)-1)
1996 unsigned max_peel = npeel;
1997 if (max_peel == 0)
1999 unsigned int target_align = DR_TARGET_ALIGNMENT (dr0);
2000 max_peel = target_align / vect_get_scalar_dr_size (dr0) - 1;
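/* E.g. with a 16-byte target alignment and 4-byte elements the worst
   case is MAX_PEEL = 16 / 4 - 1 = 3; if the user passed
   --param vect-max-peeling-for-alignment=2 (an illustrative setting,
   not a recommendation), peeling is disabled below.  */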
2002 if (max_peel > max_allowed_peel)
2004 do_peeling = false;
2005 if (dump_enabled_p ())
2006 dump_printf_loc (MSG_NOTE, vect_location,
2007 "Disable peeling, max peels reached: %d\n", max_peel);
2012 /* Cost model #2 - if peeling may result in a remaining loop not
2013 iterating enough to be vectorized then do not peel. Since this
2014 is a cost heuristic rather than a correctness decision, use the
2015 most likely runtime value for variable vectorization factors. */
2016 if (do_peeling
2017 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2019 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
2020 unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
2021 if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
2022 < assumed_vf + max_peel)
2023 do_peeling = false;
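/* By way of example (illustrative numbers): with an assumed
   vectorization factor of 4 and NPEEL unknown at compile time
   (so MAX_PEEL = 3), a loop known to run 6 iterations satisfies
   6 < 4 + 3 and peeling is skipped; with 7 or more iterations
   peeling stays enabled.  */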
2026 if (do_peeling)
2028 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
2029 If the misalignment of DR_i is identical to that of dr0 then set
2030 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
2031 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
2032 by the peeling factor times the element size of DR_i (MOD the
2033 vectorization factor times the size). Otherwise, the
2034 misalignment of DR_i must be set to unknown. */
2035 FOR_EACH_VEC_ELT (datarefs, i, dr)
2036 if (dr != dr0)
2038 /* Strided accesses perform only component accesses, alignment
2039 is irrelevant for them. */
2040 stmt_info = vinfo_for_stmt (DR_STMT (dr));
2041 if (STMT_VINFO_STRIDED_P (stmt_info)
2042 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
2043 continue;
2045 vect_update_misalignment_for_peel (dr, dr0, npeel);
2048 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
2049 if (npeel)
2050 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
2051 else
2052 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2053 = DR_MISALIGNMENT (dr0);
2054 SET_DR_MISALIGNMENT (dr0, 0);
2055 if (dump_enabled_p ())
2057 dump_printf_loc (MSG_NOTE, vect_location,
2058 "Alignment of access forced using peeling.\n");
2059 dump_printf_loc (MSG_NOTE, vect_location,
2060 "Peeling for alignment will be applied.\n");
2063 /* The inside-loop cost will be accounted for in vectorizable_load
2064 and vectorizable_store correctly with adjusted alignments.
2065 Drop the body_cst_vec on the floor here. */
2066 stat = vect_verify_datarefs_alignment (loop_vinfo);
2067 gcc_assert (stat);
2068 return stat;
2072 /* (2) Versioning to force alignment. */
2074 /* Try versioning if:
2075 1) optimize loop for speed
2076 2) there is at least one unsupported misaligned data ref with an unknown
2077 misalignment, and
2078 3) all misaligned data refs with a known misalignment are supported, and
2079 4) the number of runtime alignment checks is within reason. */
2081 do_versioning =
2082 optimize_loop_nest_for_speed_p (loop)
2083 && (!loop->inner); /* FORNOW */
2085 if (do_versioning)
2087 FOR_EACH_VEC_ELT (datarefs, i, dr)
2089 stmt = DR_STMT (dr);
2090 stmt_info = vinfo_for_stmt (stmt);
2092 /* For interleaving, only the alignment of the first access
2093 matters. */
2094 if (aligned_access_p (dr)
2095 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
2096 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
2097 continue;
2099 if (STMT_VINFO_STRIDED_P (stmt_info))
2101 /* Strided loads perform only component accesses, alignment is
2102 irrelevant for them. */
2103 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
2104 continue;
2105 do_versioning = false;
2106 break;
2109 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
2111 if (!supportable_dr_alignment)
2113 gimple *stmt;
2114 int mask;
2115 tree vectype;
2117 if (known_alignment_for_access_p (dr)
2118 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
2119 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
2121 do_versioning = false;
2122 break;
2125 stmt = DR_STMT (dr);
2126 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2127 gcc_assert (vectype);
2129 /* At present we don't support versioning for alignment
2130 with variable VF, since there's no guarantee that the
2131 VF is a power of two. We could relax this if we added
2132 a way of enforcing a power-of-two size. */
2133 unsigned HOST_WIDE_INT size;
2134 if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size))
2136 do_versioning = false;
2137 break;
2140 /* The rightmost bits of an aligned address must be zeros.
2141 Construct the mask needed for this test. For example,
2142 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
2143 mask must be 15 = 0xf. */
2144 mask = size - 1;
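/* Illustration: for V4SI the mask is 0xf and an address ADDR is
   suitably aligned iff (ADDR & 0xf) == 0; this is, roughly, the test
   the versioned loop's guard performs for each recorded address.  */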
2146 /* FORNOW: use the same mask to test all potentially unaligned
2147 references in the loop. The vectorizer currently supports
2148 a single vector size, see the reference to
2149 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
2150 vectorization factor is computed. */
2151 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
2152 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
2153 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
2154 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
2155 DR_STMT (dr));
2159 /* Versioning requires at least one misaligned data reference. */
2160 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2161 do_versioning = false;
2162 else if (!do_versioning)
2163 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
2166 if (do_versioning)
2168 vec<gimple *> may_misalign_stmts
2169 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2170 gimple *stmt;
2172 /* It can now be assumed that the data references in the statements
2173 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
2174 of the loop being vectorized. */
2175 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
2177 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2178 dr = STMT_VINFO_DATA_REF (stmt_info);
2179 SET_DR_MISALIGNMENT (dr, 0);
2180 if (dump_enabled_p ())
2181 dump_printf_loc (MSG_NOTE, vect_location,
2182 "Alignment of access forced using versioning.\n");
2185 if (dump_enabled_p ())
2186 dump_printf_loc (MSG_NOTE, vect_location,
2187 "Versioning for alignment will be applied.\n");
2189 /* Peeling and versioning can't be done together at this time. */
2190 gcc_assert (! (do_peeling && do_versioning));
2192 stat = vect_verify_datarefs_alignment (loop_vinfo);
2193 gcc_assert (stat);
2194 return stat;
2197 /* This point is reached if neither peeling nor versioning is being done. */
2198 gcc_assert (! (do_peeling || do_versioning));
2200 stat = vect_verify_datarefs_alignment (loop_vinfo);
2201 return stat;
2205 /* Function vect_find_same_alignment_drs.
2207 Update group and alignment relations according to the chosen
2208 vectorization factor. */
2210 static void
2211 vect_find_same_alignment_drs (struct data_dependence_relation *ddr)
2213 struct data_reference *dra = DDR_A (ddr);
2214 struct data_reference *drb = DDR_B (ddr);
2215 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2216 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2218 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
2219 return;
2221 if (dra == drb)
2222 return;
2224 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
2225 || !operand_equal_p (DR_OFFSET (dra), DR_OFFSET (drb), 0)
2226 || !operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2227 return;
2229 /* Two references with distance zero have the same alignment. */
2230 offset_int diff = (wi::to_offset (DR_INIT (dra))
2231 - wi::to_offset (DR_INIT (drb)));
2232 if (diff != 0)
2234 /* Get the wider of the two alignments. */
2235 unsigned int align_a = (vect_calculate_target_alignment (dra)
2236 / BITS_PER_UNIT);
2237 unsigned int align_b = (vect_calculate_target_alignment (drb)
2238 / BITS_PER_UNIT);
2239 unsigned int max_align = MAX (align_a, align_b);
2241 /* Require the gap to be a multiple of the larger vector alignment. */
2242 if (!wi::multiple_of_p (diff, max_align, SIGNED))
2243 return;
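/* For example (values illustrative): if both references have a
   16-byte target alignment, accesses whose DR_INITs are 4 and 36
   (a gap of 32 bytes) are recorded as having the same alignment,
   whereas DR_INITs of 4 and 24 (a gap of 20 bytes) are not.  */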
2246 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
2247 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
2248 if (dump_enabled_p ())
2250 dump_printf_loc (MSG_NOTE, vect_location,
2251 "accesses have the same alignment: ");
2252 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2253 dump_printf (MSG_NOTE, " and ");
2254 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2255 dump_printf (MSG_NOTE, "\n");
2260 /* Function vect_analyze_data_refs_alignment
2262 Analyze the alignment of the data-references in the loop.
2263 Return FALSE if a data reference is found that cannot be vectorized. */
2265 bool
2266 vect_analyze_data_refs_alignment (loop_vec_info vinfo)
2268 if (dump_enabled_p ())
2269 dump_printf_loc (MSG_NOTE, vect_location,
2270 "=== vect_analyze_data_refs_alignment ===\n");
2272 /* Mark groups of data references with same alignment using
2273 data dependence information. */
2274 vec<ddr_p> ddrs = vinfo->ddrs;
2275 struct data_dependence_relation *ddr;
2276 unsigned int i;
2278 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2279 vect_find_same_alignment_drs (ddr);
2281 vec<data_reference_p> datarefs = vinfo->datarefs;
2282 struct data_reference *dr;
2284 vect_record_base_alignments (vinfo);
2285 FOR_EACH_VEC_ELT (datarefs, i, dr)
2287 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
2288 if (STMT_VINFO_VECTORIZABLE (stmt_info)
2289 && !vect_compute_data_ref_alignment (dr))
2291 /* Strided accesses perform only component accesses, misalignment
2292 information is irrelevant for them. */
2293 if (STMT_VINFO_STRIDED_P (stmt_info)
2294 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
2295 continue;
2297 if (dump_enabled_p ())
2298 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2299 "not vectorized: can't calculate alignment "
2300 "for data ref.\n");
2302 return false;
2306 return true;
2310 /* Analyze alignment of DRs of stmts in NODE. */
2312 static bool
2313 vect_slp_analyze_and_verify_node_alignment (slp_tree node)
2315 /* We vectorize from the first scalar stmt in the node unless
2316 the node is permuted, in which case we start from the first
2317 element in the group. */
2318 gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
2319 data_reference_p first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
2320 if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
2321 first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
2323 data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
2324 if (! vect_compute_data_ref_alignment (dr)
2325 /* For creating the data-ref pointer we need alignment of the
2326 first element anyway. */
2327 || (dr != first_dr
2328 && ! vect_compute_data_ref_alignment (first_dr))
2329 || ! verify_data_ref_alignment (dr))
2331 if (dump_enabled_p ())
2332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2333 "not vectorized: bad data alignment in basic "
2334 "block.\n");
2335 return false;
2338 return true;
2341 /* Function vect_slp_analyze_and_verify_instance_alignment
2343 Analyze the alignment of the data-references in the SLP instance.
2344 Return FALSE if a data reference is found that cannot be vectorized. */
2346 bool
2347 vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
2349 if (dump_enabled_p ())
2350 dump_printf_loc (MSG_NOTE, vect_location,
2351 "=== vect_slp_analyze_and_verify_instance_alignment ===\n");
2353 slp_tree node;
2354 unsigned i;
2355 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
2356 if (! vect_slp_analyze_and_verify_node_alignment (node))
2357 return false;
2359 node = SLP_INSTANCE_TREE (instance);
2360 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]))
2361 && ! vect_slp_analyze_and_verify_node_alignment
2362 (SLP_INSTANCE_TREE (instance)))
2363 return false;
2365 return true;
2369 /* Analyze groups of accesses: check that DR belongs to a group of
2370 accesses of legal size, step, etc. Detect gaps, single element
2371 interleaving, and other special cases. Set grouped access info.
2372 Collect groups of strided stores for further use in SLP analysis.
2373 Worker for vect_analyze_group_access. */
2375 static bool
2376 vect_analyze_group_access_1 (struct data_reference *dr)
2378 tree step = DR_STEP (dr);
2379 tree scalar_type = TREE_TYPE (DR_REF (dr));
2380 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2381 gimple *stmt = DR_STMT (dr);
2382 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2383 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2384 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2385 HOST_WIDE_INT dr_step = -1;
2386 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2387 bool slp_impossible = false;
2389 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2390 size of the interleaving group (including gaps). */
2391 if (tree_fits_shwi_p (step))
2393 dr_step = tree_to_shwi (step);
2394 /* Check that STEP is a multiple of type size. Otherwise there is
2395 a non-element-sized gap at the end of the group which we
2396 cannot represent in GROUP_GAP or GROUP_SIZE.
2397 ??? As we can handle non-constant steps fine here, we should
2398 simply remove uses of GROUP_GAP between the last and first
2399 element and instead rely on DR_STEP. GROUP_SIZE then would
2400 simply not include that gap. */
2401 if ((dr_step % type_size) != 0)
2403 if (dump_enabled_p ())
2405 dump_printf_loc (MSG_NOTE, vect_location,
2406 "Step ");
2407 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2408 dump_printf (MSG_NOTE,
2409 " is not a multiple of the element size for ");
2410 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2411 dump_printf (MSG_NOTE, "\n");
2413 return false;
2415 groupsize = absu_hwi (dr_step) / type_size;
2417 else
2418 groupsize = 0;
2420 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2421 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2423 /* Check whether this DR is part of an interleaving group and is the single
2424 element of the group that is accessed in the loop. */
2426 /* Gaps are supported only for loads. STEP must be a multiple of the type
2427 size. The size of the group must be a power of 2. */
2428 if (DR_IS_READ (dr)
2429 && (dr_step % type_size) == 0
2430 && groupsize > 0
2431 && pow2p_hwi (groupsize))
2433 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2434 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2435 GROUP_GAP (stmt_info) = groupsize - 1;
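/* As a concrete illustration: a load of 4-byte elements with a
   constant step of 16 bytes gives GROUPSIZE = 16 / 4 = 4 (a power of
   two), so the access is treated as a single-element group with
   GROUP_GAP = 3, i.e. three elements are skipped per iteration.  */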
2436 if (dump_enabled_p ())
2438 dump_printf_loc (MSG_NOTE, vect_location,
2439 "Detected single element interleaving ");
2440 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2441 dump_printf (MSG_NOTE, " step ");
2442 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2443 dump_printf (MSG_NOTE, "\n");
2446 return true;
2449 if (dump_enabled_p ())
2451 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2452 "not consecutive access ");
2453 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2456 if (bb_vinfo)
2458 /* Mark the statement as unvectorizable. */
2459 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2460 return true;
2463 dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
2464 STMT_VINFO_STRIDED_P (stmt_info) = true;
2465 return true;
2468 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2470 /* First stmt in the interleaving chain. Check the chain. */
2471 gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2472 struct data_reference *data_ref = dr;
2473 unsigned int count = 1;
2474 tree prev_init = DR_INIT (data_ref);
2475 gimple *prev = stmt;
2476 HOST_WIDE_INT diff, gaps = 0;
2478 while (next)
2480 /* Skip same data-refs. In case two or more stmts share a
2481 data-ref (supported only for loads), we vectorize only the first
2482 stmt, and the rest get their vectorized loads from the first
2483 one. */
2484 if (!tree_int_cst_compare (DR_INIT (data_ref),
2485 DR_INIT (STMT_VINFO_DATA_REF (
2486 vinfo_for_stmt (next)))))
2488 if (DR_IS_WRITE (data_ref))
2490 if (dump_enabled_p ())
2491 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2492 "Two store stmts share the same dr.\n");
2493 return false;
2496 if (dump_enabled_p ())
2497 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2498 "Two or more load stmts share the same dr.\n");
2500 /* For loads, use the same data-ref load. */
2501 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2503 prev = next;
2504 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2505 continue;
2508 prev = next;
2509 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2511 /* All group members have the same STEP by construction. */
2512 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2514 /* Check that the distance between two accesses is equal to the type
2515 size. Otherwise, we have gaps. */
2516 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2517 - TREE_INT_CST_LOW (prev_init)) / type_size;
2518 if (diff != 1)
2520 /* FORNOW: SLP of accesses with gaps is not supported. */
2521 slp_impossible = true;
2522 if (DR_IS_WRITE (data_ref))
2524 if (dump_enabled_p ())
2525 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2526 "interleaved store with gaps\n");
2527 return false;
2530 gaps += diff - 1;
2533 last_accessed_element += diff;
2535 /* Store the gap from the previous member of the group. If there is no
2536 gap in the access, GROUP_GAP is always 1. */
2537 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2539 prev_init = DR_INIT (data_ref);
2540 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2541 /* Count the number of data-refs in the chain. */
2542 count++;
2545 if (groupsize == 0)
2546 groupsize = count + gaps;
2548 /* This could be UINT_MAX, but as we are generating code in a very
2549 inefficient way we have to cap earlier. See PR78699 for example. */
2550 if (groupsize > 4096)
2552 if (dump_enabled_p ())
2553 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2554 "group is too large\n");
2555 return false;
2558 /* Check that the size of the interleaving is equal to count for stores,
2559 i.e., that there are no gaps. */
2560 if (groupsize != count
2561 && !DR_IS_READ (dr))
2563 if (dump_enabled_p ())
2564 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2565 "interleaved store with gaps\n");
2566 return false;
2569 /* If there is a gap after the last load in the group it is the
2570 difference between the groupsize and the last accessed
2571 element.
2572 When there is no gap, this difference should be 0. */
2573 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
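/* For instance (numbers purely illustrative): two 4-byte loads at
   element offsets 0 and 1 within a 16-byte step give GROUPSIZE = 4 and
   LAST_ACCESSED_ELEMENT = 2, so a gap of 4 - 2 = 2 elements is
   recorded after the group.  */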
2575 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2576 if (dump_enabled_p ())
2578 dump_printf_loc (MSG_NOTE, vect_location,
2579 "Detected interleaving ");
2580 if (DR_IS_READ (dr))
2581 dump_printf (MSG_NOTE, "load ");
2582 else
2583 dump_printf (MSG_NOTE, "store ");
2584 dump_printf (MSG_NOTE, "of size %u starting with ",
2585 (unsigned)groupsize);
2586 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2587 if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
2588 dump_printf_loc (MSG_NOTE, vect_location,
2589 "There is a gap of %u elements after the group\n",
2590 GROUP_GAP (vinfo_for_stmt (stmt)));
2593 /* SLP: create an SLP data structure for every interleaving group of
2594 stores for further analysis in vect_analyze_slp. */
2595 if (DR_IS_WRITE (dr) && !slp_impossible)
2597 if (loop_vinfo)
2598 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2599 if (bb_vinfo)
2600 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2604 return true;
2607 /* Analyze groups of accesses: check that DR belongs to a group of
2608 accesses of legal size, step, etc. Detect gaps, single element
2609 interleaving, and other special cases. Set grouped access info.
2610 Collect groups of strided stores for further use in SLP analysis. */
2612 static bool
2613 vect_analyze_group_access (struct data_reference *dr)
2615 if (!vect_analyze_group_access_1 (dr))
2617 /* Dissolve the group if present. */
2618 gimple *next;
2619 gimple *stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
2620 while (stmt)
2622 stmt_vec_info vinfo = vinfo_for_stmt (stmt);
2623 next = GROUP_NEXT_ELEMENT (vinfo);
2624 GROUP_FIRST_ELEMENT (vinfo) = NULL;
2625 GROUP_NEXT_ELEMENT (vinfo) = NULL;
2626 stmt = next;
2628 return false;
2630 return true;
2633 /* Analyze the access pattern of the data-reference DR.
2634 In case of non-consecutive accesses call vect_analyze_group_access() to
2635 analyze groups of accesses. */
2637 static bool
2638 vect_analyze_data_ref_access (struct data_reference *dr)
2640 tree step = DR_STEP (dr);
2641 tree scalar_type = TREE_TYPE (DR_REF (dr));
2642 gimple *stmt = DR_STMT (dr);
2643 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2644 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2645 struct loop *loop = NULL;
2647 if (loop_vinfo)
2648 loop = LOOP_VINFO_LOOP (loop_vinfo);
2650 if (loop_vinfo && !step)
2652 if (dump_enabled_p ())
2653 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2654 "bad data-ref access in loop\n");
2655 return false;
2658 /* Allow loads with zero step in inner-loop vectorization. */
2659 if (loop_vinfo && integer_zerop (step))
2661 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2662 if (!nested_in_vect_loop_p (loop, stmt))
2663 return DR_IS_READ (dr);
2664 /* Allow references with zero step only for outer loops marked
2665 with pragma omp simd - it guarantees the absence of
2666 loop-carried dependencies between inner loop iterations. */
2667 if (!loop->force_vectorize)
2669 if (dump_enabled_p ())
2670 dump_printf_loc (MSG_NOTE, vect_location,
2671 "zero step in inner loop of nest\n");
2672 return false;
2676 if (loop && nested_in_vect_loop_p (loop, stmt))
2678 /* Interleaved accesses are not yet supported within outer-loop
2679 vectorization for references in the inner-loop. */
2680 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2682 /* For the rest of the analysis we use the outer-loop step. */
2683 step = STMT_VINFO_DR_STEP (stmt_info);
2684 if (integer_zerop (step))
2686 if (dump_enabled_p ())
2687 dump_printf_loc (MSG_NOTE, vect_location,
2688 "zero step in outer loop.\n");
2689 return DR_IS_READ (dr);
2693 /* Consecutive? */
2694 if (TREE_CODE (step) == INTEGER_CST)
2696 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2697 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2698 || (dr_step < 0
2699 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2701 /* Mark that it is not interleaving. */
2702 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2703 return true;
2707 if (loop && nested_in_vect_loop_p (loop, stmt))
2709 if (dump_enabled_p ())
2710 dump_printf_loc (MSG_NOTE, vect_location,
2711 "grouped access in outer loop.\n");
2712 return false;
2716 /* Assume this is a DR handled by the non-constant strided load case. */
2717 if (TREE_CODE (step) != INTEGER_CST)
2718 return (STMT_VINFO_STRIDED_P (stmt_info)
2719 && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
2720 || vect_analyze_group_access (dr)));
2722 /* Non-consecutive access - check whether it's part of an interleaving group. */
2723 return vect_analyze_group_access (dr);
2726 /* Compare two data-references DRA and DRB to sort them into chunks
2727 suitable for grouping. */
2729 static int
2730 dr_group_sort_cmp (const void *dra_, const void *drb_)
2732 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2733 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2734 int cmp;
2736 /* Stabilize sort. */
2737 if (dra == drb)
2738 return 0;
2740 /* DRs in different loops never belong to the same group. */
2741 loop_p loopa = gimple_bb (DR_STMT (dra))->loop_father;
2742 loop_p loopb = gimple_bb (DR_STMT (drb))->loop_father;
2743 if (loopa != loopb)
2744 return loopa->num < loopb->num ? -1 : 1;
2746 /* Ordering of DRs according to base. */
2747 cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
2748 DR_BASE_ADDRESS (drb));
2749 if (cmp != 0)
2750 return cmp;
2752 /* And according to DR_OFFSET. */
2753 cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2754 if (cmp != 0)
2755 return cmp;
2757 /* Put reads before writes. */
2758 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2759 return DR_IS_READ (dra) ? -1 : 1;
2761 /* Then sort by access size. */
2762 cmp = data_ref_compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2763 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2764 if (cmp != 0)
2765 return cmp;
2767 /* And by step. */
2768 cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
2769 if (cmp != 0)
2770 return cmp;
2772 /* Then sort by DR_INIT. In case of identical DRs sort by stmt UID. */
2773 cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
2774 if (cmp == 0)
2775 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2776 return cmp;
2779 /* Function vect_analyze_data_ref_accesses.
2781 Analyze the access pattern of all the data references in the loop.
2783 FORNOW: the only access pattern that is considered vectorizable is a
2784 simple step 1 (consecutive) access.
2786 FORNOW: handle only arrays and pointer accesses. */
2788 bool
2789 vect_analyze_data_ref_accesses (vec_info *vinfo)
2791 unsigned int i;
2792 vec<data_reference_p> datarefs = vinfo->datarefs;
2793 struct data_reference *dr;
2795 if (dump_enabled_p ())
2796 dump_printf_loc (MSG_NOTE, vect_location,
2797 "=== vect_analyze_data_ref_accesses ===\n");
2799 if (datarefs.is_empty ())
2800 return true;
2802 /* Sort the array of datarefs to make building the interleaving chains
2803 linear. Don't modify the original vector's order; it is needed for
2804 determining what dependencies are reversed. */
2805 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2806 datarefs_copy.qsort (dr_group_sort_cmp);
2808 /* Build the interleaving chains. */
2809 for (i = 0; i < datarefs_copy.length () - 1;)
2811 data_reference_p dra = datarefs_copy[i];
2812 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2813 stmt_vec_info lastinfo = NULL;
2814 if (! STMT_VINFO_VECTORIZABLE (stmtinfo_a))
2816 ++i;
2817 continue;
2819 for (i = i + 1; i < datarefs_copy.length (); ++i)
2821 data_reference_p drb = datarefs_copy[i];
2822 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2823 if (! STMT_VINFO_VECTORIZABLE (stmtinfo_b))
2824 break;
2826 /* ??? Imperfect sorting (non-compatible types, non-modulo
2827 accesses, same accesses) can lead to a group being artificially
2828 split here as we don't just skip over those. If it really
2829 matters we can push those to a worklist and re-iterate
2830 over them. Then we can just skip ahead to the next DR here. */
2832 /* DRs in different loops should not be put into the same
2833 interleaving group. */
2834 if (gimple_bb (DR_STMT (dra))->loop_father
2835 != gimple_bb (DR_STMT (drb))->loop_father)
2836 break;
2838 /* Check that the data-refs have the same first location (except init)
2839 and that they are both either stores or loads (not one load and one
2840 store, and not masked loads or stores). */
2841 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2842 || data_ref_compare_tree (DR_BASE_ADDRESS (dra),
2843 DR_BASE_ADDRESS (drb)) != 0
2844 || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
2845 || !gimple_assign_single_p (DR_STMT (dra))
2846 || !gimple_assign_single_p (DR_STMT (drb)))
2847 break;
2849 /* Check that the data-refs have the same constant size. */
2850 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2851 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2852 if (!tree_fits_uhwi_p (sza)
2853 || !tree_fits_uhwi_p (szb)
2854 || !tree_int_cst_equal (sza, szb))
2855 break;
2857 /* Check that the data-refs have the same step. */
2858 if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0)
2859 break;
2861 /* Check the types are compatible.
2862 ??? We don't distinguish this during sorting. */
2863 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2864 TREE_TYPE (DR_REF (drb))))
2865 break;
2867 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2868 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2869 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2870 HOST_WIDE_INT init_prev
2871 = TREE_INT_CST_LOW (DR_INIT (datarefs_copy[i-1]));
2872 gcc_assert (init_a <= init_b
2873 && init_a <= init_prev
2874 && init_prev <= init_b);
2876 /* Do not place the same access in the interleaving chain twice. */
2877 if (init_b == init_prev)
2879 gcc_assert (gimple_uid (DR_STMT (datarefs_copy[i-1]))
2880 < gimple_uid (DR_STMT (drb)));
2881 /* ??? For now we simply "drop" the later reference, which is
2882 otherwise the same, rather than finishing off this group.
2883 In the end we'd want to re-process duplicates, forming
2884 multiple groups from the refs, likely by just collecting
2885 all candidates (including duplicates and split points
2886 below) in a vector and then processing them together. */
2887 continue;
2890 /* If init_b == init_a + the size of the type * k, we have an
2891 interleaving, and DRA is accessed before DRB. */
2892 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2893 if (type_size_a == 0
2894 || (init_b - init_a) % type_size_a != 0)
2895 break;
2897 /* If we have a store, the accesses must be adjacent. This splits
2898 groups into chunks we support (we don't support vectorization
2899 of stores with gaps). */
2900 if (!DR_IS_READ (dra) && init_b - init_prev != type_size_a)
2901 break;
2903 /* Requiring that the step (when it is constant and nonzero) be greater
2904 than the difference between the data-refs' inits splits groups into
2905 suitable sizes. */
2906 if (tree_fits_shwi_p (DR_STEP (dra)))
2908 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2909 if (step != 0 && step <= (init_b - init_a))
2910 break;
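/* To illustrate the splitting: 4-byte loads with a constant 16-byte
   step and DR_INITs 0, 4, 8 and 12 form one chain, but a load with
   DR_INIT 16 starts a new chain because the step (16) is no longer
   greater than its distance from the first element.  */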
2913 if (dump_enabled_p ())
2915 dump_printf_loc (MSG_NOTE, vect_location,
2916 "Detected interleaving ");
2917 if (DR_IS_READ (dra))
2918 dump_printf (MSG_NOTE, "load ");
2919 else
2920 dump_printf (MSG_NOTE, "store ");
2921 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2922 dump_printf (MSG_NOTE, " and ");
2923 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2924 dump_printf (MSG_NOTE, "\n");
2927 /* Link the found element into the group list. */
2928 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2930 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2931 lastinfo = stmtinfo_a;
2933 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2934 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2935 lastinfo = stmtinfo_b;
2939 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2940 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2941 && !vect_analyze_data_ref_access (dr))
2943 if (dump_enabled_p ())
2944 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2945 "not vectorized: complicated access pattern.\n");
2947 if (is_a <bb_vec_info> (vinfo))
2949 /* Mark the statement as not vectorizable. */
2950 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2951 continue;
2953 else
2955 datarefs_copy.release ();
2956 return false;
2960 datarefs_copy.release ();
2961 return true;
2964 /* Function vect_vfa_segment_size.
2966 Create an expression that computes the size of the segment
2967 that will be accessed for a data reference. The function takes into
2968 account that realignment loads may access one more vector.
2970 Input:
2971 DR: The data reference.
2972 LENGTH_FACTOR: segment length to consider.
2974 Return an expression whose value is the size of the segment that will be
2975 accessed by DR. */
2977 static tree
2978 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2980 tree segment_length;
2982 if (integer_zerop (DR_STEP (dr)))
2983 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2984 else
2985 segment_length = size_binop (MULT_EXPR,
2986 fold_convert (sizetype, DR_STEP (dr)),
2987 fold_convert (sizetype, length_factor));
2989 if (vect_supportable_dr_alignment (dr, false)
2990 == dr_explicit_realign_optimized)
2992 tree vector_size = TYPE_SIZE_UNIT
2993 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2995 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
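/* For example (illustrative numbers): a 4-byte step and a length
   factor of 8 give a 32-byte segment; if the optimized explicit
   realignment scheme is used, one extra 16-byte vector is added,
   for 48 bytes in total.  */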
2997 return segment_length;
3000 /* Function vect_compile_time_alias.
3002 Given data references A and B with equal base and offset, see whether
3003 the alias relation can be decided at compilation time. Return 1 if
3004 it can and the references alias, 0 if it can and the references do
3005 not alias, and -1 if we cannot decide at compile time. SEGMENT_LENGTH_A
3006 and SEGMENT_LENGTH_B are the memory lengths accessed by A and B
3007 respectively. */
3009 static int
3010 vect_compile_time_alias (struct data_reference *a, struct data_reference *b,
3011 tree segment_length_a, tree segment_length_b)
3013 poly_offset_int offset_a = wi::to_poly_offset (DR_INIT (a));
3014 poly_offset_int offset_b = wi::to_poly_offset (DR_INIT (b));
3015 poly_uint64 const_length_a;
3016 poly_uint64 const_length_b;
3018 /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT
3019 bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of
3020 [a, a+12) */
3021 if (tree_int_cst_compare (DR_STEP (a), size_zero_node) < 0)
3023 const_length_a = (-wi::to_poly_wide (segment_length_a)).force_uhwi ();
3024 offset_a = (offset_a + vect_get_scalar_dr_size (a)) - const_length_a;
3026 else
3027 const_length_a = tree_to_poly_uint64 (segment_length_a);
3028 if (tree_int_cst_compare (DR_STEP (b), size_zero_node) < 0)
3030 const_length_b = (-wi::to_poly_wide (segment_length_b)).force_uhwi ();
3031 offset_b = (offset_b + vect_get_scalar_dr_size (b)) - const_length_b;
3033 else
3034 const_length_b = tree_to_poly_uint64 (segment_length_b);
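/* A small example with positive steps and constant lengths: segments
   [0, 32) and [32, 48) cannot overlap, so 0 is returned below;
   segments [0, 32) and [16, 32) always overlap, so 1 is returned.
   Anything the poly-int ranges cannot decide yields -1.  */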
3036 if (ranges_known_overlap_p (offset_a, const_length_a,
3037 offset_b, const_length_b))
3038 return 1;
3040 if (!ranges_maybe_overlap_p (offset_a, const_length_a,
3041 offset_b, const_length_b))
3042 return 0;
3044 return -1;
3047 /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH
3048 in DDR is >= VF. */
3050 static bool
3051 dependence_distance_ge_vf (data_dependence_relation *ddr,
3052 unsigned int loop_depth, poly_uint64 vf)
3054 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
3055 || DDR_NUM_DIST_VECTS (ddr) == 0)
3056 return false;
3058 /* If the dependence is exact, we should have limited the VF instead. */
3059 gcc_checking_assert (DDR_COULD_BE_INDEPENDENT_P (ddr));
3061 unsigned int i;
3062 lambda_vector dist_v;
3063 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
3065 HOST_WIDE_INT dist = dist_v[loop_depth];
3066 if (dist != 0
3067 && !(dist > 0 && DDR_REVERSED_P (ddr))
3068 && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
3069 return false;
3072 if (dump_enabled_p ())
3074 dump_printf_loc (MSG_NOTE, vect_location,
3075 "dependence distance between ");
3076 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
3077 dump_printf (MSG_NOTE, " and ");
3078 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
3079 dump_printf (MSG_NOTE, " is >= VF\n");
3082 return true;
3085 /* Function vect_prune_runtime_alias_test_list.
3087 Prune a list of ddrs to be tested at run-time by versioning for alias.
3088 Merge several alias checks into one if possible.
3089 Return FALSE if the resulting list of ddrs is longer than allowed by
3090 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
3092 bool
3093 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
3095 typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash;
3096 hash_set <tree_pair_hash> compared_objects;
3098 vec<ddr_p> may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
3099 vec<dr_with_seg_len_pair_t> &comp_alias_ddrs
3100 = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
3101 vec<vec_object_pair> &check_unequal_addrs
3102 = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
3103 poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3104 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
3106 ddr_p ddr;
3107 unsigned int i;
3108 tree length_factor;
3110 if (dump_enabled_p ())
3111 dump_printf_loc (MSG_NOTE, vect_location,
3112 "=== vect_prune_runtime_alias_test_list ===\n");
3114 if (may_alias_ddrs.is_empty ())
3115 return true;
3117 comp_alias_ddrs.create (may_alias_ddrs.length ());
3119 unsigned int loop_depth
3120 = index_in_loop_nest (LOOP_VINFO_LOOP (loop_vinfo)->num,
3121 LOOP_VINFO_LOOP_NEST (loop_vinfo));
3123 /* First, we collect all data ref pairs for aliasing checks. */
3124 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
3126 int comp_res;
3127 struct data_reference *dr_a, *dr_b;
3128 gimple *dr_group_first_a, *dr_group_first_b;
3129 tree segment_length_a, segment_length_b;
3130 gimple *stmt_a, *stmt_b;
3132 /* Ignore the alias if the VF we chose ended up being no greater
3133 than the dependence distance. */
3134 if (dependence_distance_ge_vf (ddr, loop_depth, vect_factor))
3135 continue;
3137 if (DDR_OBJECT_A (ddr))
3139 vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
3140 if (!compared_objects.add (new_pair))
3142 if (dump_enabled_p ())
3144 dump_printf_loc (MSG_NOTE, vect_location, "checking that ");
3145 dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first);
3146 dump_printf (MSG_NOTE, " and ");
3147 dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.second);
3148 dump_printf (MSG_NOTE, " have different addresses\n");
3150 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
3152 continue;
3155 dr_a = DDR_A (ddr);
3156 stmt_a = DR_STMT (DDR_A (ddr));
3157 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
3158 if (dr_group_first_a)
3160 stmt_a = dr_group_first_a;
3161 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
3164 dr_b = DDR_B (ddr);
3165 stmt_b = DR_STMT (DDR_B (ddr));
3166 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
3167 if (dr_group_first_b)
3169 stmt_b = dr_group_first_b;
3170 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
3173 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
3174 length_factor = scalar_loop_iters;
3175 else
3176 length_factor = size_int (vect_factor);
3177 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
3178 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
3180 comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
3181 DR_BASE_ADDRESS (dr_b));
3182 if (comp_res == 0)
3183 comp_res = data_ref_compare_tree (DR_OFFSET (dr_a),
3184 DR_OFFSET (dr_b));
3186 /* See whether the alias is known at compilation time. */
3187 if (comp_res == 0
3188 && TREE_CODE (DR_STEP (dr_a)) == INTEGER_CST
3189 && TREE_CODE (DR_STEP (dr_b)) == INTEGER_CST
3190 && poly_int_tree_p (segment_length_a)
3191 && poly_int_tree_p (segment_length_b))
3193 int res = vect_compile_time_alias (dr_a, dr_b,
3194 segment_length_a,
3195 segment_length_b);
3196 if (res == 0)
3197 continue;
3199 if (res == 1)
3201 if (dump_enabled_p ())
3202 dump_printf_loc (MSG_NOTE, vect_location,
3203 "not vectorized: compilation time alias.\n");
3204 return false;
3208 dr_with_seg_len_pair_t dr_with_seg_len_pair
3209 (dr_with_seg_len (dr_a, segment_length_a),
3210 dr_with_seg_len (dr_b, segment_length_b));
3212 /* Canonicalize pairs by sorting the two DR members. */
3213 if (comp_res > 0)
3214 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
3216 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
3219 prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
3221 unsigned int count = (comp_alias_ddrs.length ()
3222 + check_unequal_addrs.length ());
3223 dump_printf_loc (MSG_NOTE, vect_location,
3224 "improved number of alias checks from %d to %d\n",
3225 may_alias_ddrs.length (), count);
3226 if ((int) count > PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
3228 if (dump_enabled_p ())
3229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3230 "number of versioning for alias "
3231 "run-time tests exceeds %d "
3232 "(--param vect-max-version-for-alias-checks)\n",
3233 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
3234 return false;
3237 return true;
3240 /* Return true if a non-affine read or write in STMT is suitable for a
3241 gather load or scatter store. Describe the operation in *INFO if so. */
3243 bool
3244 vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
3245 gather_scatter_info *info)
3247 HOST_WIDE_INT scale = 1;
3248 poly_int64 pbitpos, pbitsize;
3249 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3250 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3251 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3252 tree offtype = NULL_TREE;
3253 tree decl, base, off;
3254 machine_mode pmode;
3255 int punsignedp, reversep, pvolatilep = 0;
3257 base = DR_REF (dr);
3258 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF;
3259 see if we can use the def stmt of the address. */
3260 if (is_gimple_call (stmt)
3261 && gimple_call_internal_p (stmt)
3262 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
3263 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
3264 && TREE_CODE (base) == MEM_REF
3265 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
3266 && integer_zerop (TREE_OPERAND (base, 1))
3267 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
3269 gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3270 if (is_gimple_assign (def_stmt)
3271 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3272 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3275 /* The gather and scatter builtins need an address of the form
3276 loop_invariant + vector * {1, 2, 4, 8}
3277 or
3278 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3279 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3280 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3281 multiplications and additions in it. To get a vector, we need
3282 a single SSA_NAME that will be defined in the loop and will
3283 contain everything that is not loop invariant and that can be
3284 vectorized. The following code attempts to find such a preexisting
3285 SSA_NAME OFF and put the loop invariants into a tree BASE
3286 that can be gimplified before the loop. */
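/* As a rough sketch (hypothetical access, 4-byte elements): for a read
   of the form a[b[i]] we would hope to end up with BASE covering &a
   plus any constant byte offset, OFF being the SSA_NAME that holds the
   sign-extended b[i], and SCALE equal to 4.  */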
3287 base = get_inner_reference (base, &pbitsize, &pbitpos, &off, &pmode,
3288 &punsignedp, &reversep, &pvolatilep);
3289 gcc_assert (base && !reversep);
3290 poly_int64 pbytepos = exact_div (pbitpos, BITS_PER_UNIT);
3292 if (TREE_CODE (base) == MEM_REF)
3294 if (!integer_zerop (TREE_OPERAND (base, 1)))
3296 if (off == NULL_TREE)
3297 off = wide_int_to_tree (sizetype, mem_ref_offset (base));
3298 else
3299 off = size_binop (PLUS_EXPR, off,
3300 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3302 base = TREE_OPERAND (base, 0);
3304 else
3305 base = build_fold_addr_expr (base);
3307 if (off == NULL_TREE)
3308 off = size_zero_node;
3310 /* If base is not loop invariant, then if off is 0 we start with just
3311 the constant offset in the loop invariant BASE and continue with base
3312 as OFF; otherwise give up.
3313 We could handle that case by gimplifying the addition of base + off
3314 into some SSA_NAME and using that as off, but for now punt. */
3315 if (!expr_invariant_in_loop_p (loop, base))
3317 if (!integer_zerop (off))
3318 return false;
3319 off = base;
3320 base = size_int (pbytepos);
3322 /* Otherwise put base + constant offset into the loop invariant BASE
3323 and continue with OFF. */
3324 else
3326 base = fold_convert (sizetype, base);
3327 base = size_binop (PLUS_EXPR, base, size_int (pbytepos));
3330 /* OFF at this point may be either a SSA_NAME or some tree expression
3331 from get_inner_reference. Try to peel off loop invariants from it
3332 into BASE as long as possible. */
3333 STRIP_NOPS (off);
3334 while (offtype == NULL_TREE)
3336 enum tree_code code;
3337 tree op0, op1, add = NULL_TREE;
3339 if (TREE_CODE (off) == SSA_NAME)
3341 gimple *def_stmt = SSA_NAME_DEF_STMT (off);
3343 if (expr_invariant_in_loop_p (loop, off))
3344 return false;
3346 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3347 break;
3349 op0 = gimple_assign_rhs1 (def_stmt);
3350 code = gimple_assign_rhs_code (def_stmt);
3351 op1 = gimple_assign_rhs2 (def_stmt);
3353 else
3355 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3356 return false;
3357 code = TREE_CODE (off);
3358 extract_ops_from_tree (off, &code, &op0, &op1);
3360 switch (code)
3362 case POINTER_PLUS_EXPR:
3363 case PLUS_EXPR:
3364 if (expr_invariant_in_loop_p (loop, op0))
3366 add = op0;
3367 off = op1;
3368 do_add:
3369 add = fold_convert (sizetype, add);
3370 if (scale != 1)
3371 add = size_binop (MULT_EXPR, add, size_int (scale));
3372 base = size_binop (PLUS_EXPR, base, add);
3373 continue;
3375 if (expr_invariant_in_loop_p (loop, op1))
3377 add = op1;
3378 off = op0;
3379 goto do_add;
3381 break;
3382 case MINUS_EXPR:
3383 if (expr_invariant_in_loop_p (loop, op1))
3385 add = fold_convert (sizetype, op1);
3386 add = size_binop (MINUS_EXPR, size_zero_node, add);
3387 off = op0;
3388 goto do_add;
3390 break;
3391 case MULT_EXPR:
3392 if (scale == 1 && tree_fits_shwi_p (op1))
3394 scale = tree_to_shwi (op1);
3395 off = op0;
3396 continue;
3398 break;
3399 case SSA_NAME:
3400 off = op0;
3401 continue;
3402 CASE_CONVERT:
3403 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3404 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3405 break;
3406 if (TYPE_PRECISION (TREE_TYPE (op0))
3407 == TYPE_PRECISION (TREE_TYPE (off)))
3409 off = op0;
3410 continue;
3412 if (TYPE_PRECISION (TREE_TYPE (op0))
3413 < TYPE_PRECISION (TREE_TYPE (off)))
3415 off = op0;
3416 offtype = TREE_TYPE (off);
3417 STRIP_NOPS (off);
3418 continue;
3420 break;
3421 default:
3422 break;
3424 break;
3427 /* If at the end OFF still isn't a SSA_NAME or isn't
3428 defined in the loop, punt. */
3429 if (TREE_CODE (off) != SSA_NAME
3430 || expr_invariant_in_loop_p (loop, off))
3431 return false;
3433 if (offtype == NULL_TREE)
3434 offtype = TREE_TYPE (off);
3436 if (DR_IS_READ (dr))
3437 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3438 offtype, scale);
3439 else
3440 decl = targetm.vectorize.builtin_scatter (STMT_VINFO_VECTYPE (stmt_info),
3441 offtype, scale);
3443 if (decl == NULL_TREE)
3444 return false;
3446 info->decl = decl;
3447 info->base = base;
3448 info->offset = off;
3449 info->offset_dt = vect_unknown_def_type;
3450 info->offset_vectype = NULL_TREE;
3451 info->scale = scale;
3452 return true;
3455 /* Function vect_analyze_data_refs.
3457 Find all the data references in the loop or basic block.
3459 The general structure of the analysis of data refs in the vectorizer is as
3460 follows:
3461 1- vect_analyze_data_refs(loop/bb): call
3462 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3463 in the loop/bb and their dependences.
3464 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3465 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3466 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3470 bool
3471 vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
3473 struct loop *loop = NULL;
3474 unsigned int i;
3475 struct data_reference *dr;
3476 tree scalar_type;
3478 if (dump_enabled_p ())
3479 dump_printf_loc (MSG_NOTE, vect_location,
3480 "=== vect_analyze_data_refs ===\n");
3482 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
3483 loop = LOOP_VINFO_LOOP (loop_vinfo);
3485 /* Go through the data-refs, check that the analysis succeeded. Update
3486 pointer from stmt_vec_info struct to DR and vectype. */
3488 vec<data_reference_p> datarefs = vinfo->datarefs;
3489 FOR_EACH_VEC_ELT (datarefs, i, dr)
3491 gimple *stmt;
3492 stmt_vec_info stmt_info;
3493 tree base, offset, init;
3494 enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
3495 bool simd_lane_access = false;
3496 poly_uint64 vf;
3498 again:
3499 if (!dr || !DR_REF (dr))
3501 if (dump_enabled_p ())
3502 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3503 "not vectorized: unhandled data-ref\n");
3504 return false;
3507 stmt = DR_STMT (dr);
3508 stmt_info = vinfo_for_stmt (stmt);
3510 /* Discard clobbers from the dataref vector. We will remove
3511 clobber stmts during vectorization. */
3512 if (gimple_clobber_p (stmt))
3514 free_data_ref (dr);
3515 if (i == datarefs.length () - 1)
3517 datarefs.pop ();
3518 break;
3520 datarefs.ordered_remove (i);
3521 dr = datarefs[i];
3522 goto again;
3525 /* Check that analysis of the data-ref succeeded. */
3526 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3527 || !DR_STEP (dr))
3529 bool maybe_gather
3530 = DR_IS_READ (dr)
3531 && !TREE_THIS_VOLATILE (DR_REF (dr))
3532 && targetm.vectorize.builtin_gather != NULL;
3533 bool maybe_scatter
3534 = DR_IS_WRITE (dr)
3535 && !TREE_THIS_VOLATILE (DR_REF (dr))
3536 && targetm.vectorize.builtin_scatter != NULL;
3537 bool maybe_simd_lane_access
3538 = is_a <loop_vec_info> (vinfo) && loop->simduid;
3540 /* If target supports vector gather loads or scatter stores, or if
3541 this might be a SIMD lane access, see if they can be used. */
3542 if (is_a <loop_vec_info> (vinfo)
3543 && (maybe_gather || maybe_scatter || maybe_simd_lane_access)
3544 && !nested_in_vect_loop_p (loop, stmt))
3546 struct data_reference *newdr
3547 = create_data_ref (NULL, loop_containing_stmt (stmt),
3548 DR_REF (dr), stmt, !maybe_scatter,
3549 DR_IS_CONDITIONAL_IN_STMT (dr));
3550 gcc_assert (newdr != NULL && DR_REF (newdr));
3551 if (DR_BASE_ADDRESS (newdr)
3552 && DR_OFFSET (newdr)
3553 && DR_INIT (newdr)
3554 && DR_STEP (newdr)
3555 && integer_zerop (DR_STEP (newdr)))
3557 if (maybe_simd_lane_access)
3559 tree off = DR_OFFSET (newdr);
3560 STRIP_NOPS (off);
3561 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3562 && TREE_CODE (off) == MULT_EXPR
3563 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3565 tree step = TREE_OPERAND (off, 1);
3566 off = TREE_OPERAND (off, 0);
3567 STRIP_NOPS (off);
3568 if (CONVERT_EXPR_P (off)
3569 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3570 0)))
3571 < TYPE_PRECISION (TREE_TYPE (off)))
3572 off = TREE_OPERAND (off, 0);
3573 if (TREE_CODE (off) == SSA_NAME)
3575 gimple *def = SSA_NAME_DEF_STMT (off);
3576 tree reft = TREE_TYPE (DR_REF (newdr));
3577 if (is_gimple_call (def)
3578 && gimple_call_internal_p (def)
3579 && (gimple_call_internal_fn (def)
3580 == IFN_GOMP_SIMD_LANE))
3582 tree arg = gimple_call_arg (def, 0);
3583 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3584 arg = SSA_NAME_VAR (arg);
3585 if (arg == loop->simduid
3586 /* For now. */
3587 && tree_int_cst_equal
3588 (TYPE_SIZE_UNIT (reft),
3589 step))
3591 DR_OFFSET (newdr) = ssize_int (0);
3592 DR_STEP (newdr) = step;
3593 DR_OFFSET_ALIGNMENT (newdr)
3594 = BIGGEST_ALIGNMENT;
3595 DR_STEP_ALIGNMENT (newdr)
3596 = highest_pow2_factor (step);
3597 dr = newdr;
3598 simd_lane_access = true;
3604 if (!simd_lane_access && (maybe_gather || maybe_scatter))
3606 dr = newdr;
3607 if (maybe_gather)
3608 gatherscatter = GATHER;
3609 else
3610 gatherscatter = SCATTER;
3613 if (gatherscatter == SG_NONE && !simd_lane_access)
3614 free_data_ref (newdr);
3617 if (gatherscatter == SG_NONE && !simd_lane_access)
3619 if (dump_enabled_p ())
3621 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3622 "not vectorized: data ref analysis "
3623 "failed ");
3624 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3627 if (is_a <bb_vec_info> (vinfo))
3628 break;
3630 return false;
3634 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3636 if (dump_enabled_p ())
3637 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3638 "not vectorized: base addr of dr is a "
3639 "constant\n");
3641 if (is_a <bb_vec_info> (vinfo))
3642 break;
3644 if (gatherscatter != SG_NONE || simd_lane_access)
3645 free_data_ref (dr);
3646 return false;
3649 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3651 if (dump_enabled_p ())
3653 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3654 "not vectorized: volatile type ");
3655 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3658 if (is_a <bb_vec_info> (vinfo))
3659 break;
3661 return false;
3664 if (stmt_can_throw_internal (stmt))
3666 if (dump_enabled_p ())
3668 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3669 "not vectorized: statement can throw an "
3670 "exception ");
3671 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3674 if (is_a <bb_vec_info> (vinfo))
3675 break;
3677 if (gatherscatter != SG_NONE || simd_lane_access)
3678 free_data_ref (dr);
3679 return false;
3682 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3683 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3685 if (dump_enabled_p ())
3687 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3688 "not vectorized: statement is bitfield "
3689 "access ");
3690 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3693 if (is_a <bb_vec_info> (vinfo))
3694 break;
3696 if (gatherscatter != SG_NONE || simd_lane_access)
3697 free_data_ref (dr);
3698 return false;
3701 base = unshare_expr (DR_BASE_ADDRESS (dr));
3702 offset = unshare_expr (DR_OFFSET (dr));
3703 init = unshare_expr (DR_INIT (dr));
3705 if (is_gimple_call (stmt)
3706 && (!gimple_call_internal_p (stmt)
3707 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3708 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3710 if (dump_enabled_p ())
3712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3713 "not vectorized: dr in a call ");
3714 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3717 if (is_a <bb_vec_info> (vinfo))
3718 break;
3720 if (gatherscatter != SG_NONE || simd_lane_access)
3721 free_data_ref (dr);
3722 return false;
3725 /* Update DR field in stmt_vec_info struct. */
3727 /* If the dataref is in an inner-loop of the loop that is considered
3728 for vectorization, we also want to analyze the access relative to
3729 the outer-loop (DR contains information only relative to the
3730 inner-most enclosing loop). We do that by building a reference to the
3731 first location accessed by the inner-loop, and analyze it relative to
3732 the outer-loop. */
3733 if (loop && nested_in_vect_loop_p (loop, stmt))
3735 /* Build a reference to the first location accessed by the
3736 inner loop: *(BASE + INIT + OFFSET). By construction,
3737 this address must be invariant in the inner loop, so we
3738 can consider it as being used in the outer loop. */
3739 tree init_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset),
3740 init, offset);
3741 tree init_addr = fold_build_pointer_plus (base, init_offset);
3742 tree init_ref = build_fold_indirect_ref (init_addr);
3744 if (dump_enabled_p ())
3746 dump_printf_loc (MSG_NOTE, vect_location,
3747 "analyze in outer loop: ");
3748 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_ref);
3749 dump_printf (MSG_NOTE, "\n");
3752 if (!dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
3753 init_ref, loop))
3754 /* dr_analyze_innermost already explained the failure. */
3755 return false;
3757 if (dump_enabled_p ())
3759 dump_printf_loc (MSG_NOTE, vect_location,
3760 "\touter base_address: ");
3761 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3762 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3763 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3764 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3765 STMT_VINFO_DR_OFFSET (stmt_info));
3766 dump_printf (MSG_NOTE,
3767 "\n\touter constant offset from base address: ");
3768 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3769 STMT_VINFO_DR_INIT (stmt_info));
3770 dump_printf (MSG_NOTE, "\n\touter step: ");
3771 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3772 STMT_VINFO_DR_STEP (stmt_info));
3773 dump_printf (MSG_NOTE, "\n\touter base alignment: %d\n",
3774 STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info));
3775 dump_printf (MSG_NOTE, "\n\touter base misalignment: %d\n",
3776 STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info));
3777 dump_printf (MSG_NOTE, "\n\touter offset alignment: %d\n",
3778 STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info));
3779 dump_printf (MSG_NOTE, "\n\touter step alignment: %d\n",
3780 STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
3784 if (STMT_VINFO_DATA_REF (stmt_info))
3786 if (dump_enabled_p ())
3788 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3789 "not vectorized: more than one data ref "
3790 "in stmt: ");
3791 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3794 if (is_a <bb_vec_info> (vinfo))
3795 break;
3797 if (gatherscatter != SG_NONE || simd_lane_access)
3798 free_data_ref (dr);
3799 return false;
3802 STMT_VINFO_DATA_REF (stmt_info) = dr;
3803 if (simd_lane_access)
3805 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3806 free_data_ref (datarefs[i]);
3807 datarefs[i] = dr;
3810 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == ADDR_EXPR
3811 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0))
3812 && DECL_NONALIASED (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0)))
3814 if (dump_enabled_p ())
3816 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3817 "not vectorized: base object not addressable "
3818 "for stmt: ");
3819 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3821 if (is_a <bb_vec_info> (vinfo))
3823 /* In BB vectorization the ref can still participate
3824 in dependence analysis, we just can't vectorize it. */
3825 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
3826 continue;
3828 return false;
3831 /* Set vectype for STMT. */
3832 scalar_type = TREE_TYPE (DR_REF (dr));
3833 STMT_VINFO_VECTYPE (stmt_info)
3834 = get_vectype_for_scalar_type (scalar_type);
3835 if (!STMT_VINFO_VECTYPE (stmt_info))
3837 if (dump_enabled_p ())
3839 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3840 "not vectorized: no vectype for stmt: ");
3841 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3842 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3843 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3844 scalar_type);
3845 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3848 if (is_a <bb_vec_info> (vinfo))
3850 /* Not having a vector type is fine; the ref can still participate
3851 in dependence analysis, we just can't vectorize it. */
3852 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
3853 continue;
3856 if (gatherscatter != SG_NONE || simd_lane_access)
3858 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3859 if (gatherscatter != SG_NONE)
3860 free_data_ref (dr);
3862 return false;
3864 else
3866 if (dump_enabled_p ())
3868 dump_printf_loc (MSG_NOTE, vect_location,
3869 "got vectype for stmt: ");
3870 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3871 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3872 STMT_VINFO_VECTYPE (stmt_info));
3873 dump_printf (MSG_NOTE, "\n");
3877 /* Adjust the minimal vectorization factor according to the
3878 vector type. */
3879 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3880 *min_vf = upper_bound (*min_vf, vf);
3882 if (gatherscatter != SG_NONE)
3884 gather_scatter_info gs_info;
3885 if (!vect_check_gather_scatter (stmt, as_a <loop_vec_info> (vinfo),
3886 &gs_info)
3887 || !get_vectype_for_scalar_type (TREE_TYPE (gs_info.offset)))
3889 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3890 free_data_ref (dr);
3891 if (dump_enabled_p ())
3893 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3894 (gatherscatter == GATHER) ?
3895 "not vectorized: not suitable for gather "
3896 "load " :
3897 "not vectorized: not suitable for scatter "
3898 "store ");
3899 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3901 return false;
3904 free_data_ref (datarefs[i]);
3905 datarefs[i] = dr;
3906 STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
3909 else if (is_a <loop_vec_info> (vinfo)
3910 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3912 if (nested_in_vect_loop_p (loop, stmt))
3914 if (dump_enabled_p ())
3916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3917 "not vectorized: not suitable for strided "
3918 "load ");
3919 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3921 return false;
3923 STMT_VINFO_STRIDED_P (stmt_info) = true;
3927 /* If we stopped analysis at the first dataref we could not analyze
3928 when trying to vectorize a basic-block, mark the rest of the datarefs
3929 as not vectorizable and truncate the vector of datarefs. That
3930 avoids wasting time analyzing their dependences. */
3931 if (i != datarefs.length ())
3933 gcc_assert (is_a <bb_vec_info> (vinfo));
3934 for (unsigned j = i; j < datarefs.length (); ++j)
3936 data_reference_p dr = datarefs[j];
3937 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3938 free_data_ref (dr);
3940 datarefs.truncate (i);
3943 return true;
3947 /* Function vect_get_new_vect_var.
3949 Returns a name for a new variable. The current naming scheme uses a
3950 prefix of "vect_", "stmp_", "mask_" or "vectp_" (depending on the value
3951 of VAR_KIND) for the name of the vectorizer generated variable, and
3952 appends NAME to that prefix if it is provided. */
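/* For illustration only: a call such as
     vect_get_new_vect_var (vectype, vect_simple_var, "sum")
   creates a temporary named "vect_sum", while passing NULL for NAME
   yields one named just "vect" ("stmp", "mask" and "vectp" are used
   correspondingly for the other variable kinds). */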
3954 tree
3955 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3957 const char *prefix;
3958 tree new_vect_var;
3960 switch (var_kind)
3962 case vect_simple_var:
3963 prefix = "vect";
3964 break;
3965 case vect_scalar_var:
3966 prefix = "stmp";
3967 break;
3968 case vect_mask_var:
3969 prefix = "mask";
3970 break;
3971 case vect_pointer_var:
3972 prefix = "vectp";
3973 break;
3974 default:
3975 gcc_unreachable ();
3978 if (name)
3980 char* tmp = concat (prefix, "_", name, NULL);
3981 new_vect_var = create_tmp_reg (type, tmp);
3982 free (tmp);
3984 else
3985 new_vect_var = create_tmp_reg (type, prefix);
3987 return new_vect_var;
3990 /* Like vect_get_new_vect_var but return an SSA name. */
3992 tree
3993 vect_get_new_ssa_name (tree type, enum vect_var_kind var_kind, const char *name)
3995 const char *prefix;
3996 tree new_vect_var;
3998 switch (var_kind)
4000 case vect_simple_var:
4001 prefix = "vect";
4002 break;
4003 case vect_scalar_var:
4004 prefix = "stmp";
4005 break;
4006 case vect_pointer_var:
4007 prefix = "vectp";
4008 break;
4009 default:
4010 gcc_unreachable ();
4013 if (name)
4015 char* tmp = concat (prefix, "_", name, NULL);
4016 new_vect_var = make_temp_ssa_name (type, NULL, tmp);
4017 free (tmp);
4019 else
4020 new_vect_var = make_temp_ssa_name (type, NULL, prefix);
4022 return new_vect_var;
4025 /* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
4027 static void
4028 vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr)
4030 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
4031 int misalign = DR_MISALIGNMENT (dr);
4032 if (misalign == DR_MISALIGNMENT_UNKNOWN)
4033 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
4034 else
4035 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name),
4036 DR_TARGET_ALIGNMENT (dr), misalign);
4039 /* Function vect_create_addr_base_for_vector_ref.
4041 Create an expression that computes the address of the first memory location
4042 that will be accessed for a data reference.
4044 Input:
4045 STMT: The statement containing the data reference.
4046 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
4047 OFFSET: Optional. If supplied, it is added to the initial address.
4048 LOOP: Specify relative to which loop-nest should the address be computed.
4049 For example, when the dataref is in an inner-loop nested in an
4050 outer-loop that is now being vectorized, LOOP can be either the
4051 outer-loop, or the inner-loop. The first memory location accessed
4052 by the following dataref ('in' points to short):
4054 for (i=0; i<N; i++)
4055 for (j=0; j<M; j++)
4056 s += in[i+j]
4058 is as follows:
4059 if LOOP=i_loop: &in (relative to i_loop)
4060 if LOOP=j_loop: &in+i*2B (relative to j_loop)
4061 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
4062 initial address. Unlike OFFSET, which is the number of elements to
4063 be added, BYTE_OFFSET is measured in bytes.
4065 Output:
4066 1. Return an SSA_NAME whose value is the address of the memory location of
4067 the first vector of the data reference.
4068 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
4069 these statement(s) which define the returned SSA_NAME.
4071 FORNOW: We are only handling array accesses with step 1. */
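/* A small worked example (illustrative only): for a dataref on 4-byte
   elements whose own offset and init are zero, calling this function
   with OFFSET equal to 4 makes the code below compute
   base_offset = 0 + 0 + 4 * 4 = 16 and return an SSA_NAME holding
   base_address + 16, i.e. the address of the fifth element. */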
4073 tree
4074 vect_create_addr_base_for_vector_ref (gimple *stmt,
4075 gimple_seq *new_stmt_list,
4076 tree offset,
4077 tree byte_offset)
4079 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4080 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4081 const char *base_name;
4082 tree addr_base;
4083 tree dest;
4084 gimple_seq seq = NULL;
4085 tree vect_ptr_type;
4086 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
4087 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4088 innermost_loop_behavior *drb = vect_dr_behavior (dr);
4090 tree data_ref_base = unshare_expr (drb->base_address);
4091 tree base_offset = unshare_expr (drb->offset);
4092 tree init = unshare_expr (drb->init);
4094 if (loop_vinfo)
4095 base_name = get_name (data_ref_base);
4096 else
4098 base_offset = ssize_int (0);
4099 init = ssize_int (0);
4100 base_name = get_name (DR_REF (dr));
4103 /* Create base_offset */
4104 base_offset = size_binop (PLUS_EXPR,
4105 fold_convert (sizetype, base_offset),
4106 fold_convert (sizetype, init));
4108 if (offset)
4110 offset = fold_build2 (MULT_EXPR, sizetype,
4111 fold_convert (sizetype, offset), step);
4112 base_offset = fold_build2 (PLUS_EXPR, sizetype,
4113 base_offset, offset);
4115 if (byte_offset)
4117 byte_offset = fold_convert (sizetype, byte_offset);
4118 base_offset = fold_build2 (PLUS_EXPR, sizetype,
4119 base_offset, byte_offset);
4122 /* base + base_offset */
4123 if (loop_vinfo)
4124 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
4125 else
4127 addr_base = build1 (ADDR_EXPR,
4128 build_pointer_type (TREE_TYPE (DR_REF (dr))),
4129 unshare_expr (DR_REF (dr)));
4132 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
4133 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
4134 addr_base = force_gimple_operand (addr_base, &seq, true, dest);
4135 gimple_seq_add_seq (new_stmt_list, seq);
4137 if (DR_PTR_INFO (dr)
4138 && TREE_CODE (addr_base) == SSA_NAME
4139 && !SSA_NAME_PTR_INFO (addr_base))
4141 vect_duplicate_ssa_name_ptr_info (addr_base, dr);
4142 if (offset || byte_offset)
4143 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
4146 if (dump_enabled_p ())
4148 dump_printf_loc (MSG_NOTE, vect_location, "created ");
4149 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
4150 dump_printf (MSG_NOTE, "\n");
4153 return addr_base;
4157 /* Function vect_create_data_ref_ptr.
4159 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
4160 location accessed in the loop by STMT, along with the def-use update
4161 chain to appropriately advance the pointer through the loop iterations.
4162 Also set aliasing information for the pointer. This pointer is used by
4163 the callers to this function to create a memory reference expression for
4164 vector load/store access.
4166 Input:
4167 1. STMT: a stmt that references memory. Expected to be of the form
4168 GIMPLE_ASSIGN <name, data-ref> or
4169 GIMPLE_ASSIGN <data-ref, name>.
4170 2. AGGR_TYPE: the type of the reference, which should be either a vector
4171 or an array.
4172 3. AT_LOOP: the loop where the vector memref is to be created.
4173 4. OFFSET (optional): an offset to be added to the initial address accessed
4174 by the data-ref in STMT.
4175 5. BSI: location where the new stmts are to be placed if there is no loop
4176 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4177 pointing to the initial address.
4178 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4179 to the initial address accessed by the data-ref in STMT. This is
4180 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4181 in bytes.
4183 Output:
4184 1. Declare a new ptr to vector_type, and have it point to the base of the
4185 data reference (initial address accessed by the data reference).
4186 For example, for vector of type V8HI, the following code is generated:
4188 v8hi *ap;
4189 ap = (v8hi *)initial_address;
4191 if OFFSET is not supplied:
4192 initial_address = &a[init];
4193 if OFFSET is supplied:
4194 initial_address = &a[init + OFFSET];
4195 if BYTE_OFFSET is supplied:
4196 initial_address = &a[init] + BYTE_OFFSET;
4198 Return the initial_address in INITIAL_ADDRESS.
4200 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4201 update the pointer in each iteration of the loop.
4203 Return the increment stmt that updates the pointer in PTR_INCR.
4205 3. Set INV_P to true if the access pattern of the data reference in the
4206 vectorized loop is invariant. Set it to false otherwise.
4208 4. Return the pointer. */
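/* Illustrative use only; a condensed sketch of how a caller typically
   consumes the result, with placeholder local names (REF_TYPE stands for
   a suitable alias pointer type of the caller):

     tree init_addr, dataref_ptr, data_ref;
     gimple *ptr_incr;
     bool inv_p;
     dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, loop, NULL_TREE,
                                             &init_addr, gsi, &ptr_incr,
                                             false, &inv_p, NULL_TREE);
     data_ref = fold_build2 (MEM_REF, vectype, dataref_ptr,
                             build_int_cst (ref_type, 0));

   i.e. the returned pointer is dereferenced through a MEM_REF for the
   actual vector access and advanced across iterations via PTR_INCR. */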
4210 tree
4211 vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
4212 tree offset, tree *initial_address,
4213 gimple_stmt_iterator *gsi, gimple **ptr_incr,
4214 bool only_init, bool *inv_p, tree byte_offset)
4216 const char *base_name;
4217 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4218 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4219 struct loop *loop = NULL;
4220 bool nested_in_vect_loop = false;
4221 struct loop *containing_loop = NULL;
4222 tree aggr_ptr_type;
4223 tree aggr_ptr;
4224 tree new_temp;
4225 gimple_seq new_stmt_list = NULL;
4226 edge pe = NULL;
4227 basic_block new_bb;
4228 tree aggr_ptr_init;
4229 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4230 tree aptr;
4231 gimple_stmt_iterator incr_gsi;
4232 bool insert_after;
4233 tree indx_before_incr, indx_after_incr;
4234 gimple *incr;
4235 tree step;
4236 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4238 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4239 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4241 if (loop_vinfo)
4243 loop = LOOP_VINFO_LOOP (loop_vinfo);
4244 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4245 containing_loop = (gimple_bb (stmt))->loop_father;
4246 pe = loop_preheader_edge (loop);
4248 else
4250 gcc_assert (bb_vinfo);
4251 only_init = true;
4252 *ptr_incr = NULL;
4255 /* Check the step (evolution) of the load in LOOP, and record
4256 whether it's invariant. */
4257 step = vect_dr_behavior (dr)->step;
4258 if (integer_zerop (step))
4259 *inv_p = true;
4260 else
4261 *inv_p = false;
4263 /* Create an expression for the first address accessed by this load
4264 in LOOP. */
4265 base_name = get_name (DR_BASE_ADDRESS (dr));
4267 if (dump_enabled_p ())
4269 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4270 dump_printf_loc (MSG_NOTE, vect_location,
4271 "create %s-pointer variable to type: ",
4272 get_tree_code_name (TREE_CODE (aggr_type)));
4273 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4274 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4275 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4276 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4277 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4278 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4279 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4280 else
4281 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4282 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4283 dump_printf (MSG_NOTE, "\n");
4286 /* (1) Create the new aggregate-pointer variable.
4287 Vector and array types inherit the alias set of their component
4288 type by default, so we need to use a ref-all pointer if the data
4289 reference does not conflict with the created aggregate data
4290 reference because it is not addressable. */
4291 bool need_ref_all = false;
4292 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4293 get_alias_set (DR_REF (dr))))
4294 need_ref_all = true;
4295 /* Likewise for any of the data references in the stmt group. */
4296 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4298 gimple *orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4301 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4302 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4303 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4304 get_alias_set (DR_REF (sdr))))
4306 need_ref_all = true;
4307 break;
4309 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4311 while (orig_stmt);
4313 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4314 need_ref_all);
4315 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4318 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4319 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4320 def-use update cycles for the pointer: one relative to the outer-loop
4321 (LOOP), which is what steps (2) and (3) below do. The other is relative
4322 to the inner-loop (which is the inner-most loop containing the dataref),
4323 and this is done by step (4) below.
4325 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4326 inner-most loop, and so steps (2),(3) work the same, and step (4) is
4327 redundant. Steps (2),(3) create the following:
4329 vp0 = &base_addr;
4330 LOOP: vp1 = phi(vp0,vp2)
4333 vp2 = vp1 + step
4334 goto LOOP
4336 If there is an inner-loop nested in loop, then step (4) will also be
4337 applied, and an additional update in the inner-loop will be created:
4339 vp0 = &base_addr;
4340 LOOP: vp1 = phi(vp0,vp2)
4342 inner: vp3 = phi(vp1,vp4)
4343 vp4 = vp3 + inner_step
4344 if () goto inner
4346 vp2 = vp1 + step
4347 if () goto LOOP */
4349 /* (2) Calculate the initial address of the aggregate-pointer, and set
4350 the aggregate-pointer to point to it before the loop. */
4352 /* Create: &(base[init_val + offset]) + byte_offset in the loop preheader. */
4354 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4355 offset, byte_offset);
4356 if (new_stmt_list)
4358 if (pe)
4360 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4361 gcc_assert (!new_bb);
4363 else
4364 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4367 *initial_address = new_temp;
4368 aggr_ptr_init = new_temp;
4370 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4371 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4372 inner-loop nested in LOOP (during outer-loop vectorization). */
4374 /* No update in loop is required. */
4375 if (only_init && (!loop_vinfo || at_loop == loop))
4376 aptr = aggr_ptr_init;
4377 else
4379 /* The step of the aggregate pointer is the type size. */
4380 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4381 /* One exception to the above is when the scalar step of the load in
4382 LOOP is zero. In this case the step here is also zero. */
4383 if (*inv_p)
4384 iv_step = size_zero_node;
4385 else if (tree_int_cst_sgn (step) == -1)
4386 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4388 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4390 create_iv (aggr_ptr_init,
4391 fold_convert (aggr_ptr_type, iv_step),
4392 aggr_ptr, loop, &incr_gsi, insert_after,
4393 &indx_before_incr, &indx_after_incr);
4394 incr = gsi_stmt (incr_gsi);
4395 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
4397 /* Copy the points-to information if it exists. */
4398 if (DR_PTR_INFO (dr))
4400 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr);
4401 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr);
4403 if (ptr_incr)
4404 *ptr_incr = incr;
4406 aptr = indx_before_incr;
4409 if (!nested_in_vect_loop || only_init)
4410 return aptr;
4413 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4414 nested in LOOP, if it exists. */
4416 gcc_assert (nested_in_vect_loop);
4417 if (!only_init)
4419 standard_iv_increment_position (containing_loop, &incr_gsi,
4420 &insert_after);
4421 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4422 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4423 &indx_after_incr);
4424 incr = gsi_stmt (incr_gsi);
4425 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
4427 /* Copy the points-to information if it exists. */
4428 if (DR_PTR_INFO (dr))
4430 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr);
4431 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr);
4433 if (ptr_incr)
4434 *ptr_incr = incr;
4436 return indx_before_incr;
4438 else
4439 gcc_unreachable ();
4443 /* Function bump_vector_ptr
4445 Increment a pointer (to a vector type) by vector-size. If requested,
4446 i.e. if PTR_INCR is given, then also connect the new increment stmt
4447 to the existing def-use update-chain of the pointer, by modifying
4448 the PTR_INCR as illustrated below:
4450 The pointer def-use update-chain before this function:
4451 DATAREF_PTR = phi (p_0, p_2)
4452 ....
4453 PTR_INCR: p_2 = DATAREF_PTR + step
4455 The pointer def-use update-chain after this function:
4456 DATAREF_PTR = phi (p_0, p_2)
4457 ....
4458 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4459 ....
4460 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4462 Input:
4463 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4464 in the loop.
4465 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4466 the loop. The increment amount across iterations is expected
4467 to be vector_size.
4468 BSI - location where the new update stmt is to be placed.
4469 STMT - the original scalar memory-access stmt that is being vectorized.
4470 BUMP - optional. The offset by which to bump the pointer. If not given,
4471 the offset is assumed to be vector_size.
4473 Output: Return NEW_DATAREF_PTR as illustrated above.
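   As an illustration (assuming a 16-byte vector type such as V4SF and no
   explicit BUMP): UPDATE is TYPE_SIZE_UNIT (vectype), i.e. 16, so the
   statement emitted below is effectively

     NEW_DATAREF_PTR = DATAREF_PTR p+ 16

   (a POINTER_PLUS_EXPR), after which the use of DATAREF_PTR in PTR_INCR,
   if PTR_INCR is given, is redirected to NEW_DATAREF_PTR.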
4477 tree
4478 bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
4479 gimple *stmt, tree bump)
4481 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4482 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4483 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4484 tree update = TYPE_SIZE_UNIT (vectype);
4485 gassign *incr_stmt;
4486 ssa_op_iter iter;
4487 use_operand_p use_p;
4488 tree new_dataref_ptr;
4490 if (bump)
4491 update = bump;
4493 if (TREE_CODE (dataref_ptr) == SSA_NAME)
4494 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4495 else
4496 new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
4497 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4498 dataref_ptr, update);
4499 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4501 /* Copy the points-to information if it exists. */
4502 if (DR_PTR_INFO (dr))
4504 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4505 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4508 if (!ptr_incr)
4509 return new_dataref_ptr;
4511 /* Update the vector-pointer's cross-iteration increment. */
4512 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4514 tree use = USE_FROM_PTR (use_p);
4516 if (use == dataref_ptr)
4517 SET_USE (use_p, new_dataref_ptr);
4518 else
4519 gcc_assert (tree_int_cst_compare (use, update) == 0);
4522 return new_dataref_ptr;
4526 /* Function vect_create_destination_var.
4528 Create a new temporary of type VECTYPE. */
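/* For example (illustration only): if SCALAR_DEST is the SSA name x_7
   and VECTYPE is a non-boolean vector type, the new temporary is a
   vect_simple_var named "vect_x_7"; with a NULL VECTYPE a scalar
   temporary named "stmp_x_7" is created instead. */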
4530 tree
4531 vect_create_destination_var (tree scalar_dest, tree vectype)
4533 tree vec_dest;
4534 const char *name;
4535 char *new_name;
4536 tree type;
4537 enum vect_var_kind kind;
4539 kind = vectype
4540 ? VECTOR_BOOLEAN_TYPE_P (vectype)
4541 ? vect_mask_var
4542 : vect_simple_var
4543 : vect_scalar_var;
4544 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4546 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4548 name = get_name (scalar_dest);
4549 if (name)
4550 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4551 else
4552 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4553 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4554 free (new_name);
4556 return vec_dest;
4559 /* Function vect_grouped_store_supported.
4561 Returns TRUE if interleave high and interleave low permutations
4562 are supported, and FALSE otherwise. */
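/* As a concrete illustration for the power-of-two case with V4SI: the two
   permutations checked below are the interleave-high mask {0, 4, 1, 5}
   and the interleave-low mask {2, 6, 3, 7}, i.e. exactly the operations
   vect_permute_store_chain generates. */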
4564 bool
4565 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4567 machine_mode mode = TYPE_MODE (vectype);
4569 /* vect_permute_store_chain requires the group size to be equal to 3 or
4570 be a power of two. */
4571 if (count != 3 && exact_log2 (count) == -1)
4573 if (dump_enabled_p ())
4574 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4575 "the size of the group of accesses"
4576 " is not a power of 2 or not eqaul to 3\n");
4577 return false;
4580 /* Check that the permutation is supported. */
4581 if (VECTOR_MODE_P (mode))
4583 unsigned int i;
4584 if (count == 3)
4586 unsigned int j0 = 0, j1 = 0, j2 = 0;
4587 unsigned int i, j;
4589 unsigned int nelt;
4590 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
4592 if (dump_enabled_p ())
4593 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4594 "cannot handle groups of 3 stores for"
4595 " variable-length vectors\n");
4596 return false;
4599 vec_perm_builder sel (nelt, nelt, 1);
4600 sel.quick_grow (nelt);
4601 vec_perm_indices indices;
4602 for (j = 0; j < 3; j++)
4604 int nelt0 = ((3 - j) * nelt) % 3;
4605 int nelt1 = ((3 - j) * nelt + 1) % 3;
4606 int nelt2 = ((3 - j) * nelt + 2) % 3;
4607 for (i = 0; i < nelt; i++)
4609 if (3 * i + nelt0 < nelt)
4610 sel[3 * i + nelt0] = j0++;
4611 if (3 * i + nelt1 < nelt)
4612 sel[3 * i + nelt1] = nelt + j1++;
4613 if (3 * i + nelt2 < nelt)
4614 sel[3 * i + nelt2] = 0;
4616 indices.new_vector (sel, 2, nelt);
4617 if (!can_vec_perm_const_p (mode, indices))
4619 if (dump_enabled_p ())
4620 dump_printf (MSG_MISSED_OPTIMIZATION,
4621 "permutation op not supported by target.\n");
4622 return false;
4625 for (i = 0; i < nelt; i++)
4627 if (3 * i + nelt0 < nelt)
4628 sel[3 * i + nelt0] = 3 * i + nelt0;
4629 if (3 * i + nelt1 < nelt)
4630 sel[3 * i + nelt1] = 3 * i + nelt1;
4631 if (3 * i + nelt2 < nelt)
4632 sel[3 * i + nelt2] = nelt + j2++;
4634 indices.new_vector (sel, 2, nelt);
4635 if (!can_vec_perm_const_p (mode, indices))
4637 if (dump_enabled_p ())
4638 dump_printf (MSG_MISSED_OPTIMIZATION,
4639 "permutation op not supported by target.\n");
4640 return false;
4643 return true;
4645 else
4647 /* If length is not equal to 3 then only power of 2 is supported. */
4648 gcc_assert (pow2p_hwi (count));
4649 poly_uint64 nelt = GET_MODE_NUNITS (mode);
4651 /* The encoding has 2 interleaved stepped patterns. */
4652 vec_perm_builder sel (nelt, 2, 3);
4653 sel.quick_grow (6);
4654 for (i = 0; i < 3; i++)
4656 sel[i * 2] = i;
4657 sel[i * 2 + 1] = i + nelt;
4659 vec_perm_indices indices (sel, 2, nelt);
4660 if (can_vec_perm_const_p (mode, indices))
4662 for (i = 0; i < 6; i++)
4663 sel[i] += exact_div (nelt, 2);
4664 indices.new_vector (sel, 2, nelt);
4665 if (can_vec_perm_const_p (mode, indices))
4666 return true;
4671 if (dump_enabled_p ())
4672 dump_printf (MSG_MISSED_OPTIMIZATION,
4673 "permutaion op not supported by target.\n");
4674 return false;
4678 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4679 type VECTYPE. */
4681 bool
4682 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4684 return vect_lanes_optab_supported_p ("vec_store_lanes",
4685 vec_store_lanes_optab,
4686 vectype, count);
4690 /* Function vect_permute_store_chain.
4692 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4693 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4694 the data correctly for the stores. Return the final references for stores
4695 in RESULT_CHAIN.
4697 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4698 The input is 4 vectors each containing 8 elements. We assign a number to
4699 each element, the input sequence is:
4701 1st vec: 0 1 2 3 4 5 6 7
4702 2nd vec: 8 9 10 11 12 13 14 15
4703 3rd vec: 16 17 18 19 20 21 22 23
4704 4th vec: 24 25 26 27 28 29 30 31
4706 The output sequence should be:
4708 1st vec: 0 8 16 24 1 9 17 25
4709 2nd vec: 2 10 18 26 3 11 19 27
4710 3rd vec: 4 12 20 28 5 13 21 29
4711 4th vec: 6 14 22 30 7 15 23 31
4713 i.e., we interleave the contents of the four vectors in their order.
4715 We use interleave_high/low instructions to create such output. The input of
4716 each interleave_high/low operation is two vectors:
4717 1st vec 2nd vec
4718 0 1 2 3 4 5 6 7
4719 the even elements of the result vector are obtained left-to-right from the
4720 high/low elements of the first vector. The odd elements of the result are
4721 obtained left-to-right from the high/low elements of the second vector.
4722 The output of interleave_high will be: 0 4 1 5
4723 and of interleave_low: 2 6 3 7
4726 The permutation is done in log LENGTH stages. In each stage interleave_high
4727 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4728 where the first argument is taken from the first half of DR_CHAIN and the
4729 second argument from its second half.
4730 In our example,
4732 I1: interleave_high (1st vec, 3rd vec)
4733 I2: interleave_low (1st vec, 3rd vec)
4734 I3: interleave_high (2nd vec, 4th vec)
4735 I4: interleave_low (2nd vec, 4th vec)
4737 The output for the first stage is:
4739 I1: 0 16 1 17 2 18 3 19
4740 I2: 4 20 5 21 6 22 7 23
4741 I3: 8 24 9 25 10 26 11 27
4742 I4: 12 28 13 29 14 30 15 31
4744 The output of the second stage, i.e. the final result is:
4746 I1: 0 8 16 24 1 9 17 25
4747 I2: 2 10 18 26 3 11 19 27
4748 I3: 4 12 20 28 5 13 21 29
4749 I4: 6 14 22 30 7 15 23 31. */
4751 void
4752 vect_permute_store_chain (vec<tree> dr_chain,
4753 unsigned int length,
4754 gimple *stmt,
4755 gimple_stmt_iterator *gsi,
4756 vec<tree> *result_chain)
4758 tree vect1, vect2, high, low;
4759 gimple *perm_stmt;
4760 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4761 tree perm_mask_low, perm_mask_high;
4762 tree data_ref;
4763 tree perm3_mask_low, perm3_mask_high;
4764 unsigned int i, j, n, log_length = exact_log2 (length);
4766 result_chain->quick_grow (length);
4767 memcpy (result_chain->address (), dr_chain.address (),
4768 length * sizeof (tree));
4770 if (length == 3)
4772 /* vect_grouped_store_supported ensures that this is constant. */
4773 unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
4774 unsigned int j0 = 0, j1 = 0, j2 = 0;
4776 vec_perm_builder sel (nelt, nelt, 1);
4777 sel.quick_grow (nelt);
4778 vec_perm_indices indices;
4779 for (j = 0; j < 3; j++)
4781 int nelt0 = ((3 - j) * nelt) % 3;
4782 int nelt1 = ((3 - j) * nelt + 1) % 3;
4783 int nelt2 = ((3 - j) * nelt + 2) % 3;
4785 for (i = 0; i < nelt; i++)
4787 if (3 * i + nelt0 < nelt)
4788 sel[3 * i + nelt0] = j0++;
4789 if (3 * i + nelt1 < nelt)
4790 sel[3 * i + nelt1] = nelt + j1++;
4791 if (3 * i + nelt2 < nelt)
4792 sel[3 * i + nelt2] = 0;
4794 indices.new_vector (sel, 2, nelt);
4795 perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
4797 for (i = 0; i < nelt; i++)
4799 if (3 * i + nelt0 < nelt)
4800 sel[3 * i + nelt0] = 3 * i + nelt0;
4801 if (3 * i + nelt1 < nelt)
4802 sel[3 * i + nelt1] = 3 * i + nelt1;
4803 if (3 * i + nelt2 < nelt)
4804 sel[3 * i + nelt2] = nelt + j2++;
4806 indices.new_vector (sel, 2, nelt);
4807 perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
4809 vect1 = dr_chain[0];
4810 vect2 = dr_chain[1];
4812 /* Create interleaving stmt:
4813 low = VEC_PERM_EXPR <vect1, vect2,
4814 {j, nelt, *, j + 1, nelt + j + 1, *,
4815 j + 2, nelt + j + 2, *, ...}> */
4816 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4817 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4818 vect2, perm3_mask_low);
4819 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4821 vect1 = data_ref;
4822 vect2 = dr_chain[2];
4823 /* Create interleaving stmt:
4824 low = VEC_PERM_EXPR <vect1, vect2,
4825 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4826 6, 7, nelt + j + 2, ...}> */
4827 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4828 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4829 vect2, perm3_mask_high);
4830 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4831 (*result_chain)[j] = data_ref;
4834 else
4836 /* If length is not equal to 3 then only power of 2 is supported. */
4837 gcc_assert (pow2p_hwi (length));
4839 /* The encoding has 2 interleaved stepped patterns. */
4840 poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
4841 vec_perm_builder sel (nelt, 2, 3);
4842 sel.quick_grow (6);
4843 for (i = 0; i < 3; i++)
4845 sel[i * 2] = i;
4846 sel[i * 2 + 1] = i + nelt;
4848 vec_perm_indices indices (sel, 2, nelt);
4849 perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
4851 for (i = 0; i < 6; i++)
4852 sel[i] += exact_div (nelt, 2);
4853 indices.new_vector (sel, 2, nelt);
4854 perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
4856 for (i = 0, n = log_length; i < n; i++)
4858 for (j = 0; j < length/2; j++)
4860 vect1 = dr_chain[j];
4861 vect2 = dr_chain[j+length/2];
4863 /* Create interleaving stmt:
4864 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4865 ...}> */
4866 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4867 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4868 vect2, perm_mask_high);
4869 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4870 (*result_chain)[2*j] = high;
4872 /* Create interleaving stmt:
4873 low = VEC_PERM_EXPR <vect1, vect2,
4874 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4875 ...}> */
4876 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4877 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4878 vect2, perm_mask_low);
4879 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4880 (*result_chain)[2*j+1] = low;
4882 memcpy (dr_chain.address (), result_chain->address (),
4883 length * sizeof (tree));
4888 /* Function vect_setup_realignment
4890 This function is called when vectorizing an unaligned load using
4891 the dr_explicit_realign[_optimized] scheme.
4892 This function generates the following code at the loop prolog:
4894 p = initial_addr;
4895 x msq_init = *(floor(p)); # prolog load
4896 realignment_token = call target_builtin;
4897 loop:
4898 x msq = phi (msq_init, ---)
4900 The stmts marked with x are generated only for the case of
4901 dr_explicit_realign_optimized.
4903 The code above sets up a new (vector) pointer, pointing to the first
4904 location accessed by STMT, and a "floor-aligned" load using that pointer.
4905 It also generates code to compute the "realignment-token" (if the relevant
4906 target hook was defined), and creates a phi-node at the loop-header bb
4907 whose arguments are the result of the prolog-load (created by this
4908 function) and the result of a load that takes place in the loop (to be
4909 created by the caller to this function).
4911 For the case of dr_explicit_realign_optimized:
4912 The caller to this function uses the phi-result (msq) to create the
4913 realignment code inside the loop, and sets up the missing phi argument,
4914 as follows:
4915 loop:
4916 msq = phi (msq_init, lsq)
4917 lsq = *(floor(p')); # load in loop
4918 result = realign_load (msq, lsq, realignment_token);
4920 For the case of dr_explicit_realign:
4921 loop:
4922 msq = *(floor(p)); # load in loop
4923 p' = p + (VS-1);
4924 lsq = *(floor(p')); # load in loop
4925 result = realign_load (msq, lsq, realignment_token);
4927 Input:
4928 STMT - (scalar) load stmt to be vectorized. This load accesses
4929 a memory location that may be unaligned.
4930 BSI - place where new code is to be inserted.
4931 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4932 is used.
4934 Output:
4935 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4936 target hook, if defined.
4937 Return value - the result of the loop-header phi node. */
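/* Concrete illustration (assuming DR_TARGET_ALIGNMENT is 16 bytes): the
   "floor(p)" computation emitted below is simply p & -16, a BIT_AND_EXPR
   of the pointer with -16, so msq_init loads the aligned 16-byte chunk
   containing the first scalar element accessed by STMT. */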
4939 tree
4940 vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
4941 tree *realignment_token,
4942 enum dr_alignment_support alignment_support_scheme,
4943 tree init_addr,
4944 struct loop **at_loop)
4946 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4947 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4948 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4949 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4950 struct loop *loop = NULL;
4951 edge pe = NULL;
4952 tree scalar_dest = gimple_assign_lhs (stmt);
4953 tree vec_dest;
4954 gimple *inc;
4955 tree ptr;
4956 tree data_ref;
4957 basic_block new_bb;
4958 tree msq_init = NULL_TREE;
4959 tree new_temp;
4960 gphi *phi_stmt;
4961 tree msq = NULL_TREE;
4962 gimple_seq stmts = NULL;
4963 bool inv_p;
4964 bool compute_in_loop = false;
4965 bool nested_in_vect_loop = false;
4966 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4967 struct loop *loop_for_initial_load = NULL;
4969 if (loop_vinfo)
4971 loop = LOOP_VINFO_LOOP (loop_vinfo);
4972 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4975 gcc_assert (alignment_support_scheme == dr_explicit_realign
4976 || alignment_support_scheme == dr_explicit_realign_optimized);
4978 /* We need to generate three things:
4979 1. the misalignment computation
4980 2. the extra vector load (for the optimized realignment scheme).
4981 3. the phi node for the two vectors from which the realignment is
4982 done (for the optimized realignment scheme). */
4984 /* 1. Determine where to generate the misalignment computation.
4986 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4987 calculation will be generated by this function, outside the loop (in the
4988 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4989 caller, inside the loop.
4991 Background: If the misalignment remains fixed throughout the iterations of
4992 the loop, then both realignment schemes are applicable, and also the
4993 misalignment computation can be done outside LOOP. This is because we are
4994 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4995 are a multiple of VS (the Vector Size), and therefore the misalignment in
4996 different vectorized LOOP iterations is always the same.
4997 The problem arises only if the memory access is in an inner-loop nested
4998 inside LOOP, which is now being vectorized using outer-loop vectorization.
4999 This is the only case when the misalignment of the memory access may not
5000 remain fixed throughout the iterations of the inner-loop (as explained in
5001 detail in vect_supportable_dr_alignment). In this case, not only is the
5002 optimized realignment scheme not applicable, but also the misalignment
5003 computation (and generation of the realignment token that is passed to
5004 REALIGN_LOAD) have to be done inside the loop.
5006 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
5007 or not, which in turn determines if the misalignment is computed inside
5008 the inner-loop, or outside LOOP. */
5010 if (init_addr != NULL_TREE || !loop_vinfo)
5012 compute_in_loop = true;
5013 gcc_assert (alignment_support_scheme == dr_explicit_realign);
5017 /* 2. Determine where to generate the extra vector load.
5019 For the optimized realignment scheme, instead of generating two vector
5020 loads in each iteration, we generate a single extra vector load in the
5021 preheader of the loop, and in each iteration reuse the result of the
5022 vector load from the previous iteration. In case the memory access is in
5023 an inner-loop nested inside LOOP, which is now being vectorized using
5024 outer-loop vectorization, we need to determine whether this initial vector
5025 load should be generated at the preheader of the inner-loop, or can be
5026 generated at the preheader of LOOP. If the memory access has no evolution
5027 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
5028 to be generated inside LOOP (in the preheader of the inner-loop). */
5030 if (nested_in_vect_loop)
5032 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
5033 bool invariant_in_outerloop =
5034 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
5035 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
5037 else
5038 loop_for_initial_load = loop;
5039 if (at_loop)
5040 *at_loop = loop_for_initial_load;
5042 if (loop_for_initial_load)
5043 pe = loop_preheader_edge (loop_for_initial_load);
5045 /* 3. For the case of the optimized realignment, create the first vector
5046 load at the loop preheader. */
5048 if (alignment_support_scheme == dr_explicit_realign_optimized)
5050 /* Create msq_init = *(floor(p1)) in the loop preheader */
5051 gassign *new_stmt;
5053 gcc_assert (!compute_in_loop);
5054 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5055 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
5056 NULL_TREE, &init_addr, NULL, &inc,
5057 true, &inv_p);
5058 if (TREE_CODE (ptr) == SSA_NAME)
5059 new_temp = copy_ssa_name (ptr);
5060 else
5061 new_temp = make_ssa_name (TREE_TYPE (ptr));
5062 unsigned int align = DR_TARGET_ALIGNMENT (dr);
5063 new_stmt = gimple_build_assign
5064 (new_temp, BIT_AND_EXPR, ptr,
5065 build_int_cst (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
5066 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5067 gcc_assert (!new_bb);
5068 data_ref
5069 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
5070 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
5071 new_stmt = gimple_build_assign (vec_dest, data_ref);
5072 new_temp = make_ssa_name (vec_dest, new_stmt);
5073 gimple_assign_set_lhs (new_stmt, new_temp);
5074 if (pe)
5076 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5077 gcc_assert (!new_bb);
5079 else
5080 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5082 msq_init = gimple_assign_lhs (new_stmt);
5085 /* 4. Create realignment token using a target builtin, if available.
5086 It is done either inside the containing loop, or before LOOP (as
5087 determined above). */
5089 if (targetm.vectorize.builtin_mask_for_load)
5091 gcall *new_stmt;
5092 tree builtin_decl;
5094 /* Compute INIT_ADDR - the initial address accessed by this memref. */
5095 if (!init_addr)
5097 /* Generate the INIT_ADDR computation outside LOOP. */
5098 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
5099 NULL_TREE);
5100 if (loop)
5102 pe = loop_preheader_edge (loop);
5103 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
5104 gcc_assert (!new_bb);
5106 else
5107 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
5110 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
5111 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
5112 vec_dest =
5113 vect_create_destination_var (scalar_dest,
5114 gimple_call_return_type (new_stmt));
5115 new_temp = make_ssa_name (vec_dest, new_stmt);
5116 gimple_call_set_lhs (new_stmt, new_temp);
5118 if (compute_in_loop)
5119 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5120 else
5122 /* Generate the misalignment computation outside LOOP. */
5123 pe = loop_preheader_edge (loop);
5124 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5125 gcc_assert (!new_bb);
5128 *realignment_token = gimple_call_lhs (new_stmt);
5130 /* The result of the CALL_EXPR to this builtin is determined from
5131 the value of the parameter, and no global variables are touched,
5132 which makes the builtin a "const" function. Requiring the
5133 builtin to have the "const" attribute makes it unnecessary
5134 to call mark_call_clobbered. */
5135 gcc_assert (TREE_READONLY (builtin_decl));
5138 if (alignment_support_scheme == dr_explicit_realign)
5139 return msq;
5141 gcc_assert (!compute_in_loop);
5142 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
5145 /* 5. Create msq = phi <msq_init, lsq> in loop */
5147 pe = loop_preheader_edge (containing_loop);
5148 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5149 msq = make_ssa_name (vec_dest);
5150 phi_stmt = create_phi_node (msq, containing_loop->header);
5151 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
5153 return msq;
5157 /* Function vect_grouped_load_supported.
5159 COUNT is the size of the load group (the number of statements plus the
5160 number of gaps). SINGLE_ELEMENT_P is true if there is actually
5161 only one statement, with a gap of COUNT - 1.
5163 Returns true if a suitable permute exists. */
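/* For a concrete power-of-two example with V4SI: the permutations checked
   below are the extract-even mask {0, 2, 4, 6} and the extract-odd mask
   {1, 3, 5, 7}, i.e. exactly the operations vect_permute_load_chain
   generates. */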
5165 bool
5166 vect_grouped_load_supported (tree vectype, bool single_element_p,
5167 unsigned HOST_WIDE_INT count)
5169 machine_mode mode = TYPE_MODE (vectype);
5171 /* If this is single-element interleaving with an element distance
5172 that leaves unused vector loads around, punt - we at least create
5173 very sub-optimal code in that case (and blow up memory,
5174 see PR65518). */
5175 if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
5177 if (dump_enabled_p ())
5178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5179 "single-element interleaving not supported "
5180 "for not adjacent vector loads\n");
5181 return false;
5184 /* vect_permute_load_chain requires the group size to be equal to 3 or
5185 be a power of two. */
5186 if (count != 3 && exact_log2 (count) == -1)
5188 if (dump_enabled_p ())
5189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5190 "the size of the group of accesses"
5191 " is not a power of 2 or not equal to 3\n");
5192 return false;
5195 /* Check that the permutation is supported. */
5196 if (VECTOR_MODE_P (mode))
5198 unsigned int i, j;
5199 if (count == 3)
5201 unsigned int nelt;
5202 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5204 if (dump_enabled_p ())
5205 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5206 "cannot handle groups of 3 loads for"
5207 " variable-length vectors\n");
5208 return false;
5211 vec_perm_builder sel (nelt, nelt, 1);
5212 sel.quick_grow (nelt);
5213 vec_perm_indices indices;
5214 unsigned int k;
5215 for (k = 0; k < 3; k++)
5217 for (i = 0; i < nelt; i++)
5218 if (3 * i + k < 2 * nelt)
5219 sel[i] = 3 * i + k;
5220 else
5221 sel[i] = 0;
5222 indices.new_vector (sel, 2, nelt);
5223 if (!can_vec_perm_const_p (mode, indices))
5225 if (dump_enabled_p ())
5226 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5227 "shuffle of 3 loads is not supported by"
5228 " target\n");
5229 return false;
5231 for (i = 0, j = 0; i < nelt; i++)
5232 if (3 * i + k < 2 * nelt)
5233 sel[i] = i;
5234 else
5235 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5236 indices.new_vector (sel, 2, nelt);
5237 if (!can_vec_perm_const_p (mode, indices))
5239 if (dump_enabled_p ())
5240 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5241 "shuffle of 3 loads is not supported by"
5242 " target\n");
5243 return false;
5246 return true;
5248 else
5250 /* If length is not equal to 3 then only power of 2 is supported. */
5251 gcc_assert (pow2p_hwi (count));
5252 poly_uint64 nelt = GET_MODE_NUNITS (mode);
5254 /* The encoding has a single stepped pattern. */
5255 vec_perm_builder sel (nelt, 1, 3);
5256 sel.quick_grow (3);
5257 for (i = 0; i < 3; i++)
5258 sel[i] = i * 2;
5259 vec_perm_indices indices (sel, 2, nelt);
5260 if (can_vec_perm_const_p (mode, indices))
5262 for (i = 0; i < 3; i++)
5263 sel[i] = i * 2 + 1;
5264 indices.new_vector (sel, 2, nelt);
5265 if (can_vec_perm_const_p (mode, indices))
5266 return true;
5271 if (dump_enabled_p ())
5272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5273 "extract even/odd not supported by target\n");
5274 return false;
5277 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5278 type VECTYPE. */
5280 bool
5281 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5283 return vect_lanes_optab_supported_p ("vec_load_lanes",
5284 vec_load_lanes_optab,
5285 vectype, count);
5288 /* Function vect_permute_load_chain.
5290 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5291 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5292 the input data correctly. Return the final references for loads in
5293 RESULT_CHAIN.
5295 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5296 The input is 4 vectors each containing 8 elements. We assign a number to each
5297 element, the input sequence is:
5299 1st vec: 0 1 2 3 4 5 6 7
5300 2nd vec: 8 9 10 11 12 13 14 15
5301 3rd vec: 16 17 18 19 20 21 22 23
5302 4th vec: 24 25 26 27 28 29 30 31
5304 The output sequence should be:
5306 1st vec: 0 4 8 12 16 20 24 28
5307 2nd vec: 1 5 9 13 17 21 25 29
5308 3rd vec: 2 6 10 14 18 22 26 30
5309 4th vec: 3 7 11 15 19 23 27 31
5311 i.e., the first output vector should contain the first elements of each
5312 interleaving group, etc.
5314 We use extract_even/odd instructions to create such output. The input of
5315 each extract_even/odd operation is two vectors
5316 1st vec 2nd vec
5317 0 1 2 3 4 5 6 7
5319 and the output is the vector of extracted even/odd elements. The output of
5320 extract_even will be: 0 2 4 6
5321 and of extract_odd: 1 3 5 7
5324 The permutation is done in log LENGTH stages. In each stage extract_even
5325 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5326 their order. In our example,
5328 E1: extract_even (1st vec, 2nd vec)
5329 E2: extract_odd (1st vec, 2nd vec)
5330 E3: extract_even (3rd vec, 4th vec)
5331 E4: extract_odd (3rd vec, 4th vec)
5333 The output for the first stage will be:
5335 E1: 0 2 4 6 8 10 12 14
5336 E2: 1 3 5 7 9 11 13 15
5337 E3: 16 18 20 22 24 26 28 30
5338 E4: 17 19 21 23 25 27 29 31
5340 In order to proceed and create the correct sequence for the next stage (or
5341 for the correct output, if the second stage is the last one, as in our
5342 example), we first put the output of extract_even operation and then the
5343 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5344 The input for the second stage is:
5346 1st vec (E1): 0 2 4 6 8 10 12 14
5347 2nd vec (E3): 16 18 20 22 24 26 28 30
5348 3rd vec (E2): 1 3 5 7 9 11 13 15
5349 4th vec (E4): 17 19 21 23 25 27 29 31
5351 The output of the second stage:
5353 E1: 0 4 8 12 16 20 24 28
5354 E2: 2 6 10 14 18 22 26 30
5355 E3: 1 5 9 13 17 21 25 29
5356 E4: 3 7 11 15 19 23 27 31
5358 And RESULT_CHAIN after reordering:
5360 1st vec (E1): 0 4 8 12 16 20 24 28
5361 2nd vec (E3): 1 5 9 13 17 21 25 29
5362 3rd vec (E2): 2 6 10 14 18 22 26 30
5363 4th vec (E4): 3 7 11 15 19 23 27 31. */
5365 static void
5366 vect_permute_load_chain (vec<tree> dr_chain,
5367 unsigned int length,
5368 gimple *stmt,
5369 gimple_stmt_iterator *gsi,
5370 vec<tree> *result_chain)
5372 tree data_ref, first_vect, second_vect;
5373 tree perm_mask_even, perm_mask_odd;
5374 tree perm3_mask_low, perm3_mask_high;
5375 gimple *perm_stmt;
5376 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5377 unsigned int i, j, log_length = exact_log2 (length);
5379 result_chain->quick_grow (length);
5380 memcpy (result_chain->address (), dr_chain.address (),
5381 length * sizeof (tree));
5383 if (length == 3)
5385 /* vect_grouped_load_supported ensures that this is constant. */
5386 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
5387 unsigned int k;
5389 vec_perm_builder sel (nelt, nelt, 1);
5390 sel.quick_grow (nelt);
5391 vec_perm_indices indices;
5392 for (k = 0; k < 3; k++)
5394 for (i = 0; i < nelt; i++)
5395 if (3 * i + k < 2 * nelt)
5396 sel[i] = 3 * i + k;
5397 else
5398 sel[i] = 0;
5399 indices.new_vector (sel, 2, nelt);
5400 perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
5402 for (i = 0, j = 0; i < nelt; i++)
5403 if (3 * i + k < 2 * nelt)
5404 sel[i] = i;
5405 else
5406 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5407 indices.new_vector (sel, 2, nelt);
5408 perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
5410 first_vect = dr_chain[0];
5411 second_vect = dr_chain[1];
5413 /* Create interleaving stmt (low part of):
5414 low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5415 ...}> */
5416 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5417 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5418 second_vect, perm3_mask_low);
5419 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5421 /* Create interleaving stmt (high part of):
5422 high = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5423 ...}> */
5424 first_vect = data_ref;
5425 second_vect = dr_chain[2];
5426 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5427 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5428 second_vect, perm3_mask_high);
5429 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5430 (*result_chain)[k] = data_ref;
5433 else
5435 /* If length is not equal to 3, then only a power of 2 is supported. */
5436 gcc_assert (pow2p_hwi (length));
5438 /* The encoding has a single stepped pattern. */
5439 poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
5440 vec_perm_builder sel (nelt, 1, 3);
5441 sel.quick_grow (3);
5442 for (i = 0; i < 3; ++i)
5443 sel[i] = i * 2;
5444 vec_perm_indices indices (sel, 2, nelt);
5445 perm_mask_even = vect_gen_perm_mask_checked (vectype, indices);
5447 for (i = 0; i < 3; ++i)
5448 sel[i] = i * 2 + 1;
5449 indices.new_vector (sel, 2, nelt);
5450 perm_mask_odd = vect_gen_perm_mask_checked (vectype, indices);
5452 for (i = 0; i < log_length; i++)
5454 for (j = 0; j < length; j += 2)
5456 first_vect = dr_chain[j];
5457 second_vect = dr_chain[j+1];
5459 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5460 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5461 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5462 first_vect, second_vect,
5463 perm_mask_even);
5464 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5465 (*result_chain)[j/2] = data_ref;
5467 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5468 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5469 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5470 first_vect, second_vect,
5471 perm_mask_odd);
5472 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5473 (*result_chain)[j/2+length/2] = data_ref;
5475 memcpy (dr_chain.address (), result_chain->address (),
5476 length * sizeof (tree));
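/* A standalone worked example (not GCC code): replay the log2 (LENGTH)
   even/odd stages of vect_permute_load_chain on element labels, for the
   LENGTH = 4, NELT = 8 case illustrated in the comment above.  */

#include <stdio.h>
#include <string.h>

#define LENGTH 4
#define NELT 8

/* Even (ODD == 0) or odd (ODD == 1) extraction from the concatenation
   of vectors A and B.  */
static void
extract (const int *a, const int *b, int odd, int *out)
{
  for (int i = 0; i < NELT; i++)
    {
      int idx = 2 * i + odd;
      out[i] = idx < NELT ? a[idx] : b[idx - NELT];
    }
}

int
main (void)
{
  int chain[LENGTH][NELT], result[LENGTH][NELT];

  for (int v = 0; v < LENGTH; v++)
    for (int e = 0; e < NELT; e++)
      chain[v][e] = v * NELT + e;	/* elements 0 .. 31 */

  for (int stage = 0; stage < 2; stage++)	/* log2 (LENGTH) == 2 stages */
    {
      for (int j = 0; j < LENGTH; j += 2)
	{
	  extract (chain[j], chain[j + 1], 0, result[j / 2]);
	  extract (chain[j], chain[j + 1], 1, result[j / 2 + LENGTH / 2]);
	}
      memcpy (chain, result, sizeof chain);
    }

  /* Prints 0 4 8 ... 28 / 1 5 9 ... 29 / 2 6 ... 30 / 3 7 ... 31.  */
  for (int v = 0; v < LENGTH; v++)
    {
      for (int e = 0; e < NELT; e++)
	printf ("%2d ", chain[v][e]);
      printf ("\n");
    }
  return 0;
}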
5481 /* Function vect_shift_permute_load_chain.
5483 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate a
5484 sequence of stmts to reorder the input data accordingly.
5485 Return the final references for loads in RESULT_CHAIN.
5486 Return true if successful, false otherwise.
5488 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5489 The input is 3 vectors each containing 8 elements. We assign a
5490 number to each element; the input sequence is:
5492 1st vec: 0 1 2 3 4 5 6 7
5493 2nd vec: 8 9 10 11 12 13 14 15
5494 3rd vec: 16 17 18 19 20 21 22 23
5496 The output sequence should be:
5498 1st vec: 0 3 6 9 12 15 18 21
5499 2nd vec: 1 4 7 10 13 16 19 22
5500 3rd vec: 2 5 8 11 14 17 20 23
5502 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5504 First we shuffle all 3 vectors to get correct elements order:
5506 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5507 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5508 3rd vec: (16 19 22) (17 20 23) (18 21)
5510 Next we unite and shift the vectors 3 times:
5512 1st step:
5513 shift right by 6 the concatenation of:
5514 "1st vec" and "2nd vec"
5515 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5516 "2nd vec" and "3rd vec"
5517 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5518 "3rd vec" and "1st vec"
5519 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5520 | New vectors |
5522 So now the new vectors are:
5524 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5525 2nd vec: (10 13) (16 19 22) (17 20 23)
5526 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5528 2nd step:
5529 shift right by 5 the concatenation of:
5530 "1st vec" and "3rd vec"
5531 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5532 "2nd vec" and "1st vec"
5533 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5534 "3rd vec" and "2nd vec"
5535 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5536 | New vectors |
5538 So now the new vectors are:
5540 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5541 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5542 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5544 3rd step:
5545 shift right by 5 the concatenation of:
5546 "1st vec" and "1st vec"
5547 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5548 shift right by 3 the concatenation of:
5549 "2nd vec" and "2nd vec"
5550 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5551 | New vectors |
5553 So now all vectors are READY:
5554 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5555 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5556 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5558 This algorithm is faster than the one in vect_permute_load_chain if:
5559 1. "shift of a concatenation" is faster than general permutation.
5560 This is usually so.
5561 2. The TARGET machine can't execute vector instructions in parallel.
5562 This is because each step of the algorithm depends on the previous one.
5563 The algorithm in vect_permute_load_chain is much more parallel.
5565 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
5568 static bool
5569 vect_shift_permute_load_chain (vec<tree> dr_chain,
5570 unsigned int length,
5571 gimple *stmt,
5572 gimple_stmt_iterator *gsi,
5573 vec<tree> *result_chain)
5575 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5576 tree perm2_mask1, perm2_mask2, perm3_mask;
5577 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5578 gimple *perm_stmt;
5580 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5581 unsigned int i;
5582 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5583 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5585 unsigned HOST_WIDE_INT nelt, vf;
5586 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
5587 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
5588 /* Not supported for variable-length vectors. */
5589 return false;
5591 vec_perm_builder sel (nelt, nelt, 1);
5592 sel.quick_grow (nelt);
5594 result_chain->quick_grow (length);
5595 memcpy (result_chain->address (), dr_chain.address (),
5596 length * sizeof (tree));
5598 if (pow2p_hwi (length) && vf > 4)
5600 unsigned int j, log_length = exact_log2 (length);
5601 for (i = 0; i < nelt / 2; ++i)
5602 sel[i] = i * 2;
5603 for (i = 0; i < nelt / 2; ++i)
5604 sel[nelt / 2 + i] = i * 2 + 1;
5605 vec_perm_indices indices (sel, 2, nelt);
5606 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5608 if (dump_enabled_p ())
5609 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5610 "shuffle of 2 fields structure is not \
5611 supported by target\n");
5612 return false;
5614 perm2_mask1 = vect_gen_perm_mask_checked (vectype, indices);
5616 for (i = 0; i < nelt / 2; ++i)
5617 sel[i] = i * 2 + 1;
5618 for (i = 0; i < nelt / 2; ++i)
5619 sel[nelt / 2 + i] = i * 2;
5620 indices.new_vector (sel, 2, nelt);
5621 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5623 if (dump_enabled_p ())
5624 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5625 "shuffle of 2 fields structure is not \
5626 supported by target\n");
5627 return false;
5629 perm2_mask2 = vect_gen_perm_mask_checked (vectype, indices);
5631 /* Generating permutation constant to shift all elements.
5632 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5633 for (i = 0; i < nelt; i++)
5634 sel[i] = nelt / 2 + i;
5635 indices.new_vector (sel, 2, nelt);
5636 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5638 if (dump_enabled_p ())
5639 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5640 "shift permutation is not supported by target\n");
5641 return false;
5643 shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
5645 /* Generating permutation constant to select vector from 2.
5646 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5647 for (i = 0; i < nelt / 2; i++)
5648 sel[i] = i;
5649 for (i = nelt / 2; i < nelt; i++)
5650 sel[i] = nelt + i;
5651 indices.new_vector (sel, 2, nelt);
5652 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5654 if (dump_enabled_p ())
5655 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5656 "select is not supported by target\n");
5657 return false;
5659 select_mask = vect_gen_perm_mask_checked (vectype, indices);
5661 for (i = 0; i < log_length; i++)
5663 for (j = 0; j < length; j += 2)
5665 first_vect = dr_chain[j];
5666 second_vect = dr_chain[j + 1];
5668 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5669 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5670 first_vect, first_vect,
5671 perm2_mask1);
5672 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5673 vect[0] = data_ref;
5675 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5676 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5677 second_vect, second_vect,
5678 perm2_mask2);
5679 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5680 vect[1] = data_ref;
5682 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5683 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5684 vect[0], vect[1], shift1_mask);
5685 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5686 (*result_chain)[j/2 + length/2] = data_ref;
5688 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5689 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5690 vect[0], vect[1], select_mask);
5691 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5692 (*result_chain)[j/2] = data_ref;
5694 memcpy (dr_chain.address (), result_chain->address (),
5695 length * sizeof (tree));
5697 return true;
5699 if (length == 3 && vf > 2)
5701 unsigned int k = 0, l = 0;
5703 /* Generating permutation constant to get all elements in the right order.
5704 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5705 for (i = 0; i < nelt; i++)
5707 if (3 * k + (l % 3) >= nelt)
5709 k = 0;
5710 l += (3 - (nelt % 3));
5712 sel[i] = 3 * k + (l % 3);
5713 k++;
5715 vec_perm_indices indices (sel, 2, nelt);
5716 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5718 if (dump_enabled_p ())
5719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5720 "shuffle of 3 fields structure is not \
5721 supported by target\n");
5722 return false;
5724 perm3_mask = vect_gen_perm_mask_checked (vectype, indices);
5726 /* Generating permutation constant to shift all elements.
5727 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5728 for (i = 0; i < nelt; i++)
5729 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5730 indices.new_vector (sel, 2, nelt);
5731 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5733 if (dump_enabled_p ())
5734 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5735 "shift permutation is not supported by target\n");
5736 return false;
5738 shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
5740 /* Generating permutation constant to shift all elements.
5741 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5742 for (i = 0; i < nelt; i++)
5743 sel[i] = 2 * (nelt / 3) + 1 + i;
5744 indices.new_vector (sel, 2, nelt);
5745 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5747 if (dump_enabled_p ())
5748 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5749 "shift permutation is not supported by target\n");
5750 return false;
5752 shift2_mask = vect_gen_perm_mask_checked (vectype, indices);
5754 /* Generating permutation constant to shift all elements.
5755 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5756 for (i = 0; i < nelt; i++)
5757 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5758 indices.new_vector (sel, 2, nelt);
5759 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5761 if (dump_enabled_p ())
5762 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5763 "shift permutation is not supported by target\n");
5764 return false;
5766 shift3_mask = vect_gen_perm_mask_checked (vectype, indices);
5768 /* Generating permutation constant to shift all elements.
5769 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5770 for (i = 0; i < nelt; i++)
5771 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5772 indices.new_vector (sel, 2, nelt);
5773 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
5775 if (dump_enabled_p ())
5776 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5777 "shift permutation is not supported by target\n");
5778 return false;
5780 shift4_mask = vect_gen_perm_mask_checked (vectype, indices);
5782 for (k = 0; k < 3; k++)
5784 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5785 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5786 dr_chain[k], dr_chain[k],
5787 perm3_mask);
5788 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5789 vect[k] = data_ref;
5792 for (k = 0; k < 3; k++)
5794 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5795 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5796 vect[k % 3], vect[(k + 1) % 3],
5797 shift1_mask);
5798 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5799 vect_shift[k] = data_ref;
5802 for (k = 0; k < 3; k++)
5804 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5805 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5806 vect_shift[(4 - k) % 3],
5807 vect_shift[(3 - k) % 3],
5808 shift2_mask);
5809 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5810 vect[k] = data_ref;
5813 (*result_chain)[3 - (nelt % 3)] = vect[2];
5815 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5816 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5817 vect[0], shift3_mask);
5818 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5819 (*result_chain)[nelt % 3] = data_ref;
5821 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5822 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5823 vect[1], shift4_mask);
5824 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5825 (*result_chain)[0] = data_ref;
5826 return true;
5828 return false;
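/* A standalone sketch (not GCC code): for NELT = 8, compute the selector
   constants used by the length == 3 branch above -- the 3-field shuffle
   {0 3 6 1 4 7 2 5} and the four shift selectors quoted in the comments.
   The 2-field (power-of-2) branch builds its constants the same way:
   {0 2 4 6 1 3 5 7}, {4 5 6 7 8 9 10 11} and {0 1 2 3 12 13 14 15}.  */

#include <stdio.h>

#define NELT 8

static void
print_sel (const char *name, const unsigned *sel)
{
  printf ("%s:", name);
  for (unsigned i = 0; i < NELT; i++)
    printf (" %u", sel[i]);
  printf ("\n");
}

int
main (void)
{
  unsigned sel[NELT];
  unsigned i, k = 0, l = 0;

  /* Same index recurrence as the perm3_mask loop above.  */
  for (i = 0; i < NELT; i++)
    {
      if (3 * k + (l % 3) >= NELT)
	{
	  k = 0;
	  l += 3 - (NELT % 3);
	}
      sel[i] = 3 * k + (l % 3);
      k++;
    }
  print_sel ("perm3 ", sel);	/* 0 3 6 1 4 7 2 5 */

  for (i = 0; i < NELT; i++)
    sel[i] = 2 * (NELT / 3) + (NELT % 3) + i;
  print_sel ("shift1", sel);	/* 6 7 8 9 10 11 12 13 */

  for (i = 0; i < NELT; i++)
    sel[i] = 2 * (NELT / 3) + 1 + i;
  print_sel ("shift2", sel);	/* 5 6 7 8 9 10 11 12 */

  for (i = 0; i < NELT; i++)
    sel[i] = (NELT / 3) + (NELT % 3) / 2 + i;
  print_sel ("shift3", sel);	/* 3 4 5 6 7 8 9 10 */

  for (i = 0; i < NELT; i++)
    sel[i] = 2 * (NELT / 3) + (NELT % 3) / 2 + i;
  print_sel ("shift4", sel);	/* 5 6 7 8 9 10 11 12 */

  return 0;
}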
5831 /* Function vect_transform_grouped_load.
5833 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5834 to perform their permutation and assign the resulting vectorized statements to
5835 the scalar statements.
5838 void
5839 vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
5840 gimple_stmt_iterator *gsi)
5842 machine_mode mode;
5843 vec<tree> result_chain = vNULL;
5845 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5846 RESULT_CHAIN is the output of vect_permute_load_chain; it contains permuted
5847 vectors that are ready for vector computation. */
5848 result_chain.create (size);
5850 /* If the reassociation width for the vector type is 2 or greater, the target
5851 machine can execute 2 or more vector instructions in parallel. Otherwise try
5852 to get the chain for the load group using vect_shift_permute_load_chain. */
5853 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5854 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5855 || pow2p_hwi (size)
5856 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5857 gsi, &result_chain))
5858 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5859 vect_record_grouped_load_vectors (stmt, result_chain);
5860 result_chain.release ();
5863 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5864 generated as part of the vectorization of STMT. Assign the statement
5865 for each vector to the associated scalar statement. */
5867 void
5868 vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
5870 gimple *first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5871 gimple *next_stmt, *new_stmt;
5872 unsigned int i, gap_count;
5873 tree tmp_data_ref;
5875 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5876 Since we scan the chain starting from its first node, their order
5877 corresponds to the order of data-refs in RESULT_CHAIN. */
5878 next_stmt = first_stmt;
5879 gap_count = 1;
5880 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5882 if (!next_stmt)
5883 break;
5885 /* Skip the gaps. Loads created for the gaps will be removed by the dead
5886 code elimination pass later. No need to check for the first stmt in
5887 the group, since it always exists.
5888 GROUP_GAP is the number of steps in elements from the previous
5889 access (if there is no gap GROUP_GAP is 1). We skip loads that
5890 correspond to the gaps. */
5891 if (next_stmt != first_stmt
5892 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5894 gap_count++;
5895 continue;
5898 while (next_stmt)
5900 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5901 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5902 copies, and we put the new vector statement in the first available
5903 RELATED_STMT. */
5904 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5905 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5906 else
5908 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5910 gimple *prev_stmt =
5911 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5912 gimple *rel_stmt =
5913 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5914 while (rel_stmt)
5916 prev_stmt = rel_stmt;
5917 rel_stmt =
5918 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5921 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5922 new_stmt;
5926 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5927 gap_count = 1;
5928 /* If NEXT_STMT accesses the same DR as the previous statement,
5929 put the same TMP_DATA_REF as its vectorized statement; otherwise
5930 get the next data-ref from RESULT_CHAIN. */
5931 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5932 break;
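/* A simplified standalone model (not GCC code) of the gap skipping above.
   The hypothetical GAP array stands in for GROUP_GAP, and the model assumes
   one permuted vector per element position of the group, including gap
   positions.  With gaps {1, 3, 1}, vectors 1 and 2 load gap elements and
   are left for dead code elimination.  */

#include <stdio.h>

int
main (void)
{
  const unsigned gap[] = { 1, 3, 1 };		/* group members */
  const unsigned n_members = 3, n_vectors = 5;	/* group span incl. gaps */
  unsigned member = 0, gap_count = 1;

  for (unsigned v = 0; v < n_vectors && member < n_members; v++)
    {
      /* Mirrors: next_stmt != first_stmt
		  && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)).  */
      if (member != 0 && gap_count < gap[member])
	{
	  gap_count++;
	  printf ("vector %u: gap, skipped\n", v);
	  continue;
	}
      printf ("vector %u -> member %u\n", v, member);
      member++;
      gap_count = 1;
    }
  return 0;
}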
5937 /* Function vect_can_force_dr_alignment_p.
5939 Returns whether the alignment of DECL can be forced to an ALIGNMENT-bit
5940 boundary. */
5942 bool
5943 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5945 if (!VAR_P (decl))
5946 return false;
5948 if (decl_in_symtab_p (decl)
5949 && !symtab_node::get (decl)->can_increase_alignment_p ())
5950 return false;
5952 if (TREE_STATIC (decl))
5953 return (alignment <= MAX_OFILE_ALIGNMENT);
5954 else
5955 return (alignment <= MAX_STACK_ALIGNMENT);
5959 /* Return whether the data reference DR is supported with respect to its
5960 alignment.
5961 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5962 if it is aligned, i.e., check if it is possible to vectorize it with different
5963 alignment. */
5965 enum dr_alignment_support
5966 vect_supportable_dr_alignment (struct data_reference *dr,
5967 bool check_aligned_accesses)
5969 gimple *stmt = DR_STMT (dr);
5970 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5971 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5972 machine_mode mode = TYPE_MODE (vectype);
5973 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5974 struct loop *vect_loop = NULL;
5975 bool nested_in_vect_loop = false;
5977 if (aligned_access_p (dr) && !check_aligned_accesses)
5978 return dr_aligned;
5980 /* For now assume all conditional loads/stores support unaligned
5981 access without any special code. */
5982 if (is_gimple_call (stmt)
5983 && gimple_call_internal_p (stmt)
5984 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5985 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5986 return dr_unaligned_supported;
5988 if (loop_vinfo)
5990 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5991 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5994 /* Possibly unaligned access. */
5996 /* We can choose between using the implicit realignment scheme (generating
5997 a misaligned_move stmt) and the explicit realignment scheme (generating
5998 aligned loads with a REALIGN_LOAD). There are two variants to the
5999 explicit realignment scheme: optimized, and unoptimized.
6000 We can optimize the realignment only if the step between consecutive
6001 vector loads is equal to the vector size. Since the vector memory
6002 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
6003 is guaranteed that the misalignment amount remains the same throughout the
6004 execution of the vectorized loop. Therefore, we can create the
6005 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
6006 at the loop preheader.
6008 However, in the case of outer-loop vectorization, when vectorizing a
6009 memory access in the inner-loop nested within the LOOP that is now being
6010 vectorized, while it is guaranteed that the misalignment of the
6011 vectorized memory access will remain the same in different outer-loop
6012 iterations, it is *not* guaranteed that it will remain the same throughout
6013 the execution of the inner-loop. This is because the inner-loop advances
6014 with the original scalar step (and not in steps of VS). If the inner-loop
6015 step happens to be a multiple of VS, then the misalignment remains fixed
6016 and we can use the optimized realignment scheme. For example:
6018 for (i=0; i<N; i++)
6019 for (j=0; j<M; j++)
6020 s += a[i+j];
6022 When vectorizing the i-loop in the above example, the step between
6023 consecutive vector loads is 1, and so the misalignment does not remain
6024 fixed across the execution of the inner-loop, and the realignment cannot
6025 be optimized (as illustrated in the following pseudo vectorized loop):
6027 for (i=0; i<N; i+=4)
6028 for (j=0; j<M; j++){
6029 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
6030 // when j is {0,1,2,3,4,5,6,7,...} respectively.
6031 // (assuming that we start from an aligned address).
6034 We therefore have to use the unoptimized realignment scheme:
6036 for (i=0; i<N; i+=4)
6037 for (j=k; j<M; j+=4)
6038 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
6039 // that the misalignment of the initial address is
6040 // 0).
6042 The loop can then be vectorized as follows:
6044 for (k=0; k<4; k++){
6045 rt = get_realignment_token (&vp[k]);
6046 for (i=0; i<N; i+=4){
6047 v1 = vp[i+k];
6048 for (j=k; j<M; j+=4){
6049 v2 = vp[i+j+VS-1];
6050 va = REALIGN_LOAD <v1,v2,rt>;
6051 vs += va;
6052 v1 = v2;
6055 } */
6057 if (DR_IS_READ (dr))
6059 bool is_packed = false;
6060 tree type = (TREE_TYPE (DR_REF (dr)));
6062 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
6063 && (!targetm.vectorize.builtin_mask_for_load
6064 || targetm.vectorize.builtin_mask_for_load ()))
6066 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6068 /* If we are doing SLP then the accesses need not have the
6069 same alignment, instead it depends on the SLP group size. */
6070 if (loop_vinfo
6071 && STMT_SLP_TYPE (stmt_info)
6072 && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6073 * GROUP_SIZE (vinfo_for_stmt
6074 (GROUP_FIRST_ELEMENT (stmt_info))),
6075 TYPE_VECTOR_SUBPARTS (vectype)))
6077 else if (!loop_vinfo
6078 || (nested_in_vect_loop
6079 && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
6080 GET_MODE_SIZE (TYPE_MODE (vectype)))))
6081 return dr_explicit_realign;
6082 else
6083 return dr_explicit_realign_optimized;
6085 if (!known_alignment_for_access_p (dr))
6086 is_packed = not_size_aligned (DR_REF (dr));
6088 if (targetm.vectorize.support_vector_misalignment
6089 (mode, type, DR_MISALIGNMENT (dr), is_packed))
6090 /* Can't software pipeline the loads, but can at least do them. */
6091 return dr_unaligned_supported;
6093 else
6095 bool is_packed = false;
6096 tree type = (TREE_TYPE (DR_REF (dr)));
6098 if (!known_alignment_for_access_p (dr))
6099 is_packed = not_size_aligned (DR_REF (dr));
6101 if (targetm.vectorize.support_vector_misalignment
6102 (mode, type, DR_MISALIGNMENT (dr), is_packed))
6103 return dr_unaligned_supported;
6106 /* Unsupported. */
6107 return dr_unaligned_unsupported;
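/* A standalone sketch (not GCC code) of the misalignment pattern in the
   outer-loop example above, with VS = 4 elements and an aligned base.
   A scalar inner-loop step makes the misalignment of &vp[i+j] cycle
   0,1,2,3,... so the realignment token cannot be computed once in the
   preheader; a step of VS starting at K keeps it constant at K, which is
   what dr_explicit_realign_optimized relies on.  */

#include <stdio.h>

#define VS 4

int
main (void)
{
  /* Scalar step: misalignment varies with j.  */
  printf ("step 1     :");
  for (int j = 0; j < 2 * VS; j++)
    printf (" %d", j % VS);		/* 0 1 2 3 0 1 2 3 */
  printf ("\n");

  /* Step VS, starting at k: misalignment is always k.  */
  for (int k = 0; k < VS; k++)
    {
      printf ("step VS, k=%d:", k);
      for (int j = k; j < k + 4 * VS; j += VS)
	printf (" %d", j % VS);		/* k k k k */
      printf ("\n");
    }
  return 0;
}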