gcc/tree-vect-data-refs.c
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "predict.h"
31 #include "tm_p.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-ssa-loop-ivopts.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "tree-ssa-loop.h"
46 #include "cfgloop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "expr.h"
50 #include "builtins.h"
51 #include "params.h"
53 /* Return true if load- or store-lanes optab OPTAB is implemented for
54 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
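/* For example (illustrative only; the available modes and patterns are
   target dependent): with a VECTYPE of mode V4SI (128 bits) and COUNT == 2,
   the array mode computed below is the 256-bit integer mode, and the optab
   handler check requires a load/store-lanes pattern converting between that
   array mode and V4SI, roughly what a two-register structure load such as
   AArch64's LD2 provides for a loop like

     for (i = 0; i < n; i++)
       {
         b[i] = a[2 * i];        <-- lane 0 of each structure
         c[i] = a[2 * i + 1];    <-- lane 1 of each structure
       }
*/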
56 static bool
57 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
58 tree vectype, unsigned HOST_WIDE_INT count)
60 machine_mode mode, array_mode;
61 bool limit_p;
63 mode = TYPE_MODE (vectype);
64 limit_p = !targetm.array_mode_supported_p (mode, count);
65 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
66 MODE_INT, limit_p);
68 if (array_mode == BLKmode)
70 if (dump_enabled_p ())
71 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
72 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
73 GET_MODE_NAME (mode), count);
74 return false;
77 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
79 if (dump_enabled_p ())
80 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
81 "cannot use %s<%s><%s>\n", name,
82 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
83 return false;
86 if (dump_enabled_p ())
87 dump_printf_loc (MSG_NOTE, vect_location,
88 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
89 GET_MODE_NAME (mode));
91 return true;
95 /* Return the smallest scalar part of STMT.
96 This is used to determine the vectype of the stmt. We generally set the
97 vectype according to the type of the result (lhs). For stmts whose
98 result-type is different than the type of the arguments (e.g., demotion,
99 promotion), vectype will be reset appropriately (later). Note that we have
100 to visit the smallest datatype in this function, because that determines the
 101    VF.  If the smallest datatype in the loop is present only as the rhs of a
 102    promotion operation, we'd miss it.
103 Such a case, where a variable of this datatype does not appear in the lhs
104 anywhere in the loop, can only occur if it's an invariant: e.g.:
105 'int_x = (int) short_inv', which we'd expect to have been optimized away by
106 invariant motion. However, we cannot rely on invariant motion to always
107 take invariants out of the loop, and so in the case of promotion we also
108 have to check the rhs.
109 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
110 types. */
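/* For instance, for a widening statement such as

     int_x = (int) short_y;

   LHS_SIZE_UNIT is 4 and RHS_SIZE_UNIT is 2 (assuming the usual 2-byte
   short and 4-byte int), and the function returns the 'short' type, so
   that the caller bases the vectorization factor on the 2-byte element
   size rather than the 4-byte one.  */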
112 tree
113 vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit,
114 HOST_WIDE_INT *rhs_size_unit)
116 tree scalar_type = gimple_expr_type (stmt);
117 HOST_WIDE_INT lhs, rhs;
119 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
121 if (is_gimple_assign (stmt)
122 && (gimple_assign_cast_p (stmt)
123 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
124 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
125 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
127 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
129 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
130 if (rhs < lhs)
131 scalar_type = rhs_type;
134 *lhs_size_unit = lhs;
135 *rhs_size_unit = rhs;
136 return scalar_type;
140 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
141 tested at run-time. Return TRUE if DDR was successfully inserted.
142 Return false if versioning is not supported. */
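/* Conceptually (a simplified sketch; the actual check is constructed later
   from the recorded DDRs, using segment lengths rather than this naive
   form), versioning for alias turns

     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;          <-- a and b might overlap

   into

     if (a + n <= b || b + n <= a)
       vectorized loop;
     else
       scalar loop;

   Here we only queue the DDR so that such a check can be emitted for it.  */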
144 static bool
145 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
147 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
149 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
150 return false;
152 if (dump_enabled_p ())
154 dump_printf_loc (MSG_NOTE, vect_location,
155 "mark for run-time aliasing test between ");
156 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
157 dump_printf (MSG_NOTE, " and ");
158 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
159 dump_printf (MSG_NOTE, "\n");
162 if (optimize_loop_nest_for_size_p (loop))
164 if (dump_enabled_p ())
165 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
166 "versioning not supported when optimizing"
167 " for size.\n");
168 return false;
171 /* FORNOW: We don't support versioning with outer-loop vectorization. */
172 if (loop->inner)
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
176 "versioning not yet supported for outer-loops.\n");
177 return false;
180 /* FORNOW: We don't support creating runtime alias tests for non-constant
181 step. */
182 if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
183 || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
185 if (dump_enabled_p ())
186 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
187 "versioning not yet supported for non-constant "
188 "step\n");
189 return false;
192 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
193 return true;
197 /* Function vect_analyze_data_ref_dependence.
199 Return TRUE if there (might) exist a dependence between a memory-reference
200 DRA and a memory-reference DRB. When versioning for alias may check a
201 dependence at run-time, return FALSE. Adjust *MAX_VF according to
202 the data dependence. */
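/* As a hypothetical example, in

     for (i = 0; i < n; i++)
       a[i + 3] = a[i] + 1;

   the dependence distance between the read and the write is 3, so *MAX_VF
   is reduced to 3 below: using more than 3 elements per vector iteration
   would load values that have not yet been stored.  */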
204 static bool
205 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
206 loop_vec_info loop_vinfo, int *max_vf)
208 unsigned int i;
209 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
210 struct data_reference *dra = DDR_A (ddr);
211 struct data_reference *drb = DDR_B (ddr);
212 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
213 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
214 lambda_vector dist_v;
215 unsigned int loop_depth;
217 /* In loop analysis all data references should be vectorizable. */
218 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
219 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
220 gcc_unreachable ();
222 /* Independent data accesses. */
223 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
224 return false;
226 if (dra == drb
227 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
228 return false;
230 /* Even if we have an anti-dependence then, as the vectorized loop covers at
231 least two scalar iterations, there is always also a true dependence.
232 As the vectorizer does not re-order loads and stores we can ignore
233 the anti-dependence if TBAA can disambiguate both DRs similar to the
234 case with known negative distance anti-dependences (positive
235 distance anti-dependences would violate TBAA constraints). */
236 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
237 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
238 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
239 get_alias_set (DR_REF (drb))))
240 return false;
242 /* Unknown data dependence. */
243 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
245 /* If user asserted safelen consecutive iterations can be
246 executed concurrently, assume independence. */
247 if (loop->safelen >= 2)
249 if (loop->safelen < *max_vf)
250 *max_vf = loop->safelen;
251 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
252 return false;
255 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
256 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
258 if (dump_enabled_p ())
260 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
261 "versioning for alias not supported for: "
262 "can't determine dependence between ");
263 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
264 DR_REF (dra));
265 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
266 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
267 DR_REF (drb));
268 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
270 return true;
273 if (dump_enabled_p ())
275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
276 "versioning for alias required: "
277 "can't determine dependence between ");
278 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
279 DR_REF (dra));
280 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
281 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
282 DR_REF (drb));
283 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
286 /* Add to list of ddrs that need to be tested at run-time. */
287 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
290 /* Known data dependence. */
291 if (DDR_NUM_DIST_VECTS (ddr) == 0)
293 /* If user asserted safelen consecutive iterations can be
294 executed concurrently, assume independence. */
295 if (loop->safelen >= 2)
297 if (loop->safelen < *max_vf)
298 *max_vf = loop->safelen;
299 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
300 return false;
303 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
304 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
306 if (dump_enabled_p ())
308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
309 "versioning for alias not supported for: "
310 "bad dist vector for ");
311 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
312 DR_REF (dra));
313 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
314 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
315 DR_REF (drb));
316 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
318 return true;
321 if (dump_enabled_p ())
323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
324 "versioning for alias required: "
325 "bad dist vector for ");
326 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
327 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
328 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
329 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
331 /* Add to list of ddrs that need to be tested at run-time. */
332 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
335 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
336 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
338 int dist = dist_v[loop_depth];
340 if (dump_enabled_p ())
341 dump_printf_loc (MSG_NOTE, vect_location,
342 "dependence distance = %d.\n", dist);
344 if (dist == 0)
346 if (dump_enabled_p ())
348 dump_printf_loc (MSG_NOTE, vect_location,
349 "dependence distance == 0 between ");
350 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
351 dump_printf (MSG_NOTE, " and ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
 353           dump_printf (MSG_NOTE, "\n");
356 /* When we perform grouped accesses and perform implicit CSE
357 by detecting equal accesses and doing disambiguation with
358 runtime alias tests like for
359 .. = a[i];
360 .. = a[i+1];
361 a[i] = ..;
362 a[i+1] = ..;
363 *p = ..;
364 .. = a[i];
365 .. = a[i+1];
366 where we will end up loading { a[i], a[i+1] } once, make
367 sure that inserting group loads before the first load and
368 stores after the last store will do the right thing.
369 Similar for groups like
370 a[i] = ...;
371 ... = a[i];
372 a[i+1] = ...;
373 where loads from the group interleave with the store. */
374 if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
375 || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
377 gimple *earlier_stmt;
378 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
379 if (DR_IS_WRITE
380 (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
382 if (dump_enabled_p ())
383 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
384 "READ_WRITE dependence in interleaving."
385 "\n");
386 return true;
390 continue;
393 if (dist > 0 && DDR_REVERSED_P (ddr))
395 /* If DDR_REVERSED_P the order of the data-refs in DDR was
396 reversed (to make distance vector positive), and the actual
397 distance is negative. */
398 if (dump_enabled_p ())
399 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
400 "dependence distance negative.\n");
401 /* Record a negative dependence distance to later limit the
402 amount of stmt copying / unrolling we can perform.
403 Only need to handle read-after-write dependence. */
404 if (DR_IS_READ (drb)
405 && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
406 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
407 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
408 continue;
411 if (abs (dist) >= 2
412 && abs (dist) < *max_vf)
414 /* The dependence distance requires reduction of the maximal
415 vectorization factor. */
416 *max_vf = abs (dist);
417 if (dump_enabled_p ())
418 dump_printf_loc (MSG_NOTE, vect_location,
419 "adjusting maximal vectorization factor to %i\n",
420 *max_vf);
423 if (abs (dist) >= *max_vf)
425 /* Dependence distance does not create dependence, as far as
426 vectorization is concerned, in this case. */
427 if (dump_enabled_p ())
428 dump_printf_loc (MSG_NOTE, vect_location,
429 "dependence distance >= VF.\n");
430 continue;
433 if (dump_enabled_p ())
435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
436 "not vectorized, possible dependence "
437 "between data-refs ");
 438       dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
 439       dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
 440       dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
 441       dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
444 return true;
447 return false;
450 /* Function vect_analyze_data_ref_dependences.
452 Examine all the data references in the loop, and make sure there do not
453 exist any data dependences between them. Set *MAX_VF according to
454 the maximum vectorization factor the data dependences allow. */
456 bool
457 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
459 unsigned int i;
460 struct data_dependence_relation *ddr;
462 if (dump_enabled_p ())
463 dump_printf_loc (MSG_NOTE, vect_location,
464 "=== vect_analyze_data_ref_dependences ===\n");
466 LOOP_VINFO_DDRS (loop_vinfo)
467 .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
468 * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
469 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
470 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
471 &LOOP_VINFO_DDRS (loop_vinfo),
472 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
473 return false;
475 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
476 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
477 return false;
479 return true;
483 /* Function vect_slp_analyze_data_ref_dependence.
 485    Return TRUE if there (might) exist a dependence between a memory-reference
 486    DRA and a memory-reference DRB that prevents basic-block (SLP)
 487    vectorization.  Unlike the loop variant above, there is no run-time alias
 488    versioning here and no *MAX_VF to adjust.  */
490 static bool
491 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
493 struct data_reference *dra = DDR_A (ddr);
494 struct data_reference *drb = DDR_B (ddr);
496 /* We need to check dependences of statements marked as unvectorizable
 497      as well; they can still prohibit vectorization.  */
499 /* Independent data accesses. */
500 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
501 return false;
503 if (dra == drb)
504 return false;
506 /* Read-read is OK. */
507 if (DR_IS_READ (dra) && DR_IS_READ (drb))
508 return false;
510 /* If dra and drb are part of the same interleaving chain consider
511 them independent. */
512 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
513 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
514 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
515 return false;
517 /* Unknown data dependence. */
518 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
520 if (dump_enabled_p ())
522 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
523 "can't determine dependence between ");
524 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
525 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
526 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
527 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
530 else if (dump_enabled_p ())
532 dump_printf_loc (MSG_NOTE, vect_location,
533 "determined dependence between ");
534 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
535 dump_printf (MSG_NOTE, " and ");
536 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
537 dump_printf (MSG_NOTE, "\n");
540 /* We do not vectorize basic blocks with write-write dependencies. */
541 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
542 return true;
 544   /* If we have a read-write dependence, check that the load is before the store.
 545      When we vectorize basic blocks, a vector load can only be placed before its
 546      corresponding scalar load, and a vector store can only be placed after its
 547      corresponding scalar store.  So the order of the accesses is preserved if
 548      the load is before the store.  */
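/* E.g. (illustrative basic-block snippet):
     x = a[i];      <-- earlier scalar load
     a[i] = y;      <-- later scalar store
   The vector load is emitted no later than the scalar load and the vector
   store no earlier than the scalar store, so the load still observes the
   old value.  */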
549 gimple *earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
550 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
552 /* That only holds for load-store pairs taking part in vectorization. */
553 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
554 && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
555 return false;
558 return true;
 562 /* Function vect_slp_analyze_data_ref_dependences.
 564    Examine all the data references in the basic-block, and make sure there
 565    do not exist any data dependences between them that would prevent
 566    vectorizing the block.  */
568 bool
569 vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
571 struct data_dependence_relation *ddr;
572 unsigned int i;
574 if (dump_enabled_p ())
575 dump_printf_loc (MSG_NOTE, vect_location,
576 "=== vect_slp_analyze_data_ref_dependences ===\n");
578 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
579 &BB_VINFO_DDRS (bb_vinfo),
580 vNULL, true))
581 return false;
583 FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
584 if (vect_slp_analyze_data_ref_dependence (ddr))
585 return false;
587 return true;
591 /* Function vect_compute_data_ref_alignment
593 Compute the misalignment of the data reference DR.
595 Output:
596 1. If during the misalignment computation it is found that the data reference
597 cannot be vectorized then false is returned.
598 2. DR_MISALIGNMENT (DR) is defined.
600 FOR NOW: No analysis is actually performed. Misalignment is calculated
601 only for trivial cases. TODO. */
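/* As a worked example (illustrative; assumes a 16-byte vector type of
   4-byte elements): for an access at BASE + 20 where BASE is known to be
   16-byte aligned, DR_MISALIGNMENT is set to 20 mod 16 = 4 bytes; if the
   alignment of the base cannot be determined, it stays at -1 (unknown).  */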
603 static bool
604 vect_compute_data_ref_alignment (struct data_reference *dr)
606 gimple *stmt = DR_STMT (dr);
607 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
608 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
609 struct loop *loop = NULL;
610 tree ref = DR_REF (dr);
611 tree vectype;
612 tree base, base_addr;
613 tree misalign = NULL_TREE;
614 tree aligned_to;
615 unsigned HOST_WIDE_INT alignment;
617 if (dump_enabled_p ())
618 dump_printf_loc (MSG_NOTE, vect_location,
619 "vect_compute_data_ref_alignment:\n");
621 if (loop_vinfo)
622 loop = LOOP_VINFO_LOOP (loop_vinfo);
624 /* Initialize misalignment to unknown. */
625 SET_DR_MISALIGNMENT (dr, -1);
627 if (tree_fits_shwi_p (DR_STEP (dr)))
628 misalign = DR_INIT (dr);
629 aligned_to = DR_ALIGNED_TO (dr);
630 base_addr = DR_BASE_ADDRESS (dr);
631 vectype = STMT_VINFO_VECTYPE (stmt_info);
633 /* In case the dataref is in an inner-loop of the loop that is being
634 vectorized (LOOP), we use the base and misalignment information
635 relative to the outer-loop (LOOP). This is ok only if the misalignment
636 stays the same throughout the execution of the inner-loop, which is why
 637      we have to check that the stride of the dataref in the inner-loop is a
 638      multiple of the vector size.  */
639 if (loop && nested_in_vect_loop_p (loop, stmt))
641 tree step = DR_STEP (dr);
643 if (tree_fits_shwi_p (step)
644 && tree_to_shwi (step) % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
646 if (dump_enabled_p ())
647 dump_printf_loc (MSG_NOTE, vect_location,
648 "inner step divides the vector-size.\n");
649 misalign = STMT_VINFO_DR_INIT (stmt_info);
650 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
651 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
653 else
655 if (dump_enabled_p ())
656 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
657 "inner step doesn't divide the vector-size.\n");
658 misalign = NULL_TREE;
662 /* Similarly we can only use base and misalignment information relative to
663 an innermost loop if the misalignment stays the same throughout the
 664      execution of the loop.  As above, this is the case if the stride of the
 665      dataref times the vectorization factor is a multiple of the vector size.  */
666 else
668 tree step = DR_STEP (dr);
669 unsigned vf = loop ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1;
671 if (tree_fits_shwi_p (step)
672 && ((tree_to_shwi (step) * vf)
673 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
675 if (dump_enabled_p ())
676 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
677 "step doesn't divide the vector-size.\n");
678 misalign = NULL_TREE;
682 /* To look at alignment of the base we have to preserve an inner MEM_REF
683 as that carries alignment information of the actual access. */
684 base = ref;
685 while (handled_component_p (base))
686 base = TREE_OPERAND (base, 0);
687 if (TREE_CODE (base) == MEM_REF)
688 base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
689 build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
690 unsigned int base_alignment = get_object_alignment (base);
692 if (base_alignment >= TYPE_ALIGN (TREE_TYPE (vectype)))
693 DR_VECT_AUX (dr)->base_element_aligned = true;
695 alignment = TYPE_ALIGN_UNIT (vectype);
697 if ((compare_tree_int (aligned_to, alignment) < 0)
698 || !misalign)
700 if (dump_enabled_p ())
702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
703 "Unknown alignment for access: ");
704 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
705 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
707 return true;
710 if (base_alignment < TYPE_ALIGN (vectype))
712 /* Strip an inner MEM_REF to a bare decl if possible. */
713 if (TREE_CODE (base) == MEM_REF
714 && integer_zerop (TREE_OPERAND (base, 1))
715 && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
716 base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
718 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
720 if (dump_enabled_p ())
722 dump_printf_loc (MSG_NOTE, vect_location,
723 "can't force alignment of ref: ");
724 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
725 dump_printf (MSG_NOTE, "\n");
727 return true;
730 /* Force the alignment of the decl.
731 NOTE: This is the only change to the code we make during
732 the analysis phase, before deciding to vectorize the loop. */
733 if (dump_enabled_p ())
735 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
736 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
737 dump_printf (MSG_NOTE, "\n");
740 DR_VECT_AUX (dr)->base_decl = base;
741 DR_VECT_AUX (dr)->base_misaligned = true;
742 DR_VECT_AUX (dr)->base_element_aligned = true;
745 /* If this is a backward running DR then first access in the larger
746 vectype actually is N-1 elements before the address in the DR.
747 Adjust misalign accordingly. */
748 if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
750 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
751 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
752 otherwise we wouldn't be here. */
753 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
754 /* PLUS because DR_STEP was negative. */
755 misalign = size_binop (PLUS_EXPR, misalign, offset);
758 SET_DR_MISALIGNMENT (dr,
759 wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
761 if (dump_enabled_p ())
763 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
764 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
765 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
766 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
769 return true;
773 /* Function vect_compute_data_refs_alignment
775 Compute the misalignment of data references in the loop.
776 Return FALSE if a data reference is found that cannot be vectorized. */
778 static bool
779 vect_compute_data_refs_alignment (vec_info *vinfo)
781 vec<data_reference_p> datarefs = vinfo->datarefs;
782 struct data_reference *dr;
783 unsigned int i;
785 FOR_EACH_VEC_ELT (datarefs, i, dr)
787 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
788 if (STMT_VINFO_VECTORIZABLE (stmt_info)
789 && !vect_compute_data_ref_alignment (dr))
791 /* Strided accesses perform only component accesses, misalignment
792 information is irrelevant for them. */
793 if (STMT_VINFO_STRIDED_P (stmt_info)
794 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
795 continue;
797 if (is_a <bb_vec_info> (vinfo))
799 /* Mark unsupported statement as unvectorizable. */
800 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
801 continue;
803 else
804 return false;
808 return true;
812 /* Function vect_update_misalignment_for_peel
814 DR - the data reference whose misalignment is to be adjusted.
815 DR_PEEL - the data reference whose misalignment is being made
816 zero in the vector loop by the peel.
817 NPEEL - the number of iterations in the peel loop if the misalignment
818 of DR_PEEL is known at compile time. */
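/* A small worked example (hypothetical numbers): with 4-byte elements, a
   16-byte vector type and NPEEL == 3, a forward data-ref whose misalignment
   was 4 bytes becomes (4 + 3*4) mod 16 == 0, i.e. aligned, while one whose
   misalignment was 8 bytes becomes (8 + 3*4) mod 16 == 4.  */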
820 static void
821 vect_update_misalignment_for_peel (struct data_reference *dr,
822 struct data_reference *dr_peel, int npeel)
824 unsigned int i;
825 vec<dr_p> same_align_drs;
826 struct data_reference *current_dr;
827 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
828 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
829 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
830 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
832 /* For interleaved data accesses the step in the loop must be multiplied by
833 the size of the interleaving group. */
834 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
835 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
836 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
837 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
839 /* It can be assumed that the data refs with the same alignment as dr_peel
840 are aligned in the vector loop. */
841 same_align_drs
842 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
843 FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
845 if (current_dr != dr)
846 continue;
847 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
848 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
849 SET_DR_MISALIGNMENT (dr, 0);
850 return;
853 if (known_alignment_for_access_p (dr)
854 && known_alignment_for_access_p (dr_peel))
856 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
857 int misal = DR_MISALIGNMENT (dr);
858 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
859 misal += negative ? -npeel * dr_size : npeel * dr_size;
860 misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
861 SET_DR_MISALIGNMENT (dr, misal);
862 return;
865 if (dump_enabled_p ())
866 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
867 SET_DR_MISALIGNMENT (dr, -1);
871 /* Function vect_verify_datarefs_alignment
873 Return TRUE if all data references in the loop can be
874 handled with respect to alignment. */
876 bool
877 vect_verify_datarefs_alignment (vec_info *vinfo)
879 vec<data_reference_p> datarefs = vinfo->datarefs;
880 struct data_reference *dr;
881 enum dr_alignment_support supportable_dr_alignment;
882 unsigned int i;
884 FOR_EACH_VEC_ELT (datarefs, i, dr)
886 gimple *stmt = DR_STMT (dr);
887 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
889 if (!STMT_VINFO_RELEVANT_P (stmt_info))
890 continue;
892 /* For interleaving, only the alignment of the first access matters.
893 Skip statements marked as not vectorizable. */
894 if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
895 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
896 || !STMT_VINFO_VECTORIZABLE (stmt_info))
897 continue;
899 /* Strided accesses perform only component accesses, alignment is
900 irrelevant for them. */
901 if (STMT_VINFO_STRIDED_P (stmt_info)
902 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
903 continue;
905 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
906 if (!supportable_dr_alignment)
908 if (dump_enabled_p ())
910 if (DR_IS_READ (dr))
911 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
912 "not vectorized: unsupported unaligned load.");
913 else
914 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
915 "not vectorized: unsupported unaligned "
916 "store.");
918 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
919 DR_REF (dr));
920 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
922 return false;
924 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
925 dump_printf_loc (MSG_NOTE, vect_location,
926 "Vectorizing an unaligned access.\n");
928 return true;
 931 /* Given a memory reference EXP, return whether its alignment is less
 932    than its size.  */
934 static bool
935 not_size_aligned (tree exp)
937 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
938 return true;
940 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
941 > get_object_alignment (exp));
944 /* Function vector_alignment_reachable_p
946 Return true if vector alignment for DR is reachable by peeling
947 a few loop iterations. Return false otherwise. */
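/* For example (illustrative; a 16-byte vector type of 4-byte elements): a
   known misalignment of 4 bytes is reachable by peeling 3 scalar
   iterations, whereas a known misalignment of 2 bytes can never become a
   multiple of 16 by peeling whole 4-byte elements, so false is returned
   for it.  */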
949 static bool
950 vector_alignment_reachable_p (struct data_reference *dr)
952 gimple *stmt = DR_STMT (dr);
953 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
954 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
956 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
 958       /* For interleaved access we peel only if the number of iterations in
 959          the prolog loop ({VF - misalignment}) is a multiple of the
 960          number of interleaved accesses.  */
961 int elem_size, mis_in_elements;
962 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
964 /* FORNOW: handle only known alignment. */
965 if (!known_alignment_for_access_p (dr))
966 return false;
968 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
969 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
971 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
972 return false;
 975   /* If the misalignment is known at compile time then allow peeling
 976      only if natural alignment is reachable through peeling.  */
977 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
979 HOST_WIDE_INT elmsize =
980 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
981 if (dump_enabled_p ())
983 dump_printf_loc (MSG_NOTE, vect_location,
984 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
985 dump_printf (MSG_NOTE,
986 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
988 if (DR_MISALIGNMENT (dr) % elmsize)
990 if (dump_enabled_p ())
991 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
992 "data size does not divide the misalignment.\n");
993 return false;
997 if (!known_alignment_for_access_p (dr))
999 tree type = TREE_TYPE (DR_REF (dr));
1000 bool is_packed = not_size_aligned (DR_REF (dr));
1001 if (dump_enabled_p ())
1002 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1003 "Unknown misalignment, is_packed = %d\n",is_packed);
1004 if ((TYPE_USER_ALIGN (type) && !is_packed)
1005 || targetm.vectorize.vector_alignment_reachable (type, is_packed))
1006 return true;
1007 else
1008 return false;
1011 return true;
1015 /* Calculate the cost of the memory access represented by DR. */
1017 static void
1018 vect_get_data_access_cost (struct data_reference *dr,
1019 unsigned int *inside_cost,
1020 unsigned int *outside_cost,
1021 stmt_vector_for_cost *body_cost_vec)
1023 gimple *stmt = DR_STMT (dr);
1024 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1025 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1026 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1027 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1028 int ncopies = vf / nunits;
1030 if (DR_IS_READ (dr))
1031 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1032 NULL, body_cost_vec, false);
1033 else
1034 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1036 if (dump_enabled_p ())
1037 dump_printf_loc (MSG_NOTE, vect_location,
1038 "vect_get_data_access_cost: inside_cost = %d, "
1039 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1043 typedef struct _vect_peel_info
1045 int npeel;
1046 struct data_reference *dr;
1047 unsigned int count;
1048 } *vect_peel_info;
1050 typedef struct _vect_peel_extended_info
1052 struct _vect_peel_info peel_info;
1053 unsigned int inside_cost;
1054 unsigned int outside_cost;
1055 stmt_vector_for_cost body_cost_vec;
1056 } *vect_peel_extended_info;
1059 /* Peeling hashtable helpers. */
1061 struct peel_info_hasher : free_ptr_hash <_vect_peel_info>
1063 static inline hashval_t hash (const _vect_peel_info *);
1064 static inline bool equal (const _vect_peel_info *, const _vect_peel_info *);
1067 inline hashval_t
1068 peel_info_hasher::hash (const _vect_peel_info *peel_info)
1070 return (hashval_t) peel_info->npeel;
1073 inline bool
1074 peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b)
1076 return (a->npeel == b->npeel);
1080 /* Insert DR into peeling hash table with NPEEL as key. */
1082 static void
1083 vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
1084 loop_vec_info loop_vinfo, struct data_reference *dr,
1085 int npeel)
1087 struct _vect_peel_info elem, *slot;
1088 _vect_peel_info **new_slot;
1089 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1091 elem.npeel = npeel;
1092 slot = peeling_htab->find (&elem);
1093 if (slot)
1094 slot->count++;
1095 else
1097 slot = XNEW (struct _vect_peel_info);
1098 slot->npeel = npeel;
1099 slot->dr = dr;
1100 slot->count = 1;
1101 new_slot = peeling_htab->find_slot (slot, INSERT);
1102 *new_slot = slot;
1105 if (!supportable_dr_alignment
1106 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1107 slot->count += VECT_MAX_COST;
1111 /* Traverse peeling hash table to find peeling option that aligns maximum
1112 number of data accesses. */
1115 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1116 _vect_peel_extended_info *max)
1118 vect_peel_info elem = *slot;
1120 if (elem->count > max->peel_info.count
1121 || (elem->count == max->peel_info.count
1122 && max->peel_info.npeel > elem->npeel))
1124 max->peel_info.npeel = elem->npeel;
1125 max->peel_info.count = elem->count;
1126 max->peel_info.dr = elem->dr;
1129 return 1;
1133 /* Traverse peeling hash table and calculate cost for each peeling option.
1134 Find the one with the lowest cost. */
1137 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1138 _vect_peel_extended_info *min)
1140 vect_peel_info elem = *slot;
1141 int save_misalignment, dummy;
1142 unsigned int inside_cost = 0, outside_cost = 0, i;
1143 gimple *stmt = DR_STMT (elem->dr);
1144 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1145 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1146 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1147 struct data_reference *dr;
1148 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
1150 prologue_cost_vec.create (2);
1151 body_cost_vec.create (2);
1152 epilogue_cost_vec.create (2);
1154 FOR_EACH_VEC_ELT (datarefs, i, dr)
1156 stmt = DR_STMT (dr);
1157 stmt_info = vinfo_for_stmt (stmt);
1158 /* For interleaving, only the alignment of the first access
1159 matters. */
1160 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1161 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1162 continue;
1164 save_misalignment = DR_MISALIGNMENT (dr);
1165 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1166 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1167 &body_cost_vec);
1168 SET_DR_MISALIGNMENT (dr, save_misalignment);
1171 outside_cost += vect_get_known_peeling_cost
1172 (loop_vinfo, elem->npeel, &dummy,
1173 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1174 &prologue_cost_vec, &epilogue_cost_vec);
1176 /* Prologue and epilogue costs are added to the target model later.
1177 These costs depend only on the scalar iteration cost, the
1178 number of peeling iterations finally chosen, and the number of
1179 misaligned statements. So discard the information found here. */
1180 prologue_cost_vec.release ();
1181 epilogue_cost_vec.release ();
1183 if (inside_cost < min->inside_cost
1184 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1186 min->inside_cost = inside_cost;
1187 min->outside_cost = outside_cost;
1188 min->body_cost_vec.release ();
1189 min->body_cost_vec = body_cost_vec;
1190 min->peel_info.dr = elem->dr;
1191 min->peel_info.npeel = elem->npeel;
1193 else
1194 body_cost_vec.release ();
1196 return 1;
1200 /* Choose best peeling option by traversing peeling hash table and either
1201 choosing an option with the lowest cost (if cost model is enabled) or the
1202 option that aligns as many accesses as possible. */
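/* For instance (hypothetical counts): if three data-refs become aligned
   with NPEEL == 1 but only one with NPEEL == 3, the frequency-based
   strategy picks NPEEL == 1; with the cost model enabled the choice is
   instead driven by the modelled inside/outside loop costs of each
   option.  */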
1204 static struct data_reference *
1205 vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab,
1206 loop_vec_info loop_vinfo,
1207 unsigned int *npeel,
1208 stmt_vector_for_cost *body_cost_vec)
1210 struct _vect_peel_extended_info res;
1212 res.peel_info.dr = NULL;
1213 res.body_cost_vec = stmt_vector_for_cost ();
1215 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1217 res.inside_cost = INT_MAX;
1218 res.outside_cost = INT_MAX;
1219 peeling_htab->traverse <_vect_peel_extended_info *,
1220 vect_peeling_hash_get_lowest_cost> (&res);
1222 else
1224 res.peel_info.count = 0;
1225 peeling_htab->traverse <_vect_peel_extended_info *,
1226 vect_peeling_hash_get_most_frequent> (&res);
1229 *npeel = res.peel_info.npeel;
1230 *body_cost_vec = res.body_cost_vec;
1231 return res.peel_info.dr;
1235 /* Function vect_enhance_data_refs_alignment
1237 This pass will use loop versioning and loop peeling in order to enhance
1238 the alignment of data references in the loop.
1240    FOR NOW: we assume that whatever versioning/peeling takes place, only the
1241    original loop is to be vectorized.  Any other loops that are created by
1242    the transformations performed in this pass are not supposed to be
1243    vectorized.  This restriction will be relaxed.
1245    This pass will require a cost model to guide it on whether to apply peeling
1246    or versioning or a combination of the two.  For example, the scheme that
1247    Intel uses when given a loop with several memory accesses is as follows:
1248    choose one memory access ('p') whose alignment you want to force by doing
1249    peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
1250 other accesses are not necessarily aligned, or (2) use loop versioning to
1251 generate one loop in which all accesses are aligned, and another loop in
1252 which only 'p' is necessarily aligned.
1254 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1255    Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1256 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1258 Devising a cost model is the most critical aspect of this work. It will
1259 guide us on which access to peel for, whether to use loop versioning, how
1260 many versions to create, etc. The cost model will probably consist of
1261 generic considerations as well as target specific considerations (on
1262 powerpc for example, misaligned stores are more painful than misaligned
1263 loads).
1265 Here are the general steps involved in alignment enhancements:
1267 -- original loop, before alignment analysis:
1268 for (i=0; i<N; i++){
1269 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1270 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1273 -- After vect_compute_data_refs_alignment:
1274 for (i=0; i<N; i++){
1275 x = q[i]; # DR_MISALIGNMENT(q) = 3
1276 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1279 -- Possibility 1: we do loop versioning:
1280 if (p is aligned) {
1281 for (i=0; i<N; i++){ # loop 1A
1282 x = q[i]; # DR_MISALIGNMENT(q) = 3
1283 p[i] = y; # DR_MISALIGNMENT(p) = 0
1286 else {
1287 for (i=0; i<N; i++){ # loop 1B
1288 x = q[i]; # DR_MISALIGNMENT(q) = 3
1289 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1293 -- Possibility 2: we do loop peeling:
1294 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1295 x = q[i];
1296 p[i] = y;
1298 for (i = 3; i < N; i++){ # loop 2A
1299 x = q[i]; # DR_MISALIGNMENT(q) = 0
1300 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1303 -- Possibility 3: combination of loop peeling and versioning:
1304 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1305 x = q[i];
1306 p[i] = y;
1308 if (p is aligned) {
1309 for (i = 3; i<N; i++){ # loop 3A
1310 x = q[i]; # DR_MISALIGNMENT(q) = 0
1311 p[i] = y; # DR_MISALIGNMENT(p) = 0
1314 else {
1315 for (i = 3; i<N; i++){ # loop 3B
1316 x = q[i]; # DR_MISALIGNMENT(q) = 0
1317 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1321 These loops are later passed to loop_transform to be vectorized. The
1322 vectorizer will use the alignment information to guide the transformation
1323 (whether to generate regular loads/stores, or with special handling for
1324 misalignment). */
1326 bool
1327 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1329 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1330 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1331 enum dr_alignment_support supportable_dr_alignment;
1332 struct data_reference *dr0 = NULL, *first_store = NULL;
1333 struct data_reference *dr;
1334 unsigned int i, j;
1335 bool do_peeling = false;
1336 bool do_versioning = false;
1337 bool stat;
1338 gimple *stmt;
1339 stmt_vec_info stmt_info;
1340 unsigned int npeel = 0;
1341 bool all_misalignments_unknown = true;
1342 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1343 unsigned possible_npeel_number = 1;
1344 tree vectype;
1345 unsigned int nelements, mis, same_align_drs_max = 0;
1346 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
1347 hash_table<peel_info_hasher> peeling_htab (1);
1349 if (dump_enabled_p ())
1350 dump_printf_loc (MSG_NOTE, vect_location,
1351 "=== vect_enhance_data_refs_alignment ===\n");
1353 /* Reset data so we can safely be called multiple times. */
1354 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1355 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0;
1357 /* While cost model enhancements are expected in the future, the high level
1358 view of the code at this time is as follows:
1360 A) If there is a misaligned access then see if peeling to align
1361 this access can make all data references satisfy
1362 vect_supportable_dr_alignment. If so, update data structures
1363 as needed and return true.
1365 B) If peeling wasn't possible and there is a data reference with an
1366 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1367 then see if loop versioning checks can be used to make all data
1368 references satisfy vect_supportable_dr_alignment. If so, update
1369 data structures as needed and return true.
1371 C) If neither peeling nor versioning were successful then return false if
1372 any data reference does not satisfy vect_supportable_dr_alignment.
1374 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1376 Note, Possibility 3 above (which is peeling and versioning together) is not
1377 being done at this time. */
1379 /* (1) Peeling to force alignment. */
1381 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1382 Considerations:
1383 + How many accesses will become aligned due to the peeling
1384 - How many accesses will become unaligned due to the peeling,
1385 and the cost of misaligned accesses.
1386 - The cost of peeling (the extra runtime checks, the increase
1387 in code size). */
1389 FOR_EACH_VEC_ELT (datarefs, i, dr)
1391 stmt = DR_STMT (dr);
1392 stmt_info = vinfo_for_stmt (stmt);
1394 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1395 continue;
1397 /* For interleaving, only the alignment of the first access
1398 matters. */
1399 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1400 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1401 continue;
1403 /* For invariant accesses there is nothing to enhance. */
1404 if (integer_zerop (DR_STEP (dr)))
1405 continue;
1407 /* Strided accesses perform only component accesses, alignment is
1408 irrelevant for them. */
1409 if (STMT_VINFO_STRIDED_P (stmt_info)
1410 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1411 continue;
1413 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1414 do_peeling = vector_alignment_reachable_p (dr);
1415 if (do_peeling)
1417 if (known_alignment_for_access_p (dr))
1419 unsigned int npeel_tmp;
1420 bool negative = tree_int_cst_compare (DR_STEP (dr),
1421 size_zero_node) < 0;
1423 /* Save info about DR in the hash table. */
1424 vectype = STMT_VINFO_VECTYPE (stmt_info);
1425 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1426 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1427 TREE_TYPE (DR_REF (dr))));
1428 npeel_tmp = (negative
1429 ? (mis - nelements) : (nelements - mis))
1430 & (nelements - 1);
1432           /* For multiple types, it is possible that the bigger type access
1433              will have more than one peeling option.  E.g., a loop with two
1434              types: one of size (vector size / 4), and the other one of
1435              size (vector size / 8).  The vectorization factor will be 8.
1436              If both accesses are misaligned by 3, the first one needs one
1437              scalar iteration to be aligned, and the second one needs 5.
1438              But the first one will also be aligned by peeling 5 scalar
1439              iterations, and in that case both accesses will be aligned.
1440              Hence, except for the immediate peeling amount, we also want
1441              to try to add a full vector size, as long as we don't exceed
1442              the vectorization factor.
1443              We do this automatically for the cost model, since we calculate
1444              the cost for every peeling option.  */
1445 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1447 if (STMT_SLP_TYPE (stmt_info))
1448 possible_npeel_number
1449 = (vf * GROUP_SIZE (stmt_info)) / nelements;
1450 else
1451 possible_npeel_number = vf / nelements;
1454 /* Handle the aligned case. We may decide to align some other
1455 access, making DR unaligned. */
1456 if (DR_MISALIGNMENT (dr) == 0)
1458 npeel_tmp = 0;
1459 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1460 possible_npeel_number++;
1463 for (j = 0; j < possible_npeel_number; j++)
1465 vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
1466 dr, npeel_tmp);
1467 npeel_tmp += nelements;
1470 all_misalignments_unknown = false;
1471           /* The data-ref that was chosen for the case that all the
1472              misalignments are unknown is no longer relevant, since we
1473              now have a data-ref with known alignment.  */
1474 dr0 = NULL;
1476 else
1478           /* If we don't know any misalignment values, we prefer
1479              peeling for the data-ref that has the maximum number of data-refs
1480              with the same alignment, unless the target prefers to align
1481              stores over loads.  */
1482 if (all_misalignments_unknown)
1484 unsigned same_align_drs
1485 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1486 if (!dr0
1487 || same_align_drs_max < same_align_drs)
1489 same_align_drs_max = same_align_drs;
1490 dr0 = dr;
1492 /* For data-refs with the same number of related
1493 accesses prefer the one where the misalign
1494 computation will be invariant in the outermost loop. */
1495 else if (same_align_drs_max == same_align_drs)
1497 struct loop *ivloop0, *ivloop;
1498 ivloop0 = outermost_invariant_loop_for_expr
1499 (loop, DR_BASE_ADDRESS (dr0));
1500 ivloop = outermost_invariant_loop_for_expr
1501 (loop, DR_BASE_ADDRESS (dr));
1502 if ((ivloop && !ivloop0)
1503 || (ivloop && ivloop0
1504 && flow_loop_nested_p (ivloop, ivloop0)))
1505 dr0 = dr;
1508 if (!first_store && DR_IS_WRITE (dr))
1509 first_store = dr;
1512 /* If there are both known and unknown misaligned accesses in the
1513 loop, we choose peeling amount according to the known
1514 accesses. */
1515 if (!supportable_dr_alignment)
1517 dr0 = dr;
1518 if (!first_store && DR_IS_WRITE (dr))
1519 first_store = dr;
1523 else
1525 if (!aligned_access_p (dr))
1527 if (dump_enabled_p ())
1528 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1529 "vector alignment may not be reachable\n");
1530 break;
1535 /* Check if we can possibly peel the loop. */
1536 if (!vect_can_advance_ivs_p (loop_vinfo)
1537 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))
1538 || loop->inner)
1539 do_peeling = false;
1541 if (do_peeling
1542 && all_misalignments_unknown
1543 && vect_supportable_dr_alignment (dr0, false))
1545       /* Check if the target prefers to align stores over loads, i.e., if
1546          misaligned stores are more expensive than misaligned loads (taking
1547          drs with the same alignment into account).  */
1548 if (first_store && DR_IS_READ (dr0))
1550 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1551 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1552 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1553 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1554 stmt_vector_for_cost dummy;
1555 dummy.create (2);
1557 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1558 &dummy);
1559 vect_get_data_access_cost (first_store, &store_inside_cost,
1560 &store_outside_cost, &dummy);
1562 dummy.release ();
1564 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1565 aligning the load DR0). */
1566 load_inside_penalty = store_inside_cost;
1567 load_outside_penalty = store_outside_cost;
1568 for (i = 0;
1569 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1570 DR_STMT (first_store))).iterate (i, &dr);
1571 i++)
1572 if (DR_IS_READ (dr))
1574 load_inside_penalty += load_inside_cost;
1575 load_outside_penalty += load_outside_cost;
1577 else
1579 load_inside_penalty += store_inside_cost;
1580 load_outside_penalty += store_outside_cost;
1583 /* Calculate the penalty for leaving DR0 unaligned (by
1584 aligning the FIRST_STORE). */
1585 store_inside_penalty = load_inside_cost;
1586 store_outside_penalty = load_outside_cost;
1587 for (i = 0;
1588 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1589 DR_STMT (dr0))).iterate (i, &dr);
1590 i++)
1591 if (DR_IS_READ (dr))
1593 store_inside_penalty += load_inside_cost;
1594 store_outside_penalty += load_outside_cost;
1596 else
1598 store_inside_penalty += store_inside_cost;
1599 store_outside_penalty += store_outside_cost;
1602 if (load_inside_penalty > store_inside_penalty
1603 || (load_inside_penalty == store_inside_penalty
1604 && load_outside_penalty > store_outside_penalty))
1605 dr0 = first_store;
1608       /* In case there are only loads with different unknown misalignments, use
1609          peeling only if it may help to align other accesses in the loop or
1610          if it may help improve load bandwidth when we'd end up using
1611          unaligned loads.  */
1612 tree dr0_vt = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr0)));
1613 if (!first_store
1614 && !STMT_VINFO_SAME_ALIGN_REFS (
1615 vinfo_for_stmt (DR_STMT (dr0))).length ()
1616 && (vect_supportable_dr_alignment (dr0, false)
1617 != dr_unaligned_supported
1618 || (builtin_vectorization_cost (vector_load, dr0_vt, 0)
1619 == builtin_vectorization_cost (unaligned_load, dr0_vt, -1))))
1620 do_peeling = false;
1623 if (do_peeling && !dr0)
1625       /* Peeling is possible, but no data access strictly requires alignment
1626          in order to be vectorized.  So we try to choose the best possible peeling.  */
1628 /* We should get here only if there are drs with known misalignment. */
1629 gcc_assert (!all_misalignments_unknown);
1631 /* Choose the best peeling from the hash table. */
1632 dr0 = vect_peeling_hash_choose_best_peeling (&peeling_htab,
1633 loop_vinfo, &npeel,
1634 &body_cost_vec);
1635 if (!dr0 || !npeel)
1636 do_peeling = false;
1639 if (do_peeling)
1641 stmt = DR_STMT (dr0);
1642 stmt_info = vinfo_for_stmt (stmt);
1643 vectype = STMT_VINFO_VECTYPE (stmt_info);
1644 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1646 if (known_alignment_for_access_p (dr0))
1648 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1649 size_zero_node) < 0;
1650 if (!npeel)
1652 /* Since it's known at compile time, compute the number of
1653 iterations in the peeled loop (the peeling factor) for use in
1654 updating DR_MISALIGNMENT values. The peeling factor is the
1655 vectorization factor minus the misalignment as an element
1656 count. */
1657 mis = DR_MISALIGNMENT (dr0);
1658 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1659 npeel = ((negative ? mis - nelements : nelements - mis)
1660 & (nelements - 1));
1663 /* For interleaved data access every iteration accesses all the
1664 members of the group, therefore we divide the number of iterations
1665 by the group size. */
1666 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1667 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1668 npeel /= GROUP_SIZE (stmt_info);
1670 if (dump_enabled_p ())
1671 dump_printf_loc (MSG_NOTE, vect_location,
1672 "Try peeling by %d\n", npeel);
1675 /* Ensure that all data refs can be vectorized after the peel. */
1676 FOR_EACH_VEC_ELT (datarefs, i, dr)
1678 int save_misalignment;
1680 if (dr == dr0)
1681 continue;
1683 stmt = DR_STMT (dr);
1684 stmt_info = vinfo_for_stmt (stmt);
1685 /* For interleaving, only the alignment of the first access
1686 matters. */
1687 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1688 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1689 continue;
1691 /* Strided accesses perform only component accesses, alignment is
1692 irrelevant for them. */
1693 if (STMT_VINFO_STRIDED_P (stmt_info)
1694 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1695 continue;
1697 save_misalignment = DR_MISALIGNMENT (dr);
1698 vect_update_misalignment_for_peel (dr, dr0, npeel);
1699 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1700 SET_DR_MISALIGNMENT (dr, save_misalignment);
1702 if (!supportable_dr_alignment)
1704 do_peeling = false;
1705 break;
1709 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1711 stat = vect_verify_datarefs_alignment (loop_vinfo);
1712 if (!stat)
1713 do_peeling = false;
1714 else
1716 body_cost_vec.release ();
1717 return stat;
1721 /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
1722 if (do_peeling)
1724 unsigned max_allowed_peel
1725 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1726 if (max_allowed_peel != (unsigned)-1)
1728 unsigned max_peel = npeel;
1729 if (max_peel == 0)
1731 gimple *dr_stmt = DR_STMT (dr0);
1732 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1733 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1734 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1736 if (max_peel > max_allowed_peel)
1738 do_peeling = false;
1739 if (dump_enabled_p ())
1740 dump_printf_loc (MSG_NOTE, vect_location,
1741 "Disable peeling, max peels reached: %d\n", max_peel);
1746 /* Cost model #2 - if peeling may result in a remaining loop not
1747 iterating enough to be vectorized then do not peel. */
1748 if (do_peeling
1749 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1751 unsigned max_peel
1752 = npeel == 0 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 : npeel;
1753 if (LOOP_VINFO_INT_NITERS (loop_vinfo)
1754 < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + max_peel)
1755 do_peeling = false;
1758 if (do_peeling)
1760 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1761 If the misalignment of DR_i is identical to that of dr0 then set
1762 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1763 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1764 by the peeling factor times the element size of DR_i (MOD the
1765 vectorization factor times the size). Otherwise, the
1766 misalignment of DR_i must be set to unknown. */
1767 FOR_EACH_VEC_ELT (datarefs, i, dr)
1768 if (dr != dr0)
1769 vect_update_misalignment_for_peel (dr, dr0, npeel);
1771 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1772 if (npeel)
1773 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1774 else
1775 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1776 = DR_MISALIGNMENT (dr0);
1777 SET_DR_MISALIGNMENT (dr0, 0);
1778 if (dump_enabled_p ())
1780 dump_printf_loc (MSG_NOTE, vect_location,
1781 "Alignment of access forced using peeling.\n");
1782 dump_printf_loc (MSG_NOTE, vect_location,
1783 "Peeling for alignment will be applied.\n");
1785 /* The inside-loop cost will be accounted for in vectorizable_load
1786 and vectorizable_store correctly with adjusted alignments.
1787 Drop the body_cost_vec on the floor here. */
1788 body_cost_vec.release ();
1790 stat = vect_verify_datarefs_alignment (loop_vinfo);
1791 gcc_assert (stat);
1792 return stat;
1796 body_cost_vec.release ();
1798 /* (2) Versioning to force alignment. */
1800 /* Try versioning if:
1801 1) optimize loop for speed
1802 2) there is at least one unsupported misaligned data ref with an unknown
1803 misalignment, and
1804 3) all misaligned data refs with a known misalignment are supported, and
1805 4) the number of runtime alignment checks is within reason. */
1807 do_versioning =
1808 optimize_loop_nest_for_speed_p (loop)
1809 && (!loop->inner); /* FORNOW */
1811 if (do_versioning)
1813 FOR_EACH_VEC_ELT (datarefs, i, dr)
1815 stmt = DR_STMT (dr);
1816 stmt_info = vinfo_for_stmt (stmt);
1818 /* For interleaving, only the alignment of the first access
1819 matters. */
1820 if (aligned_access_p (dr)
1821 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1822 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1823 continue;
1825 if (STMT_VINFO_STRIDED_P (stmt_info))
1827 /* Strided loads perform only component accesses, alignment is
1828 irrelevant for them. */
1829 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1830 continue;
1831 do_versioning = false;
1832 break;
1835 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1837 if (!supportable_dr_alignment)
1839 gimple *stmt;
1840 int mask;
1841 tree vectype;
1843 if (known_alignment_for_access_p (dr)
1844 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1845 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1847 do_versioning = false;
1848 break;
1851 stmt = DR_STMT (dr);
1852 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1853 gcc_assert (vectype);
1855 /* The rightmost bits of an aligned address must be zeros.
1856 Construct the mask needed for this test. For example,
1857 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1858 mask must be 15 = 0xf. */
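/* As a sketch (not the exact code emitted later): with 16-byte
   vectors the versioned loop is guarded by a test of the form

     if (((addr_1 | addr_2 | ... | addr_n) & 15) == 0)
       ... aligned (vectorized) version ...
     else
       ... scalar version ...

   where addr_1..addr_n are the potentially misaligned addresses
   collected below in LOOP_VINFO_MAY_MISALIGN_STMTS.  */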
1859 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1861 /* FORNOW: use the same mask to test all potentially unaligned
1862 references in the loop. The vectorizer currently supports
1863 a single vector size, see the reference to
1864 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1865 vectorization factor is computed. */
1866 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1867 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1868 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1869 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1870 DR_STMT (dr));
1874 /* Versioning requires at least one misaligned data reference. */
1875 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1876 do_versioning = false;
1877 else if (!do_versioning)
1878 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1881 if (do_versioning)
1883 vec<gimple *> may_misalign_stmts
1884 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1885 gimple *stmt;
1887 /* It can now be assumed that the data references in the statements
1888 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1889 of the loop being vectorized. */
1890 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1892 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1893 dr = STMT_VINFO_DATA_REF (stmt_info);
1894 SET_DR_MISALIGNMENT (dr, 0);
1895 if (dump_enabled_p ())
1896 dump_printf_loc (MSG_NOTE, vect_location,
1897 "Alignment of access forced using versioning.\n");
1900 if (dump_enabled_p ())
1901 dump_printf_loc (MSG_NOTE, vect_location,
1902 "Versioning for alignment will be applied.\n");
1904 /* Peeling and versioning can't be done together at this time. */
1905 gcc_assert (! (do_peeling && do_versioning));
1907 stat = vect_verify_datarefs_alignment (loop_vinfo);
1908 gcc_assert (stat);
1909 return stat;
1912 /* This point is reached if neither peeling nor versioning is being done. */
1913 gcc_assert (! (do_peeling || do_versioning));
1915 stat = vect_verify_datarefs_alignment (loop_vinfo);
1916 return stat;
1920 /* Function vect_find_same_alignment_drs.
1922 Update group and alignment relations according to the chosen
1923 vectorization factor. */
1925 static void
1926 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1927 loop_vec_info loop_vinfo)
1929 unsigned int i;
1930 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1931 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1932 struct data_reference *dra = DDR_A (ddr);
1933 struct data_reference *drb = DDR_B (ddr);
1934 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1935 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1936 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1937 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1938 lambda_vector dist_v;
1939 unsigned int loop_depth;
1941 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1942 return;
1944 if (dra == drb)
1945 return;
1947 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1948 return;
1950 /* Loop-based vectorization and known data dependence. */
1951 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1952 return;
1954 /* Data-dependence analysis reports a distance vector of zero
1955 for data-references that overlap only in the first iteration
1956 but have steps of different sign (see PR45764).
1957 So as a sanity check require equal DR_STEP. */
1958 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1959 return;
1961 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1962 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1964 int dist = dist_v[loop_depth];
1966 if (dump_enabled_p ())
1967 dump_printf_loc (MSG_NOTE, vect_location,
1968 "dependence distance = %d.\n", dist);
1970 /* Same loop iteration. */
1971 if (dist == 0
1972 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1974 /* Two references with distance zero, or whose distance is a multiple of the vectorization factor (with equal element sizes), have the same alignment. */
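/* For example, with 4-byte elements and VF = 4, accesses a[i] and
   a[i+4] have dependence distance 4; 4 % 4 == 0 and the element
   sizes match, so the two references are exactly one vector
   (16 bytes) apart and necessarily share the same alignment.  */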
1975 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1976 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
1977 if (dump_enabled_p ())
1979 dump_printf_loc (MSG_NOTE, vect_location,
1980 "accesses have the same alignment.\n");
1981 dump_printf (MSG_NOTE,
1982 "dependence distance modulo vf == 0 between ");
1983 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1984 dump_printf (MSG_NOTE, " and ");
1985 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1986 dump_printf (MSG_NOTE, "\n");
1993 /* Function vect_analyze_data_refs_alignment
1995 Analyze the alignment of the data-references in the loop.
1996 Return FALSE if a data reference is found that cannot be vectorized. */
1998 bool
1999 vect_analyze_data_refs_alignment (vec_info *vinfo)
2001 if (dump_enabled_p ())
2002 dump_printf_loc (MSG_NOTE, vect_location,
2003 "=== vect_analyze_data_refs_alignment ===\n");
2005 /* Mark groups of data references with same alignment using
2006 data dependence information. */
2007 if (is_a <loop_vec_info> (vinfo))
2009 vec<ddr_p> ddrs = vinfo->ddrs;
2010 struct data_dependence_relation *ddr;
2011 unsigned int i;
2013 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2014 vect_find_same_alignment_drs (ddr, as_a <loop_vec_info> (vinfo));
2017 if (!vect_compute_data_refs_alignment (vinfo))
2019 if (dump_enabled_p ())
2020 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2021 "not vectorized: can't calculate alignment "
2022 "for data ref.\n");
2023 return false;
2026 return true;
2030 /* Analyze groups of accesses: check that DR belongs to a group of
2031 accesses of legal size, step, etc. Detect gaps, single element
2032 interleaving, and other special cases. Set grouped access info.
2033 Collect groups of strided stores for further use in SLP analysis.
2034 Worker for vect_analyze_group_access. */
2036 static bool
2037 vect_analyze_group_access_1 (struct data_reference *dr)
2039 tree step = DR_STEP (dr);
2040 tree scalar_type = TREE_TYPE (DR_REF (dr));
2041 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2042 gimple *stmt = DR_STMT (dr);
2043 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2044 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2045 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2046 HOST_WIDE_INT dr_step = -1;
2047 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2048 bool slp_impossible = false;
2049 struct loop *loop = NULL;
2051 if (loop_vinfo)
2052 loop = LOOP_VINFO_LOOP (loop_vinfo);
2054 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2055 size of the interleaving group (including gaps). */
2056 if (tree_fits_shwi_p (step))
2058 dr_step = tree_to_shwi (step);
2059 groupsize = absu_hwi (dr_step) / type_size;
2061 else
2062 groupsize = 0;
2064 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2065 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2067 /* Check if this DR is a part of interleaving, and is a single
2068 element of the group that is accessed in the loop. */
2070 /* Gaps are supported only for loads. STEP must be a multiple of the type
2071 size. The size of the group must be a power of 2. */
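/* For example, a loop that only reads a[4*i] (4-byte elements,
   step 16 bytes) accesses a single element of an interleaving
   group of size 16 / 4 = 4; the other three elements of each
   group are gaps.  */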
2072 if (DR_IS_READ (dr)
2073 && (dr_step % type_size) == 0
2074 && groupsize > 0
2075 && exact_log2 (groupsize) != -1)
2077 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2078 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2079 if (dump_enabled_p ())
2081 dump_printf_loc (MSG_NOTE, vect_location,
2082 "Detected single element interleaving ");
2083 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2084 dump_printf (MSG_NOTE, " step ");
2085 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2086 dump_printf (MSG_NOTE, "\n");
2089 if (loop_vinfo)
2091 if (dump_enabled_p ())
2092 dump_printf_loc (MSG_NOTE, vect_location,
2093 "Data access with gaps requires scalar "
2094 "epilogue loop\n");
2095 if (loop->inner)
2097 if (dump_enabled_p ())
2098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2099 "Peeling for outer loop is not"
2100 " supported\n");
2101 return false;
2104 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2107 return true;
2110 if (dump_enabled_p ())
2112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2113 "not consecutive access ");
2114 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2117 if (bb_vinfo)
2119 /* Mark the statement as unvectorizable. */
2120 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2121 return true;
2124 dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
2125 STMT_VINFO_STRIDED_P (stmt_info) = true;
2126 return true;
2129 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2131 /* First stmt in the interleaving chain. Check the chain. */
2132 gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2133 struct data_reference *data_ref = dr;
2134 unsigned int count = 1;
2135 tree prev_init = DR_INIT (data_ref);
2136 gimple *prev = stmt;
2137 HOST_WIDE_INT diff, gaps = 0;
2139 while (next)
2141 /* Skip same data-refs. If two or more stmts share the same
2142 data-ref (supported only for loads), we vectorize only the first
2143 stmt, and the rest get their vectorized loads from the first
2144 one. */
2145 if (!tree_int_cst_compare (DR_INIT (data_ref),
2146 DR_INIT (STMT_VINFO_DATA_REF (
2147 vinfo_for_stmt (next)))))
2149 if (DR_IS_WRITE (data_ref))
2151 if (dump_enabled_p ())
2152 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2153 "Two store stmts share the same dr.\n");
2154 return false;
2157 if (dump_enabled_p ())
2158 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2159 "Two or more load stmts share the same dr.\n");
2161 /* For load use the same data-ref load. */
2162 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2164 prev = next;
2165 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2166 continue;
2169 prev = next;
2170 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2172 /* All group members have the same STEP by construction. */
2173 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2175 /* Check that the distance between two accesses is equal to the type
2176 size. Otherwise, we have gaps. */
2177 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2178 - TREE_INT_CST_LOW (prev_init)) / type_size;
2179 if (diff != 1)
2181 /* FORNOW: SLP of accesses with gaps is not supported. */
2182 slp_impossible = true;
2183 if (DR_IS_WRITE (data_ref))
2185 if (dump_enabled_p ())
2186 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2187 "interleaved store with gaps\n");
2188 return false;
2191 gaps += diff - 1;
2194 last_accessed_element += diff;
2196 /* Store the gap from the previous member of the group. If there is no
2197 gap in the access, GROUP_GAP is always 1. */
2198 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2200 prev_init = DR_INIT (data_ref);
2201 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2202 /* Count the number of data-refs in the chain. */
2203 count++;
2206 if (groupsize == 0)
2207 groupsize = count + gaps;
2209 if (groupsize > UINT_MAX)
2211 if (dump_enabled_p ())
2212 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2213 "group is too large\n");
2214 return false;
2217 /* Check that the size of the interleaving is equal to count for stores,
2218 i.e., that there are no gaps. */
2219 if (groupsize != count
2220 && !DR_IS_READ (dr))
2222 if (dump_enabled_p ())
2223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2224 "interleaved store with gaps\n");
2225 return false;
2228 /* If there is a gap after the last load in the group it is the
2229 difference between the groupsize and the last accessed
2230 element.
2231 When there is no gap, this difference should be 0. */
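/* For example, for a chain of 4-byte loads a[3*i] and a[3*i+1]
   (step 12 bytes, so groupsize = 3) the last accessed element is
   the second one, leaving a gap of 3 - 2 = 1 element after the
   group.  */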
2232 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
2234 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2235 if (dump_enabled_p ())
2237 dump_printf_loc (MSG_NOTE, vect_location,
2238 "Detected interleaving ");
2239 if (DR_IS_READ (dr))
2240 dump_printf (MSG_NOTE, "load ");
2241 else
2242 dump_printf (MSG_NOTE, "store ");
2243 dump_printf (MSG_NOTE, "of size %u starting with ",
2244 (unsigned)groupsize);
2245 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2246 if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
2247 dump_printf_loc (MSG_NOTE, vect_location,
2248 "There is a gap of %u elements after the group\n",
2249 GROUP_GAP (vinfo_for_stmt (stmt)));
2252 /* SLP: create an SLP data structure for every interleaving group of
2253 stores for further analysis in vect_analyze_slp. */
2254 if (DR_IS_WRITE (dr) && !slp_impossible)
2256 if (loop_vinfo)
2257 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2258 if (bb_vinfo)
2259 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2262 /* If there is a gap at the end of the group or the group size cannot
2263 be made a multiple of the vector element count then we access excess
2264 elements in the last iteration and thus need to peel that off. */
2265 if (loop_vinfo
2266 && (groupsize - last_accessed_element > 0
2267 || exact_log2 (groupsize) == -1))
2270 if (dump_enabled_p ())
2271 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2272 "Data access with gaps requires scalar "
2273 "epilogue loop\n");
2274 if (loop->inner)
2276 if (dump_enabled_p ())
2277 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2278 "Peeling for outer loop is not supported\n");
2279 return false;
2282 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2286 return true;
2289 /* Analyze groups of accesses: check that DR belongs to a group of
2290 accesses of legal size, step, etc. Detect gaps, single element
2291 interleaving, and other special cases. Set grouped access info.
2292 Collect groups of strided stores for further use in SLP analysis. */
2294 static bool
2295 vect_analyze_group_access (struct data_reference *dr)
2297 if (!vect_analyze_group_access_1 (dr))
2299 /* Dissolve the group if present. */
2300 gimple *next;
2301 gimple *stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
2302 while (stmt)
2304 stmt_vec_info vinfo = vinfo_for_stmt (stmt);
2305 next = GROUP_NEXT_ELEMENT (vinfo);
2306 GROUP_FIRST_ELEMENT (vinfo) = NULL;
2307 GROUP_NEXT_ELEMENT (vinfo) = NULL;
2308 stmt = next;
2310 return false;
2312 return true;
2315 /* Analyze the access pattern of the data-reference DR.
2316 In case of non-consecutive accesses call vect_analyze_group_access() to
2317 analyze groups of accesses. */
2319 static bool
2320 vect_analyze_data_ref_access (struct data_reference *dr)
2322 tree step = DR_STEP (dr);
2323 tree scalar_type = TREE_TYPE (DR_REF (dr));
2324 gimple *stmt = DR_STMT (dr);
2325 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2326 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2327 struct loop *loop = NULL;
2329 if (loop_vinfo)
2330 loop = LOOP_VINFO_LOOP (loop_vinfo);
2332 if (loop_vinfo && !step)
2334 if (dump_enabled_p ())
2335 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2336 "bad data-ref access in loop\n");
2337 return false;
2340 /* Allow loads with zero step in inner-loop vectorization. */
2341 if (loop_vinfo && integer_zerop (step))
2343 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2344 if (!nested_in_vect_loop_p (loop, stmt))
2345 return DR_IS_READ (dr);
2346 /* Allow references with zero step for outer loops marked
2347 with pragma omp simd only - it guarantees absence of
2348 loop-carried dependencies between inner loop iterations. */
2349 if (!loop->force_vectorize)
2351 if (dump_enabled_p ())
2352 dump_printf_loc (MSG_NOTE, vect_location,
2353 "zero step in inner loop of nest\n");
2354 return false;
2358 if (loop && nested_in_vect_loop_p (loop, stmt))
2360 /* Interleaved accesses are not yet supported within outer-loop
2361 vectorization for references in the inner-loop. */
2362 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2364 /* For the rest of the analysis we use the outer-loop step. */
2365 step = STMT_VINFO_DR_STEP (stmt_info);
2366 if (integer_zerop (step))
2368 if (dump_enabled_p ())
2369 dump_printf_loc (MSG_NOTE, vect_location,
2370 "zero step in outer loop.\n");
2371 return DR_IS_READ (dr);
2375 /* Consecutive? */
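/* For example, a 4-byte access with DR_STEP 4 (e.g. a[i] with int
   elements) is consecutive; DR_STEP -4 is also accepted here, as a
   reverse access whose |step| equals the element size.  */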
2376 if (TREE_CODE (step) == INTEGER_CST)
2378 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2379 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2380 || (dr_step < 0
2381 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2383 /* Mark that it is not interleaving. */
2384 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2385 return true;
2389 if (loop && nested_in_vect_loop_p (loop, stmt))
2391 if (dump_enabled_p ())
2392 dump_printf_loc (MSG_NOTE, vect_location,
2393 "grouped access in outer loop.\n");
2394 return false;
2398 /* Assume this is a DR handled by the non-constant strided load case. */
2399 if (TREE_CODE (step) != INTEGER_CST)
2400 return (STMT_VINFO_STRIDED_P (stmt_info)
2401 && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
2402 || vect_analyze_group_access (dr)));
2404 /* Not consecutive access - check if it's a part of interleaving group. */
2405 return vect_analyze_group_access (dr);
2410 /* A helper function used in the comparator function to sort data
2411 references. T1 and T2 are two trees to be compared.
2412 The function returns -1, 0, or 1. */
2414 static int
2415 compare_tree (tree t1, tree t2)
2417 int i, cmp;
2418 enum tree_code code;
2419 char tclass;
2421 if (t1 == t2)
2422 return 0;
2423 if (t1 == NULL)
2424 return -1;
2425 if (t2 == NULL)
2426 return 1;
2429 if (TREE_CODE (t1) != TREE_CODE (t2))
2430 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2432 code = TREE_CODE (t1);
2433 switch (code)
2435 /* For const values, we can just use hash values for comparisons. */
2436 case INTEGER_CST:
2437 case REAL_CST:
2438 case FIXED_CST:
2439 case STRING_CST:
2440 case COMPLEX_CST:
2441 case VECTOR_CST:
2443 hashval_t h1 = iterative_hash_expr (t1, 0);
2444 hashval_t h2 = iterative_hash_expr (t2, 0);
2445 if (h1 != h2)
2446 return h1 < h2 ? -1 : 1;
2447 break;
2450 case SSA_NAME:
2451 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2452 if (cmp != 0)
2453 return cmp;
2455 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2456 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2457 break;
2459 default:
2460 tclass = TREE_CODE_CLASS (code);
2462 /* For var-decl, we could compare their UIDs. */
2463 if (tclass == tcc_declaration)
2465 if (DECL_UID (t1) != DECL_UID (t2))
2466 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2467 break;
2470 /* For expressions with operands, compare their operands recursively. */
2471 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2473 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2474 if (cmp != 0)
2475 return cmp;
2479 return 0;
2483 /* Compare two data-references DRA and DRB to group them into chunks
2484 suitable for building interleaving groups. */
2486 static int
2487 dr_group_sort_cmp (const void *dra_, const void *drb_)
2489 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2490 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2491 int cmp;
2493 /* Stabilize sort. */
2494 if (dra == drb)
2495 return 0;
2497 /* Ordering of DRs according to base. */
2498 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2500 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2501 if (cmp != 0)
2502 return cmp;
2505 /* And according to DR_OFFSET. */
2506 if (!dr_equal_offsets_p (dra, drb))
2508 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2509 if (cmp != 0)
2510 return cmp;
2513 /* Put reads before writes. */
2514 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2515 return DR_IS_READ (dra) ? -1 : 1;
2517 /* Then sort by access size. */
2518 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2519 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2521 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2522 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2523 if (cmp != 0)
2524 return cmp;
2527 /* And by step. */
2528 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2530 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2531 if (cmp != 0)
2532 return cmp;
2535 /* Then sort by DR_INIT. In case of identical DRs sort by stmt UID. */
2536 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2537 if (cmp == 0)
2538 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2539 return cmp;
2542 /* Function vect_analyze_data_ref_accesses.
2544 Analyze the access pattern of all the data references in the loop.
2546 FORNOW: the only access pattern that is considered vectorizable is a
2547 simple step 1 (consecutive) access.
2549 FORNOW: handle only arrays and pointer accesses. */
2551 bool
2552 vect_analyze_data_ref_accesses (vec_info *vinfo)
2554 unsigned int i;
2555 vec<data_reference_p> datarefs = vinfo->datarefs;
2556 struct data_reference *dr;
2558 if (dump_enabled_p ())
2559 dump_printf_loc (MSG_NOTE, vect_location,
2560 "=== vect_analyze_data_ref_accesses ===\n");
2562 if (datarefs.is_empty ())
2563 return true;
2565 /* Sort the array of datarefs to make building the interleaving chains
2566 linear. Don't modify the original vector's order; it is needed for
2567 determining what dependencies are reversed. */
2568 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2569 datarefs_copy.qsort (dr_group_sort_cmp);
2571 /* Build the interleaving chains. */
2572 for (i = 0; i < datarefs_copy.length () - 1;)
2574 data_reference_p dra = datarefs_copy[i];
2575 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2576 stmt_vec_info lastinfo = NULL;
2577 for (i = i + 1; i < datarefs_copy.length (); ++i)
2579 data_reference_p drb = datarefs_copy[i];
2580 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2582 /* ??? Imperfect sorting (non-compatible types, non-modulo
2583 accesses, same accesses) can lead to a group being artificially
2584 split here as we don't just skip over those. If it really
2585 matters we can push those to a worklist and re-iterate
2586 over them. Then we could just skip ahead to the next DR here. */
2588 /* Check that the data-refs have the same first location (except init)
2589 and that they are both either stores or loads (not a load and a store,
2590 nor masked loads or stores). */
2591 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2592 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2593 DR_BASE_ADDRESS (drb), 0)
2594 || !dr_equal_offsets_p (dra, drb)
2595 || !gimple_assign_single_p (DR_STMT (dra))
2596 || !gimple_assign_single_p (DR_STMT (drb)))
2597 break;
2599 /* Check that the data-refs have the same constant size. */
2600 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2601 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2602 if (!tree_fits_uhwi_p (sza)
2603 || !tree_fits_uhwi_p (szb)
2604 || !tree_int_cst_equal (sza, szb))
2605 break;
2607 /* Check that the data-refs have the same step. */
2608 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2609 break;
2611 /* Do not place the same access in the interleaving chain twice. */
2612 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2613 break;
2615 /* Check the types are compatible.
2616 ??? We don't distinguish this during sorting. */
2617 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2618 TREE_TYPE (DR_REF (drb))))
2619 break;
2621 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2622 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2623 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2624 gcc_assert (init_a < init_b);
2626 /* If init_b == init_a + the size of the type * k, we have an
2627 interleaving, and DRA is accessed before DRB. */
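/* For example, 4-byte loads p[2*i] and p[2*i+1] have DR_INIT 0 and
   4 and the same step 8; (4 - 0) % 4 == 0, so the two refs are
   linked below into a single interleaving group headed by DRA.  */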
2628 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2629 if ((init_b - init_a) % type_size_a != 0)
2630 break;
2632 /* If we have a store, require that the accesses be adjacent. This splits
2633 groups into chunks we support (we don't support vectorization
2634 of stores with gaps). */
2635 if (!DR_IS_READ (dra)
2636 && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
2637 (DR_INIT (datarefs_copy[i-1]))
2638 != type_size_a))
2639 break;
2641 /* If the step (if not zero or non-constant) is greater than the
2642 difference between data-refs' inits this splits groups into
2643 suitable sizes. */
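/* For example, with 4-byte elements and step 8 (p[2*i] and
   p[2*i+3]), the inits differ by 12 >= 8: the second access already
   falls into the next iteration's range, so the chain is split
   here.  */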
2644 if (tree_fits_shwi_p (DR_STEP (dra)))
2646 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2647 if (step != 0 && step <= (init_b - init_a))
2648 break;
2651 if (dump_enabled_p ())
2653 dump_printf_loc (MSG_NOTE, vect_location,
2654 "Detected interleaving ");
2655 if (DR_IS_READ (dra))
2656 dump_printf (MSG_NOTE, "load ");
2657 else
2658 dump_printf (MSG_NOTE, "store ");
2659 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2660 dump_printf (MSG_NOTE, " and ");
2661 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2662 dump_printf (MSG_NOTE, "\n");
2665 /* Link the found element into the group list. */
2666 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2668 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2669 lastinfo = stmtinfo_a;
2671 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2672 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2673 lastinfo = stmtinfo_b;
2677 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2678 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2679 && !vect_analyze_data_ref_access (dr))
2681 if (dump_enabled_p ())
2682 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2683 "not vectorized: complicated access pattern.\n");
2685 if (is_a <bb_vec_info> (vinfo))
2687 /* Mark the statement as not vectorizable. */
2688 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2689 continue;
2691 else
2693 datarefs_copy.release ();
2694 return false;
2698 datarefs_copy.release ();
2699 return true;
2703 /* Operator == between two dr_with_seg_len objects.
2705 This equality operator is used to make sure two data refs
2706 are the same so that we will consider combining the
2707 aliasing checks of the two pairs of data-dependent data
2708 refs. */
2710 static bool
2711 operator == (const dr_with_seg_len& d1,
2712 const dr_with_seg_len& d2)
2714 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2715 DR_BASE_ADDRESS (d2.dr), 0)
2716 && compare_tree (d1.offset, d2.offset) == 0
2717 && compare_tree (d1.seg_len, d2.seg_len) == 0;
2720 /* Function comp_dr_with_seg_len_pair.
2722 Comparison function for sorting objects of dr_with_seg_len_pair_t
2723 so that we can combine aliasing checks in one scan. */
2725 static int
2726 comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
2728 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2729 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2731 const dr_with_seg_len &p11 = p1->first,
2732 &p12 = p1->second,
2733 &p21 = p2->first,
2734 &p22 = p2->second;
2736 /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
2737 if a and c have the same base address and step, and b and d have the same
2738 address and step. Therefore, if either a&c or b&d don't have the same address
2739 and step, we don't care about the order of those two pairs after sorting. */
2740 int comp_res;
2742 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2743 DR_BASE_ADDRESS (p21.dr))) != 0)
2744 return comp_res;
2745 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2746 DR_BASE_ADDRESS (p22.dr))) != 0)
2747 return comp_res;
2748 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2749 return comp_res;
2750 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2751 return comp_res;
2752 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2753 return comp_res;
2754 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
2755 return comp_res;
2757 return 0;
2760 /* Function vect_vfa_segment_size.
2762 Create an expression that computes the size of the segment
2763 that will be accessed for a data reference. The function takes into
2764 account that realignment loads may access one more vector.
2766 Input:
2767 DR: The data reference.
2768 LENGTH_FACTOR: segment length to consider.
2770 Return an expression whose value is the size of segment which will be
2771 accessed by DR. */
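/* For example, for a 4-byte access with DR_STEP 4 and LENGTH_FACTOR
   equal to the vectorization factor VF, the segment is 4 * VF bytes;
   for a dr_explicit_realign_optimized access one extra vector size
   is added below, since such realignment loads may access one more
   vector.  */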
2773 static tree
2774 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2776 tree segment_length;
2778 if (integer_zerop (DR_STEP (dr)))
2779 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2780 else
2781 segment_length = size_binop (MULT_EXPR,
2782 fold_convert (sizetype, DR_STEP (dr)),
2783 fold_convert (sizetype, length_factor));
2785 if (vect_supportable_dr_alignment (dr, false)
2786 == dr_explicit_realign_optimized)
2788 tree vector_size = TYPE_SIZE_UNIT
2789 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2791 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2793 return segment_length;
2796 /* Function vect_prune_runtime_alias_test_list.
2798 Prune a list of ddrs to be tested at run-time by versioning for alias.
2799 Merge several alias checks into one if possible.
2800 Return FALSE if the resulting list of ddrs is longer than allowed by
2801 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2803 bool
2804 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2806 vec<ddr_p> may_alias_ddrs =
2807 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2808 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
2809 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2810 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2811 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2813 ddr_p ddr;
2814 unsigned int i;
2815 tree length_factor;
2817 if (dump_enabled_p ())
2818 dump_printf_loc (MSG_NOTE, vect_location,
2819 "=== vect_prune_runtime_alias_test_list ===\n");
2821 if (may_alias_ddrs.is_empty ())
2822 return true;
2824 /* Basically, for each pair of dependent data refs store_ptr_0
2825 and load_ptr_0, we create an expression:
2827 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2828 || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0)
2830 for aliasing checks. However, in some cases we can decrease
2831 the number of checks by combining two checks into one. For
2832 example, suppose we have another pair of data refs store_ptr_0
2833 and load_ptr_1, and if the following condition is satisfied:
2835 load_ptr_0 < load_ptr_1 &&
2836 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2838 (this condition means that, in each iteration of the vectorized loop,
2839 the memory accessed through store_ptr_0 cannot lie between the memory
2840 of load_ptr_0 and load_ptr_1.)
2842 we then can use only the following expression to finish the
2843 aliasing checks between store_ptr_0 & load_ptr_0 and
2844 store_ptr_0 & load_ptr_1:
2846 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2847 || ((load_ptr_1 + load_segment_length_1) <= store_ptr_0)
2849 Note that we only consider the case where load_ptr_0 and load_ptr_1 have the
2850 same base address. */
2852 comp_alias_ddrs.create (may_alias_ddrs.length ());
2854 /* First, we collect all data ref pairs for aliasing checks. */
2855 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2857 struct data_reference *dr_a, *dr_b;
2858 gimple *dr_group_first_a, *dr_group_first_b;
2859 tree segment_length_a, segment_length_b;
2860 gimple *stmt_a, *stmt_b;
2862 dr_a = DDR_A (ddr);
2863 stmt_a = DR_STMT (DDR_A (ddr));
2864 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2865 if (dr_group_first_a)
2867 stmt_a = dr_group_first_a;
2868 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2871 dr_b = DDR_B (ddr);
2872 stmt_b = DR_STMT (DDR_B (ddr));
2873 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2874 if (dr_group_first_b)
2876 stmt_b = dr_group_first_b;
2877 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2880 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2881 length_factor = scalar_loop_iters;
2882 else
2883 length_factor = size_int (vect_factor);
2884 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2885 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2887 dr_with_seg_len_pair_t dr_with_seg_len_pair
2888 (dr_with_seg_len (dr_a, segment_length_a),
2889 dr_with_seg_len (dr_b, segment_length_b));
2891 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
2892 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2894 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2897 /* Second, we sort the collected data ref pairs so that we can scan
2898 them once to combine all possible aliasing checks. */
2899 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
2901 /* Third, we scan the sorted dr pairs and check if we can combine
2902 alias checks of two neighbouring dr pairs. */
2903 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2905 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2906 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2907 *dr_b1 = &comp_alias_ddrs[i-1].second,
2908 *dr_a2 = &comp_alias_ddrs[i].first,
2909 *dr_b2 = &comp_alias_ddrs[i].second;
2911 /* Remove duplicate data ref pairs. */
2912 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2914 if (dump_enabled_p ())
2916 dump_printf_loc (MSG_NOTE, vect_location,
2917 "found equal ranges ");
2918 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2919 DR_REF (dr_a1->dr));
2920 dump_printf (MSG_NOTE, ", ");
2921 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2922 DR_REF (dr_b1->dr));
2923 dump_printf (MSG_NOTE, " and ");
2924 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2925 DR_REF (dr_a2->dr));
2926 dump_printf (MSG_NOTE, ", ");
2927 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2928 DR_REF (dr_b2->dr));
2929 dump_printf (MSG_NOTE, "\n");
2932 comp_alias_ddrs.ordered_remove (i--);
2933 continue;
2936 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2938 /* We consider the case where DR_B1 and DR_B2 are the same memref,
2939 and DR_A1 and DR_A2 are two consecutive memrefs. */
2940 if (*dr_a1 == *dr_a2)
2942 std::swap (dr_a1, dr_b1);
2943 std::swap (dr_a2, dr_b2);
2946 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2947 DR_BASE_ADDRESS (dr_a2->dr),
2949 || !tree_fits_shwi_p (dr_a1->offset)
2950 || !tree_fits_shwi_p (dr_a2->offset))
2951 continue;
2953 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2954 - tree_to_shwi (dr_a1->offset));
2957 /* Now we check if the following condition is satisfied:
2959 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2961 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2962 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2963 have to make a conservative estimate. We can get the minimum value
2964 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2965 then either of the following two conditions can guarantee the
2966 one above:
2968 1: DIFF <= MIN_SEG_LEN_B
2969 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
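     As a concrete example (assuming 4-byte accesses and VF = 4, so
     the constant segment lengths are 16 bytes): for DR_A1 = a[i] and
     DR_A2 = a[i+1] we have DIFF = 4 and MIN_SEG_LEN_B = 16, so
     condition 1 holds and the two checks are merged below, with
     DR_A1's segment length becoming DR_A2's segment length plus 4.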
2973 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2974 ? tree_to_shwi (dr_b1->seg_len)
2975 : vect_factor);
2977 if (diff <= min_seg_len_b
2978 || (tree_fits_shwi_p (dr_a1->seg_len)
2979 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
2981 if (dump_enabled_p ())
2983 dump_printf_loc (MSG_NOTE, vect_location,
2984 "merging ranges for ");
2985 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2986 DR_REF (dr_a1->dr));
2987 dump_printf (MSG_NOTE, ", ");
2988 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2989 DR_REF (dr_b1->dr));
2990 dump_printf (MSG_NOTE, " and ");
2991 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2992 DR_REF (dr_a2->dr));
2993 dump_printf (MSG_NOTE, ", ");
2994 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2995 DR_REF (dr_b2->dr));
2996 dump_printf (MSG_NOTE, "\n");
2999 dr_a1->seg_len = size_binop (PLUS_EXPR,
3000 dr_a2->seg_len, size_int (diff));
3001 comp_alias_ddrs.ordered_remove (i--);
3006 dump_printf_loc (MSG_NOTE, vect_location,
3007 "improved number of alias checks from %d to %d\n",
3008 may_alias_ddrs.length (), comp_alias_ddrs.length ());
3009 if ((int) comp_alias_ddrs.length () >
3010 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
3011 return false;
3013 return true;
3016 /* Check whether a non-affine read or write in stmt is suitable for gather load
3017 or scatter store and if so, return a builtin decl for that operation. */
3019 tree
3020 vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo, tree *basep,
3021 tree *offp, int *scalep)
3023 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
3024 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3025 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3026 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3027 tree offtype = NULL_TREE;
3028 tree decl, base, off;
3029 machine_mode pmode;
3030 int punsignedp, pvolatilep;
3032 base = DR_REF (dr);
3033 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
3034 see if we can use the def stmt of the address. */
3035 if (is_gimple_call (stmt)
3036 && gimple_call_internal_p (stmt)
3037 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
3038 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
3039 && TREE_CODE (base) == MEM_REF
3040 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
3041 && integer_zerop (TREE_OPERAND (base, 1))
3042 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
3044 gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3045 if (is_gimple_assign (def_stmt)
3046 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3047 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3050 /* The gather and scatter builtins need an address of the form
3051 loop_invariant + vector * {1, 2, 4, 8}
3052 or
3053 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3054 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3055 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3056 multiplications and additions in it. To get a vector, we need
3057 a single SSA_NAME that will be defined in the loop and will
3058 contain everything that is not loop invariant and that can be
3059 vectorized. The following code attempts to find such a preexisting
3060 SSA_NAME OFF and put the loop invariants into a tree BASE
3061 that can be gimplified before the loop. */
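/* For example, in a loop like

     for (i = 0; i < n; i++)
       sum += a[idx[i]];

   (an illustrative loop, not from this file) the access decomposes
   into BASE = &a (loop invariant), OFF = the SSA_NAME holding idx[i]
   (defined in the loop), and a scale equal to the element size of a,
   which matches the form expected by the gather builtins.  */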
3062 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
3063 &pmode, &punsignedp, &pvolatilep, false);
3064 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
3066 if (TREE_CODE (base) == MEM_REF)
3068 if (!integer_zerop (TREE_OPERAND (base, 1)))
3070 if (off == NULL_TREE)
3072 offset_int moff = mem_ref_offset (base);
3073 off = wide_int_to_tree (sizetype, moff);
3075 else
3076 off = size_binop (PLUS_EXPR, off,
3077 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3079 base = TREE_OPERAND (base, 0);
3081 else
3082 base = build_fold_addr_expr (base);
3084 if (off == NULL_TREE)
3085 off = size_zero_node;
3087 /* If base is not loop invariant, then if off is 0 we start with just
3088 the constant offset in the loop invariant BASE and continue with base
3089 as OFF; otherwise we give up.
3090 We could handle that case by gimplifying the addition of base + off
3091 into some SSA_NAME and using that as off, but for now we punt. */
3092 if (!expr_invariant_in_loop_p (loop, base))
3094 if (!integer_zerop (off))
3095 return NULL_TREE;
3096 off = base;
3097 base = size_int (pbitpos / BITS_PER_UNIT);
3099 /* Otherwise put base + constant offset into the loop invariant BASE
3100 and continue with OFF. */
3101 else
3103 base = fold_convert (sizetype, base);
3104 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3107 /* OFF at this point may be either a SSA_NAME or some tree expression
3108 from get_inner_reference. Try to peel off loop invariants from it
3109 into BASE as long as possible. */
3110 STRIP_NOPS (off);
3111 while (offtype == NULL_TREE)
3113 enum tree_code code;
3114 tree op0, op1, add = NULL_TREE;
3116 if (TREE_CODE (off) == SSA_NAME)
3118 gimple *def_stmt = SSA_NAME_DEF_STMT (off);
3120 if (expr_invariant_in_loop_p (loop, off))
3121 return NULL_TREE;
3123 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3124 break;
3126 op0 = gimple_assign_rhs1 (def_stmt);
3127 code = gimple_assign_rhs_code (def_stmt);
3128 op1 = gimple_assign_rhs2 (def_stmt);
3130 else
3132 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3133 return NULL_TREE;
3134 code = TREE_CODE (off);
3135 extract_ops_from_tree (off, &code, &op0, &op1);
3137 switch (code)
3139 case POINTER_PLUS_EXPR:
3140 case PLUS_EXPR:
3141 if (expr_invariant_in_loop_p (loop, op0))
3143 add = op0;
3144 off = op1;
3145 do_add:
3146 add = fold_convert (sizetype, add);
3147 if (scale != 1)
3148 add = size_binop (MULT_EXPR, add, size_int (scale));
3149 base = size_binop (PLUS_EXPR, base, add);
3150 continue;
3152 if (expr_invariant_in_loop_p (loop, op1))
3154 add = op1;
3155 off = op0;
3156 goto do_add;
3158 break;
3159 case MINUS_EXPR:
3160 if (expr_invariant_in_loop_p (loop, op1))
3162 add = fold_convert (sizetype, op1);
3163 add = size_binop (MINUS_EXPR, size_zero_node, add);
3164 off = op0;
3165 goto do_add;
3167 break;
3168 case MULT_EXPR:
3169 if (scale == 1 && tree_fits_shwi_p (op1))
3171 scale = tree_to_shwi (op1);
3172 off = op0;
3173 continue;
3175 break;
3176 case SSA_NAME:
3177 off = op0;
3178 continue;
3179 CASE_CONVERT:
3180 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3181 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3182 break;
3183 if (TYPE_PRECISION (TREE_TYPE (op0))
3184 == TYPE_PRECISION (TREE_TYPE (off)))
3186 off = op0;
3187 continue;
3189 if (TYPE_PRECISION (TREE_TYPE (op0))
3190 < TYPE_PRECISION (TREE_TYPE (off)))
3192 off = op0;
3193 offtype = TREE_TYPE (off);
3194 STRIP_NOPS (off);
3195 continue;
3197 break;
3198 default:
3199 break;
3201 break;
3204 /* If at the end OFF still isn't a SSA_NAME or isn't
3205 defined in the loop, punt. */
3206 if (TREE_CODE (off) != SSA_NAME
3207 || expr_invariant_in_loop_p (loop, off))
3208 return NULL_TREE;
3210 if (offtype == NULL_TREE)
3211 offtype = TREE_TYPE (off);
3213 if (DR_IS_READ (dr))
3214 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3215 offtype, scale);
3216 else
3217 decl = targetm.vectorize.builtin_scatter (STMT_VINFO_VECTYPE (stmt_info),
3218 offtype, scale);
3220 if (decl == NULL_TREE)
3221 return NULL_TREE;
3223 if (basep)
3224 *basep = base;
3225 if (offp)
3226 *offp = off;
3227 if (scalep)
3228 *scalep = scale;
3229 return decl;
3232 /* Function vect_analyze_data_refs.
3234 Find all the data references in the loop or basic block.
3236 The general structure of the analysis of data refs in the vectorizer is as
3237 follows:
3238 1- vect_analyze_data_refs(loop/bb): call
3239 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3240 in the loop/bb and their dependences.
3241 2- vect_analyze_data_ref_dependences(): apply dependence testing using ddrs.
3242 3- vect_analyze_data_refs_alignment(): check that ref_stmt.alignment is ok.
3243 4- vect_analyze_data_ref_accesses(): check that ref_stmt.step is ok.
3247 bool
3248 vect_analyze_data_refs (vec_info *vinfo, int *min_vf)
3250 struct loop *loop = NULL;
3251 unsigned int i;
3252 struct data_reference *dr;
3253 tree scalar_type;
3255 if (dump_enabled_p ())
3256 dump_printf_loc (MSG_NOTE, vect_location,
3257 "=== vect_analyze_data_refs ===\n");
3259 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
3260 loop = LOOP_VINFO_LOOP (loop_vinfo);
3262 /* Go through the data-refs, check that the analysis succeeded. Update
3263 pointer from stmt_vec_info struct to DR and vectype. */
3265 vec<data_reference_p> datarefs = vinfo->datarefs;
3266 FOR_EACH_VEC_ELT (datarefs, i, dr)
3268 gimple *stmt;
3269 stmt_vec_info stmt_info;
3270 tree base, offset, init;
3271 enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
3272 bool simd_lane_access = false;
3273 int vf;
3275 again:
3276 if (!dr || !DR_REF (dr))
3278 if (dump_enabled_p ())
3279 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3280 "not vectorized: unhandled data-ref\n");
3281 return false;
3284 stmt = DR_STMT (dr);
3285 stmt_info = vinfo_for_stmt (stmt);
3287 /* Discard clobbers from the dataref vector. We will remove
3288 clobber stmts during vectorization. */
3289 if (gimple_clobber_p (stmt))
3291 free_data_ref (dr);
3292 if (i == datarefs.length () - 1)
3294 datarefs.pop ();
3295 break;
3297 datarefs.ordered_remove (i);
3298 dr = datarefs[i];
3299 goto again;
3302 /* Check that analysis of the data-ref succeeded. */
3303 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3304 || !DR_STEP (dr))
3306 bool maybe_gather
3307 = DR_IS_READ (dr)
3308 && !TREE_THIS_VOLATILE (DR_REF (dr))
3309 && targetm.vectorize.builtin_gather != NULL;
3310 bool maybe_scatter
3311 = DR_IS_WRITE (dr)
3312 && !TREE_THIS_VOLATILE (DR_REF (dr))
3313 && targetm.vectorize.builtin_scatter != NULL;
3314 bool maybe_simd_lane_access
3315 = is_a <loop_vec_info> (vinfo) && loop->simduid;
3317 /* If the target supports vector gather loads or scatter stores, or if
3318 this might be a SIMD lane access, see whether they can be used. */
3319 if (is_a <loop_vec_info> (vinfo)
3320 && (maybe_gather || maybe_scatter || maybe_simd_lane_access)
3321 && !nested_in_vect_loop_p (loop, stmt))
3323 struct data_reference *newdr
3324 = create_data_ref (NULL, loop_containing_stmt (stmt),
3325 DR_REF (dr), stmt, maybe_scatter ? false : true);
3326 gcc_assert (newdr != NULL && DR_REF (newdr));
3327 if (DR_BASE_ADDRESS (newdr)
3328 && DR_OFFSET (newdr)
3329 && DR_INIT (newdr)
3330 && DR_STEP (newdr)
3331 && integer_zerop (DR_STEP (newdr)))
3333 if (maybe_simd_lane_access)
3335 tree off = DR_OFFSET (newdr);
3336 STRIP_NOPS (off);
3337 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3338 && TREE_CODE (off) == MULT_EXPR
3339 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3341 tree step = TREE_OPERAND (off, 1);
3342 off = TREE_OPERAND (off, 0);
3343 STRIP_NOPS (off);
3344 if (CONVERT_EXPR_P (off)
3345 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3346 0)))
3347 < TYPE_PRECISION (TREE_TYPE (off)))
3348 off = TREE_OPERAND (off, 0);
3349 if (TREE_CODE (off) == SSA_NAME)
3351 gimple *def = SSA_NAME_DEF_STMT (off);
3352 tree reft = TREE_TYPE (DR_REF (newdr));
3353 if (is_gimple_call (def)
3354 && gimple_call_internal_p (def)
3355 && (gimple_call_internal_fn (def)
3356 == IFN_GOMP_SIMD_LANE))
3358 tree arg = gimple_call_arg (def, 0);
3359 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3360 arg = SSA_NAME_VAR (arg);
3361 if (arg == loop->simduid
3362 /* For now. */
3363 && tree_int_cst_equal
3364 (TYPE_SIZE_UNIT (reft),
3365 step))
3367 DR_OFFSET (newdr) = ssize_int (0);
3368 DR_STEP (newdr) = step;
3369 DR_ALIGNED_TO (newdr)
3370 = size_int (BIGGEST_ALIGNMENT);
3371 dr = newdr;
3372 simd_lane_access = true;
3378 if (!simd_lane_access && (maybe_gather || maybe_scatter))
3380 dr = newdr;
3381 if (maybe_gather)
3382 gatherscatter = GATHER;
3383 else
3384 gatherscatter = SCATTER;
3387 if (gatherscatter == SG_NONE && !simd_lane_access)
3388 free_data_ref (newdr);
3391 if (gatherscatter == SG_NONE && !simd_lane_access)
3393 if (dump_enabled_p ())
3395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3396 "not vectorized: data ref analysis "
3397 "failed ");
3398 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3399 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3402 if (is_a <bb_vec_info> (vinfo))
3403 break;
3405 return false;
3409 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3411 if (dump_enabled_p ())
3412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3413 "not vectorized: base addr of dr is a "
3414 "constant\n");
3416 if (is_a <bb_vec_info> (vinfo))
3417 break;
3419 if (gatherscatter != SG_NONE || simd_lane_access)
3420 free_data_ref (dr);
3421 return false;
3424 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3426 if (dump_enabled_p ())
3428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3429 "not vectorized: volatile type ");
3430 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3431 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3434 if (is_a <bb_vec_info> (vinfo))
3435 break;
3437 return false;
3440 if (stmt_can_throw_internal (stmt))
3442 if (dump_enabled_p ())
3444 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3445 "not vectorized: statement can throw an "
3446 "exception ");
3447 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3448 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3451 if (is_a <bb_vec_info> (vinfo))
3452 break;
3454 if (gatherscatter != SG_NONE || simd_lane_access)
3455 free_data_ref (dr);
3456 return false;
3459 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3460 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3462 if (dump_enabled_p ())
3464 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3465 "not vectorized: statement is bitfield "
3466 "access ");
3467 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3468 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3471 if (is_a <bb_vec_info> (vinfo))
3472 break;
3474 if (gatherscatter != SG_NONE || simd_lane_access)
3475 free_data_ref (dr);
3476 return false;
3479 base = unshare_expr (DR_BASE_ADDRESS (dr));
3480 offset = unshare_expr (DR_OFFSET (dr));
3481 init = unshare_expr (DR_INIT (dr));
3483 if (is_gimple_call (stmt)
3484 && (!gimple_call_internal_p (stmt)
3485 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3486 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3488 if (dump_enabled_p ())
3490 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3491 "not vectorized: dr in a call ");
3492 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3493 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3496 if (is_a <bb_vec_info> (vinfo))
3497 break;
3499 if (gatherscatter != SG_NONE || simd_lane_access)
3500 free_data_ref (dr);
3501 return false;
3504 /* Update DR field in stmt_vec_info struct. */
3506 /* If the dataref is in an inner-loop of the loop that is considered for
3507 vectorization, we also want to analyze the access relative to
3508 the outer-loop (DR contains information only relative to the
3509 inner-most enclosing loop). We do that by building a reference to the
3510 first location accessed by the inner-loop, and analyze it relative to
3511 the outer-loop. */
3512 if (loop && nested_in_vect_loop_p (loop, stmt))
3514 tree outer_step, outer_base, outer_init;
3515 HOST_WIDE_INT pbitsize, pbitpos;
3516 tree poffset;
3517 machine_mode pmode;
3518 int punsignedp, pvolatilep;
3519 affine_iv base_iv, offset_iv;
3520 tree dinit;
3522 /* Build a reference to the first location accessed by the
3523 inner-loop: *(BASE+INIT). (The first location is actually
3524 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3525 tree inner_base = build_fold_indirect_ref
3526 (fold_build_pointer_plus (base, init));
3528 if (dump_enabled_p ())
3530 dump_printf_loc (MSG_NOTE, vect_location,
3531 "analyze in outer-loop: ");
3532 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3533 dump_printf (MSG_NOTE, "\n");
3536 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3537 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3538 gcc_assert (outer_base != NULL_TREE);
3540 if (pbitpos % BITS_PER_UNIT != 0)
3542 if (dump_enabled_p ())
3543 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3544 "failed: bit offset alignment.\n");
3545 return false;
3548 outer_base = build_fold_addr_expr (outer_base);
3549 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3550 &base_iv, false))
3552 if (dump_enabled_p ())
3553 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3554 "failed: evolution of base is not affine.\n");
3555 return false;
3558 if (offset)
3560 if (poffset)
3561 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3562 poffset);
3563 else
3564 poffset = offset;
3567 if (!poffset)
3569 offset_iv.base = ssize_int (0);
3570 offset_iv.step = ssize_int (0);
3572 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3573 &offset_iv, false))
3575 if (dump_enabled_p ())
3576 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3577 "evolution of offset is not affine.\n");
3578 return false;
3581 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3582 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3583 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3584 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3585 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3587 outer_step = size_binop (PLUS_EXPR,
3588 fold_convert (ssizetype, base_iv.step),
3589 fold_convert (ssizetype, offset_iv.step));
3591 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3592 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3593 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3594 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3595 STMT_VINFO_DR_OFFSET (stmt_info) =
3596 fold_convert (ssizetype, offset_iv.base);
3597 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3598 size_int (highest_pow2_factor (offset_iv.base));
3600 if (dump_enabled_p ())
3602 dump_printf_loc (MSG_NOTE, vect_location,
3603 "\touter base_address: ");
3604 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3605 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3606 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3607 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3608 STMT_VINFO_DR_OFFSET (stmt_info));
3609 dump_printf (MSG_NOTE,
3610 "\n\touter constant offset from base address: ");
3611 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3612 STMT_VINFO_DR_INIT (stmt_info));
3613 dump_printf (MSG_NOTE, "\n\touter step: ");
3614 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3615 STMT_VINFO_DR_STEP (stmt_info));
3616 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3617 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3618 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3619 dump_printf (MSG_NOTE, "\n");
3623 if (STMT_VINFO_DATA_REF (stmt_info))
3625 if (dump_enabled_p ())
3627 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3628 "not vectorized: more than one data ref "
3629 "in stmt: ");
3630 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3631 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3634 if (is_a <bb_vec_info> (vinfo))
3635 break;
3637 if (gatherscatter != SG_NONE || simd_lane_access)
3638 free_data_ref (dr);
3639 return false;
3642 STMT_VINFO_DATA_REF (stmt_info) = dr;
3643 if (simd_lane_access)
3645 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3646 free_data_ref (datarefs[i]);
3647 datarefs[i] = dr;
3650 /* Set vectype for STMT. */
3651 scalar_type = TREE_TYPE (DR_REF (dr));
3652 STMT_VINFO_VECTYPE (stmt_info)
3653 = get_vectype_for_scalar_type (scalar_type);
3654 if (!STMT_VINFO_VECTYPE (stmt_info))
3656 if (dump_enabled_p ())
3658 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3659 "not vectorized: no vectype for stmt: ");
3660 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3661 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3662 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3663 scalar_type);
3664 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3667 if (is_a <bb_vec_info> (vinfo))
3668 break;
3670 if (gatherscatter != SG_NONE || simd_lane_access)
3672 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3673 if (gatherscatter != SG_NONE)
3674 free_data_ref (dr);
3676 return false;
3678 else
3680 if (dump_enabled_p ())
3682 dump_printf_loc (MSG_NOTE, vect_location,
3683 "got vectype for stmt: ");
3684 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3685 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3686 STMT_VINFO_VECTYPE (stmt_info));
3687 dump_printf (MSG_NOTE, "\n");
3691 /* Adjust the minimal vectorization factor according to the
3692 vector type. */
3693 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3694 if (vf > *min_vf)
3695 *min_vf = vf;
3697 if (gatherscatter != SG_NONE)
3699 tree off;
3700 if (!vect_check_gather_scatter (stmt, as_a <loop_vec_info> (vinfo),
3701 NULL, &off, NULL)
3702 || get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3704 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3705 free_data_ref (dr);
3706 if (dump_enabled_p ())
3708 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3709 (gatherscatter == GATHER) ?
3710 "not vectorized: not suitable for gather "
3711 "load " :
3712 "not vectorized: not suitable for scatter "
3713 "store ");
3714 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3715 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3717 return false;
3720 datarefs[i] = dr;
3721 STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
3724 else if (is_a <loop_vec_info> (vinfo)
3725 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3727 if (nested_in_vect_loop_p (loop, stmt))
3729 if (dump_enabled_p ())
3731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3732 "not vectorized: not suitable for strided "
3733 "load ");
3734 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3735 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3737 return false;
3739 STMT_VINFO_STRIDED_P (stmt_info) = true;
3743 /* If we stopped analysis at the first dataref we could not analyze
3744 when trying to vectorize a basic-block, mark the rest of the datarefs
3745 as not vectorizable and truncate the vector of datarefs. That
3746 avoids spending useless time in analyzing their dependence. */
3747 if (i != datarefs.length ())
3749 gcc_assert (is_a <bb_vec_info> (vinfo));
3750 for (unsigned j = i; j < datarefs.length (); ++j)
3752 data_reference_p dr = datarefs[j];
3753 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3754 free_data_ref (dr);
3756 datarefs.truncate (i);
3759 return true;
3763 /* Function vect_get_new_vect_var.
3765 Returns a name for a new variable.  The current naming scheme prepends
3766 the prefix "vect", "stmp" or "vectp" (depending on the value of VAR_KIND)
3767 to the names of vectorizer-generated variables, followed by "_" and NAME
3768 if provided. */
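/* Illustrative sketch (standalone C, not part of the vectorizer): the
   naming scheme above amounts to "<prefix>_<NAME>" when NAME is given and
   just "<prefix>" otherwise; the helper name below is hypothetical.  */
#if 0
#include <stdio.h>

static void
example_vect_var_name (char *buf, size_t buflen,
                       const char *prefix, const char *name)
{
  if (name)
    snprintf (buf, buflen, "%s_%s", prefix, name);  /* e.g. "vectp_a"  */
  else
    snprintf (buf, buflen, "%s", prefix);           /* e.g. "vect"     */
}
#endif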
3770 tree
3771 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3773 const char *prefix;
3774 tree new_vect_var;
3776 switch (var_kind)
3778 case vect_simple_var:
3779 prefix = "vect";
3780 break;
3781 case vect_scalar_var:
3782 prefix = "stmp";
3783 break;
3784 case vect_pointer_var:
3785 prefix = "vectp";
3786 break;
3787 default:
3788 gcc_unreachable ();
3791 if (name)
3793 char* tmp = concat (prefix, "_", name, NULL);
3794 new_vect_var = create_tmp_reg (type, tmp);
3795 free (tmp);
3797 else
3798 new_vect_var = create_tmp_reg (type, prefix);
3800 return new_vect_var;
3803 /* Like vect_get_new_vect_var but return an SSA name. */
3805 tree
3806 vect_get_new_ssa_name (tree type, enum vect_var_kind var_kind, const char *name)
3808 const char *prefix;
3809 tree new_vect_var;
3811 switch (var_kind)
3813 case vect_simple_var:
3814 prefix = "vect";
3815 break;
3816 case vect_scalar_var:
3817 prefix = "stmp";
3818 break;
3819 case vect_pointer_var:
3820 prefix = "vectp";
3821 break;
3822 default:
3823 gcc_unreachable ();
3826 if (name)
3828 char* tmp = concat (prefix, "_", name, NULL);
3829 new_vect_var = make_temp_ssa_name (type, NULL, tmp);
3830 free (tmp);
3832 else
3833 new_vect_var = make_temp_ssa_name (type, NULL, prefix);
3835 return new_vect_var;
3838 /* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
3840 static void
3841 vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
3842 stmt_vec_info stmt_info)
3844 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
3845 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3846 int misalign = DR_MISALIGNMENT (dr);
3847 if (misalign == -1)
3848 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
3849 else
3850 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
3853 /* Function vect_create_addr_base_for_vector_ref.
3855 Create an expression that computes the address of the first memory location
3856 that will be accessed for a data reference.
3858 Input:
3859 STMT: The statement containing the data reference.
3860 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3861 OFFSET: Optional. If supplied, it is added to the initial address.
3862 LOOP: Specify relative to which loop-nest should the address be computed.
3863 For example, when the dataref is in an inner-loop nested in an
3864 outer-loop that is now being vectorized, LOOP can be either the
3865 outer-loop, or the inner-loop. The first memory location accessed
3866 by the following dataref ('in' points to short):
3868 for (i=0; i<N; i++)
3869 for (j=0; j<M; j++)
3870 s += in[i+j]
3872 is as follows:
3873 if LOOP=i_loop: &in (relative to i_loop)
3874 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3875 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
3876 initial address. Unlike OFFSET, which is the number of elements to
3877 be added, BYTE_OFFSET is measured in bytes.
3879 Output:
3880 1. Return an SSA_NAME whose value is the address of the memory location of
3881 the first vector of the data reference.
3882 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3883 these statement(s) which define the returned SSA_NAME.
3885 FORNOW: We are only handling array accesses with step 1. */
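/* Illustrative sketch (standalone C): for the in[i+j] example above the
   address produced here reduces to
   base + (init + OFFSET) * element_size + BYTE_OFFSET, with OFFSET counted
   in elements and BYTE_OFFSET in bytes.  The helper below is hypothetical
   and only mirrors that arithmetic for a short array.  */
#if 0
static short *
example_first_address (short *in, long init, long offset, long byte_offset)
{
  char *base = (char *) in;
  /* OFFSET counts elements, so scale it by the element size;
     BYTE_OFFSET is already in bytes.  */
  return (short *) (base
                    + (init + offset) * (long) sizeof (short)
                    + byte_offset);
}
#endif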
3887 tree
3888 vect_create_addr_base_for_vector_ref (gimple *stmt,
3889 gimple_seq *new_stmt_list,
3890 tree offset,
3891 struct loop *loop,
3892 tree byte_offset)
3894 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3895 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3896 tree data_ref_base;
3897 const char *base_name;
3898 tree addr_base;
3899 tree dest;
3900 gimple_seq seq = NULL;
3901 tree base_offset;
3902 tree init;
3903 tree vect_ptr_type;
3904 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3905 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3907 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3909 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3911 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3913 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3914 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3915 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3917 else
3919 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3920 base_offset = unshare_expr (DR_OFFSET (dr));
3921 init = unshare_expr (DR_INIT (dr));
3924 if (loop_vinfo)
3925 base_name = get_name (data_ref_base);
3926 else
3928 base_offset = ssize_int (0);
3929 init = ssize_int (0);
3930 base_name = get_name (DR_REF (dr));
3933 /* Create base_offset */
3934 base_offset = size_binop (PLUS_EXPR,
3935 fold_convert (sizetype, base_offset),
3936 fold_convert (sizetype, init));
3938 if (offset)
3940 offset = fold_build2 (MULT_EXPR, sizetype,
3941 fold_convert (sizetype, offset), step);
3942 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3943 base_offset, offset);
3945 if (byte_offset)
3947 byte_offset = fold_convert (sizetype, byte_offset);
3948 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3949 base_offset, byte_offset);
3952 /* base + base_offset */
3953 if (loop_vinfo)
3954 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3955 else
3957 addr_base = build1 (ADDR_EXPR,
3958 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3959 unshare_expr (DR_REF (dr)));
3962 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3963 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3964 addr_base = force_gimple_operand (addr_base, &seq, true, dest);
3965 gimple_seq_add_seq (new_stmt_list, seq);
3967 if (DR_PTR_INFO (dr)
3968 && TREE_CODE (addr_base) == SSA_NAME
3969 && !SSA_NAME_PTR_INFO (addr_base))
3971 vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
3972 if (offset || byte_offset)
3973 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3976 if (dump_enabled_p ())
3978 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3979 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3980 dump_printf (MSG_NOTE, "\n");
3983 return addr_base;
3987 /* Function vect_create_data_ref_ptr.
3989 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3990 location accessed in the loop by STMT, along with the def-use update
3991 chain to appropriately advance the pointer through the loop iterations.
3992 Also set aliasing information for the pointer. This pointer is used by
3993 the callers to this function to create a memory reference expression for
3994 vector load/store access.
3996 Input:
3997 1. STMT: a stmt that references memory. Expected to be of the form
3998 GIMPLE_ASSIGN <name, data-ref> or
3999 GIMPLE_ASSIGN <data-ref, name>.
4000 2. AGGR_TYPE: the type of the reference, which should be either a vector
4001 or an array.
4002 3. AT_LOOP: the loop where the vector memref is to be created.
4003 4. OFFSET (optional): an offset to be added to the initial address accessed
4004 by the data-ref in STMT.
4005 5. BSI: location where the new stmts are to be placed if there is no loop
4006 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4007 pointing to the initial address.
4008 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4009 to the initial address accessed by the data-ref in STMT. This is
4010 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4011 in bytes.
4013 Output:
4014 1. Declare a new ptr to vector_type, and have it point to the base of the
4015 data reference (the initial address accessed by the data reference).
4016 For example, for a vector of type V8HI, the following code is generated:
4018 v8hi *ap;
4019 ap = (v8hi *)initial_address;
4021 if OFFSET is not supplied:
4022 initial_address = &a[init];
4023 if OFFSET is supplied:
4024 initial_address = &a[init + OFFSET];
4025 if BYTE_OFFSET is supplied:
4026 initial_address = &a[init] + BYTE_OFFSET;
4028 Return the initial_address in INITIAL_ADDRESS.
4030 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4031 update the pointer in each iteration of the loop.
4033 Return the increment stmt that updates the pointer in PTR_INCR.
4035 3. Set INV_P to true if the access pattern of the data reference in the
4036 vectorized loop is invariant. Set it to false otherwise.
4038 4. Return the pointer. */
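/* Illustrative sketch (GNU C, standalone): the def-use cycle created by
   steps (3)/(4) behaves like the pointer update in the scalar loop below;
   a 4 x int vector type is assumed and the function name is hypothetical.  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

static void
example_pointer_update (int *base, v4si *out, int n)
{
  v4si *vp = (v4si *) base;     /* vp0 = &base_addr             */
  for (int i = 0; i < n / 4; i++)
    {
      out[i] = *vp;             /* use of vp1 in the loop body  */
      vp = vp + 1;              /* vp2 = vp1 + step             */
    }
}
#endif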
4040 tree
4041 vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
4042 tree offset, tree *initial_address,
4043 gimple_stmt_iterator *gsi, gimple **ptr_incr,
4044 bool only_init, bool *inv_p, tree byte_offset)
4046 const char *base_name;
4047 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4048 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4049 struct loop *loop = NULL;
4050 bool nested_in_vect_loop = false;
4051 struct loop *containing_loop = NULL;
4052 tree aggr_ptr_type;
4053 tree aggr_ptr;
4054 tree new_temp;
4055 gimple_seq new_stmt_list = NULL;
4056 edge pe = NULL;
4057 basic_block new_bb;
4058 tree aggr_ptr_init;
4059 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4060 tree aptr;
4061 gimple_stmt_iterator incr_gsi;
4062 bool insert_after;
4063 tree indx_before_incr, indx_after_incr;
4064 gimple *incr;
4065 tree step;
4066 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4068 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4069 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4071 if (loop_vinfo)
4073 loop = LOOP_VINFO_LOOP (loop_vinfo);
4074 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4075 containing_loop = (gimple_bb (stmt))->loop_father;
4076 pe = loop_preheader_edge (loop);
4078 else
4080 gcc_assert (bb_vinfo);
4081 only_init = true;
4082 *ptr_incr = NULL;
4085 /* Check the step (evolution) of the load in LOOP, and record
4086 whether it's invariant. */
4087 if (nested_in_vect_loop)
4088 step = STMT_VINFO_DR_STEP (stmt_info);
4089 else
4090 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
4092 if (integer_zerop (step))
4093 *inv_p = true;
4094 else
4095 *inv_p = false;
4097 /* Create an expression for the first address accessed by this load
4098 in LOOP. */
4099 base_name = get_name (DR_BASE_ADDRESS (dr));
4101 if (dump_enabled_p ())
4103 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4104 dump_printf_loc (MSG_NOTE, vect_location,
4105 "create %s-pointer variable to type: ",
4106 get_tree_code_name (TREE_CODE (aggr_type)));
4107 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4108 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4109 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4110 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4111 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4112 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4113 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4114 else
4115 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4116 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4117 dump_printf (MSG_NOTE, "\n");
4120 /* (1) Create the new aggregate-pointer variable.
4121 Vector and array types inherit the alias set of their component
4122 type by default so we need to use a ref-all pointer if the data
4123 reference does not conflict with the created aggregated data
4124 reference because it is not addressable. */
4125 bool need_ref_all = false;
4126 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4127 get_alias_set (DR_REF (dr))))
4128 need_ref_all = true;
4129 /* Likewise for any of the data references in the stmt group. */
4130 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4132 gimple *orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4135 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4136 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4137 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4138 get_alias_set (DR_REF (sdr))))
4140 need_ref_all = true;
4141 break;
4143 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4145 while (orig_stmt);
4147 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4148 need_ref_all);
4149 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4152 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4153 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4154 def-use update cycles for the pointer: one relative to the outer-loop
4155 (LOOP), which is what steps (3) and (4) below do. The other is relative
4156 to the inner-loop (which is the inner-most loop containing the dataref),
4157 and this is done by step (5) below.
4159 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4160 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4161 redundant. Steps (3),(4) create the following:
4163 vp0 = &base_addr;
4164 LOOP: vp1 = phi(vp0,vp2)
4167 vp2 = vp1 + step
4168 goto LOOP
4170 If there is an inner-loop nested in loop, then step (5) will also be
4171 applied, and an additional update in the inner-loop will be created:
4173 vp0 = &base_addr;
4174 LOOP: vp1 = phi(vp0,vp2)
4176 inner: vp3 = phi(vp1,vp4)
4177 vp4 = vp3 + inner_step
4178 if () goto inner
4180 vp2 = vp1 + step
4181 if () goto LOOP */
4183 /* (2) Calculate the initial address of the aggregate-pointer, and set
4184 the aggregate-pointer to point to it before the loop. */
4186 /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader. */
4188 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4189 offset, loop, byte_offset);
4190 if (new_stmt_list)
4192 if (pe)
4194 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4195 gcc_assert (!new_bb);
4197 else
4198 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4201 *initial_address = new_temp;
4202 aggr_ptr_init = new_temp;
4204 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4205 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4206 inner-loop nested in LOOP (during outer-loop vectorization). */
4208 /* No update in loop is required. */
4209 if (only_init && (!loop_vinfo || at_loop == loop))
4210 aptr = aggr_ptr_init;
4211 else
4213 /* The step of the aggregate pointer is the type size. */
4214 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4215 /* One exception to the above is when the scalar step of the load in
4216 LOOP is zero. In this case the step here is also zero. */
4217 if (*inv_p)
4218 iv_step = size_zero_node;
4219 else if (tree_int_cst_sgn (step) == -1)
4220 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4222 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4224 create_iv (aggr_ptr_init,
4225 fold_convert (aggr_ptr_type, iv_step),
4226 aggr_ptr, loop, &incr_gsi, insert_after,
4227 &indx_before_incr, &indx_after_incr);
4228 incr = gsi_stmt (incr_gsi);
4229 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
4231 /* Copy the points-to information if it exists. */
4232 if (DR_PTR_INFO (dr))
4234 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4235 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4237 if (ptr_incr)
4238 *ptr_incr = incr;
4240 aptr = indx_before_incr;
4243 if (!nested_in_vect_loop || only_init)
4244 return aptr;
4247 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4248 nested in LOOP, if it exists. */
4250 gcc_assert (nested_in_vect_loop);
4251 if (!only_init)
4253 standard_iv_increment_position (containing_loop, &incr_gsi,
4254 &insert_after);
4255 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4256 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4257 &indx_after_incr);
4258 incr = gsi_stmt (incr_gsi);
4259 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
4261 /* Copy the points-to information if it exists. */
4262 if (DR_PTR_INFO (dr))
4264 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4265 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4267 if (ptr_incr)
4268 *ptr_incr = incr;
4270 return indx_before_incr;
4272 else
4273 gcc_unreachable ();
4277 /* Function bump_vector_ptr
4279 Increment a pointer (to a vector type) by vector-size. If requested,
4280 i.e. if PTR_INCR is given, then also connect the new increment stmt
4281 to the existing def-use update-chain of the pointer, by modifying
4282 the PTR_INCR as illustrated below:
4284 The pointer def-use update-chain before this function:
4285 DATAREF_PTR = phi (p_0, p_2)
4286 ....
4287 PTR_INCR: p_2 = DATAREF_PTR + step
4289 The pointer def-use update-chain after this function:
4290 DATAREF_PTR = phi (p_0, p_2)
4291 ....
4292 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4293 ....
4294 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4296 Input:
4297 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4298 in the loop.
4299 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4300 the loop. The increment amount across iterations is expected
4301 to be vector_size.
4302 BSI - location where the new update stmt is to be placed.
4303 STMT - the original scalar memory-access stmt that is being vectorized.
4304 BUMP - optional. The offset by which to bump the pointer. If not given,
4305 the offset is assumed to be vector_size.
4307 Output: Return NEW_DATAREF_PTR as illustrated above.
4311 tree
4312 bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
4313 gimple *stmt, tree bump)
4315 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4316 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4317 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4318 tree update = TYPE_SIZE_UNIT (vectype);
4319 gassign *incr_stmt;
4320 ssa_op_iter iter;
4321 use_operand_p use_p;
4322 tree new_dataref_ptr;
4324 if (bump)
4325 update = bump;
4327 if (TREE_CODE (dataref_ptr) == SSA_NAME)
4328 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4329 else
4330 new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
4331 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4332 dataref_ptr, update);
4333 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4335 /* Copy the points-to information if it exists. */
4336 if (DR_PTR_INFO (dr))
4338 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4339 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4342 if (!ptr_incr)
4343 return new_dataref_ptr;
4345 /* Update the vector-pointer's cross-iteration increment. */
4346 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4348 tree use = USE_FROM_PTR (use_p);
4350 if (use == dataref_ptr)
4351 SET_USE (use_p, new_dataref_ptr);
4352 else
4353 gcc_assert (tree_int_cst_compare (use, update) == 0);
4356 return new_dataref_ptr;
4360 /* Function vect_create_destination_var.
4362 Create a new temporary of type VECTYPE. */
4364 tree
4365 vect_create_destination_var (tree scalar_dest, tree vectype)
4367 tree vec_dest;
4368 const char *name;
4369 char *new_name;
4370 tree type;
4371 enum vect_var_kind kind;
4373 kind = vectype ? vect_simple_var : vect_scalar_var;
4374 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4376 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4378 name = get_name (scalar_dest);
4379 if (name)
4380 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4381 else
4382 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4383 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4384 free (new_name);
4386 return vec_dest;
4389 /* Function vect_grouped_store_supported.
4391 Returns TRUE if interleave high and interleave low permutations
4392 are supported, and FALSE otherwise. */
4394 bool
4395 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4397 machine_mode mode = TYPE_MODE (vectype);
4399 /* vect_permute_store_chain requires the group size to be equal to 3 or
4400 be a power of two. */
4401 if (count != 3 && exact_log2 (count) == -1)
4403 if (dump_enabled_p ())
4404 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4405 "the size of the group of accesses"
4406 " is not a power of 2 or not eqaul to 3\n");
4407 return false;
4410 /* Check that the permutation is supported. */
4411 if (VECTOR_MODE_P (mode))
4413 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4414 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4416 if (count == 3)
4418 unsigned int j0 = 0, j1 = 0, j2 = 0;
4419 unsigned int i, j;
4421 for (j = 0; j < 3; j++)
4423 int nelt0 = ((3 - j) * nelt) % 3;
4424 int nelt1 = ((3 - j) * nelt + 1) % 3;
4425 int nelt2 = ((3 - j) * nelt + 2) % 3;
4426 for (i = 0; i < nelt; i++)
4428 if (3 * i + nelt0 < nelt)
4429 sel[3 * i + nelt0] = j0++;
4430 if (3 * i + nelt1 < nelt)
4431 sel[3 * i + nelt1] = nelt + j1++;
4432 if (3 * i + nelt2 < nelt)
4433 sel[3 * i + nelt2] = 0;
4435 if (!can_vec_perm_p (mode, false, sel))
4437 if (dump_enabled_p ())
4438 dump_printf (MSG_MISSED_OPTIMIZATION,
4439 "permutaion op not supported by target.\n");
4440 return false;
4443 for (i = 0; i < nelt; i++)
4445 if (3 * i + nelt0 < nelt)
4446 sel[3 * i + nelt0] = 3 * i + nelt0;
4447 if (3 * i + nelt1 < nelt)
4448 sel[3 * i + nelt1] = 3 * i + nelt1;
4449 if (3 * i + nelt2 < nelt)
4450 sel[3 * i + nelt2] = nelt + j2++;
4452 if (!can_vec_perm_p (mode, false, sel))
4454 if (dump_enabled_p ())
4455 dump_printf (MSG_MISSED_OPTIMIZATION,
4456 "permutaion op not supported by target.\n");
4457 return false;
4460 return true;
4462 else
4464 /* If length is not equal to 3 then only power of 2 is supported. */
4465 gcc_assert (exact_log2 (count) != -1);
4467 for (i = 0; i < nelt / 2; i++)
4469 sel[i * 2] = i;
4470 sel[i * 2 + 1] = i + nelt;
4472 if (can_vec_perm_p (mode, false, sel))
4474 for (i = 0; i < nelt; i++)
4475 sel[i] += nelt / 2;
4476 if (can_vec_perm_p (mode, false, sel))
4477 return true;
4482 if (dump_enabled_p ())
4483 dump_printf (MSG_MISSED_OPTIMIZATION,
4484 "permutaion op not supported by target.\n");
4485 return false;
4489 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4490 type VECTYPE. */
4492 bool
4493 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4495 return vect_lanes_optab_supported_p ("vec_store_lanes",
4496 vec_store_lanes_optab,
4497 vectype, count);
4501 /* Function vect_permute_store_chain.
4503 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4504 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4505 the data correctly for the stores. Return the final references for stores
4506 in RESULT_CHAIN.
4508 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4509 The input is 4 vectors each containing 8 elements. We assign a number to
4510 each element, the input sequence is:
4512 1st vec: 0 1 2 3 4 5 6 7
4513 2nd vec: 8 9 10 11 12 13 14 15
4514 3rd vec: 16 17 18 19 20 21 22 23
4515 4th vec: 24 25 26 27 28 29 30 31
4517 The output sequence should be:
4519 1st vec: 0 8 16 24 1 9 17 25
4520 2nd vec: 2 10 18 26 3 11 19 27
4521 3rd vec: 4 12 20 28 5 13 21 29
4522 4th vec: 6 14 22 30 7 15 23 31
4524 i.e., we interleave the contents of the four vectors in their order.
4526 We use interleave_high/low instructions to create such output. The input of
4527 each interleave_high/low operation is two vectors:
4528 1st vec 2nd vec
4529 0 1 2 3 4 5 6 7
4530 the even elements of the result vector are obtained left-to-right from the
4531 high/low elements of the first vector. The odd elements of the result are
4532 obtained left-to-right from the high/low elements of the second vector.
4533 The output of interleave_high will be: 0 4 1 5
4534 and of interleave_low: 2 6 3 7
4537 The permutation is done in log LENGTH stages. In each stage interleave_high
4538 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4539 where the first argument is taken from the first half of DR_CHAIN and the
4540 second argument from its second half.
4541 In our example,
4543 I1: interleave_high (1st vec, 3rd vec)
4544 I2: interleave_low (1st vec, 3rd vec)
4545 I3: interleave_high (2nd vec, 4th vec)
4546 I4: interleave_low (2nd vec, 4th vec)
4548 The output for the first stage is:
4550 I1: 0 16 1 17 2 18 3 19
4551 I2: 4 20 5 21 6 22 7 23
4552 I3: 8 24 9 25 10 26 11 27
4553 I4: 12 28 13 29 14 30 15 31
4555 The output of the second stage, i.e. the final result is:
4557 I1: 0 8 16 24 1 9 17 25
4558 I2: 2 10 18 26 3 11 19 27
4559 I3: 4 12 20 28 5 13 21 29
4560 I4: 6 14 22 30 7 15 23 31. */
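/* Illustrative sketch (standalone C): for a power-of-two LENGTH the two
   selector vectors built below are the interleave-high mask
   {0, nelt, 1, nelt+1, ...} and the interleave-low mask obtained by adding
   nelt/2 to every entry; for NELT == 8 that is {0, 8, 1, 9, 2, 10, 3, 11}
   and {4, 12, 5, 13, 6, 14, 7, 15}.  The helper name is hypothetical.  */
#if 0
static void
example_interleave_masks (unsigned int nelt,
                          unsigned char *high, unsigned char *low)
{
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      high[i * 2] = i;                  /* element of the first vector   */
      high[i * 2 + 1] = i + nelt;       /* element of the second vector  */
    }
  for (unsigned int i = 0; i < nelt; i++)
    low[i] = high[i] + nelt / 2;        /* same pattern, lower half      */
}
#endif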
4562 void
4563 vect_permute_store_chain (vec<tree> dr_chain,
4564 unsigned int length,
4565 gimple *stmt,
4566 gimple_stmt_iterator *gsi,
4567 vec<tree> *result_chain)
4569 tree vect1, vect2, high, low;
4570 gimple *perm_stmt;
4571 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4572 tree perm_mask_low, perm_mask_high;
4573 tree data_ref;
4574 tree perm3_mask_low, perm3_mask_high;
4575 unsigned int i, n, log_length = exact_log2 (length);
4576 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4577 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4579 result_chain->quick_grow (length);
4580 memcpy (result_chain->address (), dr_chain.address (),
4581 length * sizeof (tree));
4583 if (length == 3)
4585 unsigned int j0 = 0, j1 = 0, j2 = 0;
4587 for (j = 0; j < 3; j++)
4589 int nelt0 = ((3 - j) * nelt) % 3;
4590 int nelt1 = ((3 - j) * nelt + 1) % 3;
4591 int nelt2 = ((3 - j) * nelt + 2) % 3;
4593 for (i = 0; i < nelt; i++)
4595 if (3 * i + nelt0 < nelt)
4596 sel[3 * i + nelt0] = j0++;
4597 if (3 * i + nelt1 < nelt)
4598 sel[3 * i + nelt1] = nelt + j1++;
4599 if (3 * i + nelt2 < nelt)
4600 sel[3 * i + nelt2] = 0;
4602 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4604 for (i = 0; i < nelt; i++)
4606 if (3 * i + nelt0 < nelt)
4607 sel[3 * i + nelt0] = 3 * i + nelt0;
4608 if (3 * i + nelt1 < nelt)
4609 sel[3 * i + nelt1] = 3 * i + nelt1;
4610 if (3 * i + nelt2 < nelt)
4611 sel[3 * i + nelt2] = nelt + j2++;
4613 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4615 vect1 = dr_chain[0];
4616 vect2 = dr_chain[1];
4618 /* Create interleaving stmt:
4619 low = VEC_PERM_EXPR <vect1, vect2,
4620 {j, nelt, *, j + 1, nelt + j + 1, *,
4621 j + 2, nelt + j + 2, *, ...}> */
4622 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4623 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4624 vect2, perm3_mask_low);
4625 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4627 vect1 = data_ref;
4628 vect2 = dr_chain[2];
4629 /* Create interleaving stmt:
4630 low = VEC_PERM_EXPR <vect1, vect2,
4631 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4632 6, 7, nelt + j + 2, ...}> */
4633 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4634 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4635 vect2, perm3_mask_high);
4636 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4637 (*result_chain)[j] = data_ref;
4640 else
4642 /* If length is not equal to 3 then only power of 2 is supported. */
4643 gcc_assert (exact_log2 (length) != -1);
4645 for (i = 0, n = nelt / 2; i < n; i++)
4647 sel[i * 2] = i;
4648 sel[i * 2 + 1] = i + nelt;
4650 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4652 for (i = 0; i < nelt; i++)
4653 sel[i] += nelt / 2;
4654 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4656 for (i = 0, n = log_length; i < n; i++)
4658 for (j = 0; j < length/2; j++)
4660 vect1 = dr_chain[j];
4661 vect2 = dr_chain[j+length/2];
4663 /* Create interleaving stmt:
4664 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4665 ...}> */
4666 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4667 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4668 vect2, perm_mask_high);
4669 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4670 (*result_chain)[2*j] = high;
4672 /* Create interleaving stmt:
4673 low = VEC_PERM_EXPR <vect1, vect2,
4674 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4675 ...}> */
4676 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4677 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4678 vect2, perm_mask_low);
4679 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4680 (*result_chain)[2*j+1] = low;
4682 memcpy (dr_chain.address (), result_chain->address (),
4683 length * sizeof (tree));
4688 /* Function vect_setup_realignment
4690 This function is called when vectorizing an unaligned load using
4691 the dr_explicit_realign[_optimized] scheme.
4692 This function generates the following code at the loop prolog:
4694 p = initial_addr;
4695 x msq_init = *(floor(p)); # prolog load
4696 realignment_token = call target_builtin;
4697 loop:
4698 x msq = phi (msq_init, ---)
4700 The stmts marked with x are generated only for the case of
4701 dr_explicit_realign_optimized.
4703 The code above sets up a new (vector) pointer, pointing to the first
4704 location accessed by STMT, and a "floor-aligned" load using that pointer.
4705 It also generates code to compute the "realignment-token" (if the relevant
4706 target hook was defined), and creates a phi-node at the loop-header bb
4707 whose arguments are the result of the prolog-load (created by this
4708 function) and the result of a load that takes place in the loop (to be
4709 created by the caller to this function).
4711 For the case of dr_explicit_realign_optimized:
4712 The caller to this function uses the phi-result (msq) to create the
4713 realignment code inside the loop, and sets up the missing phi argument,
4714 as follows:
4715 loop:
4716 msq = phi (msq_init, lsq)
4717 lsq = *(floor(p')); # load in loop
4718 result = realign_load (msq, lsq, realignment_token);
4720 For the case of dr_explicit_realign:
4721 loop:
4722 msq = *(floor(p)); # load in loop
4723 p' = p + (VS-1);
4724 lsq = *(floor(p')); # load in loop
4725 result = realign_load (msq, lsq, realignment_token);
4727 Input:
4728 STMT - (scalar) load stmt to be vectorized. This load accesses
4729 a memory location that may be unaligned.
4730 BSI - place where new code is to be inserted.
4731 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4732 is used.
4734 Output:
4735 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4736 target hook, if defined.
4737 Return value - the result of the loop-header phi node. */
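/* Illustrative sketch (standalone C): "floor (p)" in the schemes above is
   simply the address rounded down to the vector alignment, as in the
   hypothetical helper below (a 16-byte vector alignment is assumed).  */
#if 0
#include <stdint.h>

static const void *
example_floor_align (const void *p)
{
  /* Mirrors the BIT_AND_EXPR with -TYPE_ALIGN_UNIT generated below.  */
  return (const void *) ((uintptr_t) p & ~(uintptr_t) 15);
}
#endif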
4739 tree
4740 vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
4741 tree *realignment_token,
4742 enum dr_alignment_support alignment_support_scheme,
4743 tree init_addr,
4744 struct loop **at_loop)
4746 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4747 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4748 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4749 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4750 struct loop *loop = NULL;
4751 edge pe = NULL;
4752 tree scalar_dest = gimple_assign_lhs (stmt);
4753 tree vec_dest;
4754 gimple *inc;
4755 tree ptr;
4756 tree data_ref;
4757 basic_block new_bb;
4758 tree msq_init = NULL_TREE;
4759 tree new_temp;
4760 gphi *phi_stmt;
4761 tree msq = NULL_TREE;
4762 gimple_seq stmts = NULL;
4763 bool inv_p;
4764 bool compute_in_loop = false;
4765 bool nested_in_vect_loop = false;
4766 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4767 struct loop *loop_for_initial_load = NULL;
4769 if (loop_vinfo)
4771 loop = LOOP_VINFO_LOOP (loop_vinfo);
4772 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4775 gcc_assert (alignment_support_scheme == dr_explicit_realign
4776 || alignment_support_scheme == dr_explicit_realign_optimized);
4778 /* We need to generate three things:
4779 1. the misalignment computation
4780 2. the extra vector load (for the optimized realignment scheme).
4781 3. the phi node for the two vectors from which the realignment is
4782 done (for the optimized realignment scheme). */
4784 /* 1. Determine where to generate the misalignment computation.
4786 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4787 calculation will be generated by this function, outside the loop (in the
4788 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4789 caller, inside the loop.
4791 Background: If the misalignment remains fixed throughout the iterations of
4792 the loop, then both realignment schemes are applicable, and also the
4793 misalignment computation can be done outside LOOP. This is because we are
4794 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4795 are a multiple of VS (the Vector Size), and therefore the misalignment in
4796 different vectorized LOOP iterations is always the same.
4797 The problem arises only if the memory access is in an inner-loop nested
4798 inside LOOP, which is now being vectorized using outer-loop vectorization.
4799 This is the only case when the misalignment of the memory access may not
4800 remain fixed throughout the iterations of the inner-loop (as explained in
4801 detail in vect_supportable_dr_alignment). In this case, not only is the
4802 optimized realignment scheme not applicable, but also the misalignment
4803 computation (and generation of the realignment token that is passed to
4804 REALIGN_LOAD) have to be done inside the loop.
4806 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4807 or not, which in turn determines if the misalignment is computed inside
4808 the inner-loop, or outside LOOP. */
4810 if (init_addr != NULL_TREE || !loop_vinfo)
4812 compute_in_loop = true;
4813 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4817 /* 2. Determine where to generate the extra vector load.
4819 For the optimized realignment scheme, instead of generating two vector
4820 loads in each iteration, we generate a single extra vector load in the
4821 preheader of the loop, and in each iteration reuse the result of the
4822 vector load from the previous iteration. In case the memory access is in
4823 an inner-loop nested inside LOOP, which is now being vectorized using
4824 outer-loop vectorization, we need to determine whether this initial vector
4825 load should be generated at the preheader of the inner-loop, or can be
4826 generated at the preheader of LOOP. If the memory access has no evolution
4827 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4828 to be generated inside LOOP (in the preheader of the inner-loop). */
4830 if (nested_in_vect_loop)
4832 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4833 bool invariant_in_outerloop =
4834 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4835 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4837 else
4838 loop_for_initial_load = loop;
4839 if (at_loop)
4840 *at_loop = loop_for_initial_load;
4842 if (loop_for_initial_load)
4843 pe = loop_preheader_edge (loop_for_initial_load);
4845 /* 3. For the case of the optimized realignment, create the first vector
4846 load at the loop preheader. */
4848 if (alignment_support_scheme == dr_explicit_realign_optimized)
4850 /* Create msq_init = *(floor(p1)) in the loop preheader */
4851 gassign *new_stmt;
4853 gcc_assert (!compute_in_loop);
4854 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4855 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4856 NULL_TREE, &init_addr, NULL, &inc,
4857 true, &inv_p);
4858 if (TREE_CODE (ptr) == SSA_NAME)
4859 new_temp = copy_ssa_name (ptr);
4860 else
4861 new_temp = make_ssa_name (TREE_TYPE (ptr));
4862 new_stmt = gimple_build_assign
4863 (new_temp, BIT_AND_EXPR, ptr,
4864 build_int_cst (TREE_TYPE (ptr),
4865 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4866 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4867 gcc_assert (!new_bb);
4868 data_ref
4869 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4870 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4871 new_stmt = gimple_build_assign (vec_dest, data_ref);
4872 new_temp = make_ssa_name (vec_dest, new_stmt);
4873 gimple_assign_set_lhs (new_stmt, new_temp);
4874 if (pe)
4876 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4877 gcc_assert (!new_bb);
4879 else
4880 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4882 msq_init = gimple_assign_lhs (new_stmt);
4885 /* 4. Create realignment token using a target builtin, if available.
4886 It is done either inside the containing loop, or before LOOP (as
4887 determined above). */
4889 if (targetm.vectorize.builtin_mask_for_load)
4891 gcall *new_stmt;
4892 tree builtin_decl;
4894 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4895 if (!init_addr)
4897 /* Generate the INIT_ADDR computation outside LOOP. */
4898 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4899 NULL_TREE, loop);
4900 if (loop)
4902 pe = loop_preheader_edge (loop);
4903 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4904 gcc_assert (!new_bb);
4906 else
4907 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4910 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4911 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4912 vec_dest =
4913 vect_create_destination_var (scalar_dest,
4914 gimple_call_return_type (new_stmt));
4915 new_temp = make_ssa_name (vec_dest, new_stmt);
4916 gimple_call_set_lhs (new_stmt, new_temp);
4918 if (compute_in_loop)
4919 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4920 else
4922 /* Generate the misalignment computation outside LOOP. */
4923 pe = loop_preheader_edge (loop);
4924 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4925 gcc_assert (!new_bb);
4928 *realignment_token = gimple_call_lhs (new_stmt);
4930 /* The result of the CALL_EXPR to this builtin is determined from
4931 the value of the parameter and no global variables are touched
4932 which makes the builtin a "const" function. Requiring the
4933 builtin to have the "const" attribute makes it unnecessary
4934 to call mark_call_clobbered. */
4935 gcc_assert (TREE_READONLY (builtin_decl));
4938 if (alignment_support_scheme == dr_explicit_realign)
4939 return msq;
4941 gcc_assert (!compute_in_loop);
4942 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4945 /* 5. Create msq = phi <msq_init, lsq> in loop */
4947 pe = loop_preheader_edge (containing_loop);
4948 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4949 msq = make_ssa_name (vec_dest);
4950 phi_stmt = create_phi_node (msq, containing_loop->header);
4951 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4953 return msq;
4957 /* Function vect_grouped_load_supported.
4959 Returns TRUE if even and odd permutations are supported,
4960 and FALSE otherwise. */
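/* Illustrative sketch (standalone C): the two selectors the count == 3 case
   below checks, computed by a hypothetical helper; for NELT == 8 and K == 0
   the first shuffle gives {0, 3, 6, 9, 12, 15, 0, 0} (trailing slots unused)
   and the second fills those slots from the third vector, giving
   {0, 1, 2, 3, 4, 5, 10, 13}.  */
#if 0
static void
example_load3_masks (unsigned int nelt, unsigned int k,
                     unsigned char *sel1, unsigned char *sel2)
{
  unsigned int i, j = 0;
  for (i = 0; i < nelt; i++)
    sel1[i] = 3 * i + k < 2 * nelt ? 3 * i + k : 0;
  for (i = 0; i < nelt; i++)
    if (3 * i + k < 2 * nelt)
      sel2[i] = i;                                    /* keep the element  */
    else
      sel2[i] = nelt + ((nelt + k) % 3) + 3 * (j++);  /* from the 3rd vec  */
}
#endif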
4962 bool
4963 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4965 machine_mode mode = TYPE_MODE (vectype);
4967 /* vect_permute_load_chain requires the group size to be equal to 3 or
4968 be a power of two. */
4969 if (count != 3 && exact_log2 (count) == -1)
4971 if (dump_enabled_p ())
4972 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4973 "the size of the group of accesses"
4974 " is not a power of 2 or not equal to 3\n");
4975 return false;
4978 /* Check that the permutation is supported. */
4979 if (VECTOR_MODE_P (mode))
4981 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
4982 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4984 if (count == 3)
4986 unsigned int k;
4987 for (k = 0; k < 3; k++)
4989 for (i = 0; i < nelt; i++)
4990 if (3 * i + k < 2 * nelt)
4991 sel[i] = 3 * i + k;
4992 else
4993 sel[i] = 0;
4994 if (!can_vec_perm_p (mode, false, sel))
4996 if (dump_enabled_p ())
4997 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4998 "shuffle of 3 loads is not supported by"
4999 " target\n");
5000 return false;
5002 for (i = 0, j = 0; i < nelt; i++)
5003 if (3 * i + k < 2 * nelt)
5004 sel[i] = i;
5005 else
5006 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5007 if (!can_vec_perm_p (mode, false, sel))
5009 if (dump_enabled_p ())
5010 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5011 "shuffle of 3 loads is not supported by"
5012 " target\n");
5013 return false;
5016 return true;
5018 else
5020 /* If length is not equal to 3 then only power of 2 is supported. */
5021 gcc_assert (exact_log2 (count) != -1);
5022 for (i = 0; i < nelt; i++)
5023 sel[i] = i * 2;
5024 if (can_vec_perm_p (mode, false, sel))
5026 for (i = 0; i < nelt; i++)
5027 sel[i] = i * 2 + 1;
5028 if (can_vec_perm_p (mode, false, sel))
5029 return true;
5034 if (dump_enabled_p ())
5035 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5036 "extract even/odd not supported by target\n");
5037 return false;
5040 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5041 type VECTYPE. */
5043 bool
5044 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5046 return vect_lanes_optab_supported_p ("vec_load_lanes",
5047 vec_load_lanes_optab,
5048 vectype, count);
5051 /* Function vect_permute_load_chain.
5053 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5054 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5055 the input data correctly. Return the final references for loads in
5056 RESULT_CHAIN.
5058 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5059 The input is 4 vectors each containing 8 elements. We assign a number to each
5060 element, the input sequence is:
5062 1st vec: 0 1 2 3 4 5 6 7
5063 2nd vec: 8 9 10 11 12 13 14 15
5064 3rd vec: 16 17 18 19 20 21 22 23
5065 4th vec: 24 25 26 27 28 29 30 31
5067 The output sequence should be:
5069 1st vec: 0 4 8 12 16 20 24 28
5070 2nd vec: 1 5 9 13 17 21 25 29
5071 3rd vec: 2 6 10 14 18 22 26 30
5072 4th vec: 3 7 11 15 19 23 27 31
5074 i.e., the first output vector should contain the first elements of each
5075 interleaving group, etc.
5077 We use extract_even/odd instructions to create such output. The input of
5078 each extract_even/odd operation is two vectors
5079 1st vec 2nd vec
5080 0 1 2 3 4 5 6 7
5082 and the output is the vector of extracted even/odd elements. The output of
5083 extract_even will be: 0 2 4 6
5084 and of extract_odd: 1 3 5 7
5087 The permutation is done in log LENGTH stages. In each stage extract_even
5088 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5089 their order. In our example,
5091 E1: extract_even (1st vec, 2nd vec)
5092 E2: extract_odd (1st vec, 2nd vec)
5093 E3: extract_even (3rd vec, 4th vec)
5094 E4: extract_odd (3rd vec, 4th vec)
5096 The output for the first stage will be:
5098 E1: 0 2 4 6 8 10 12 14
5099 E2: 1 3 5 7 9 11 13 15
5100 E3: 16 18 20 22 24 26 28 30
5101 E4: 17 19 21 23 25 27 29 31
5103 In order to proceed and create the correct sequence for the next stage (or
5104 for the correct output, if the second stage is the last one, as in our
5105 example), we first put the output of extract_even operation and then the
5106 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5107 The input for the second stage is:
5109 1st vec (E1): 0 2 4 6 8 10 12 14
5110 2nd vec (E3): 16 18 20 22 24 26 28 30
5111 3rd vec (E2): 1 3 5 7 9 11 13 15
5112 4th vec (E4): 17 19 21 23 25 27 29 31
5114 The output of the second stage:
5116 E1: 0 4 8 12 16 20 24 28
5117 E2: 2 6 10 14 18 22 26 30
5118 E3: 1 5 9 13 17 21 25 29
5119 E4: 3 7 11 15 19 23 27 31
5121 And RESULT_CHAIN after reordering:
5123 1st vec (E1): 0 4 8 12 16 20 24 28
5124 2nd vec (E3): 1 5 9 13 17 21 25 29
5125 3rd vec (E2): 2 6 10 14 18 22 26 30
5126 4th vec (E4): 3 7 11 15 19 23 27 31. */
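/* Illustrative sketch (standalone C): for a power-of-two LENGTH the two
   selectors built below extract the even and odd elements of the
   concatenation of two vectors; for NELT == 8 they are
   {0, 2, 4, 6, 8, 10, 12, 14} and {1, 3, 5, 7, 9, 11, 13, 15}.  The helper
   name is hypothetical.  */
#if 0
static void
example_even_odd_masks (unsigned int nelt,
                        unsigned char *even, unsigned char *odd)
{
  for (unsigned int i = 0; i < nelt; i++)
    {
      even[i] = i * 2;          /* 0, 2, 4, ...  */
      odd[i] = i * 2 + 1;       /* 1, 3, 5, ...  */
    }
}
#endif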
5128 static void
5129 vect_permute_load_chain (vec<tree> dr_chain,
5130 unsigned int length,
5131 gimple *stmt,
5132 gimple_stmt_iterator *gsi,
5133 vec<tree> *result_chain)
5135 tree data_ref, first_vect, second_vect;
5136 tree perm_mask_even, perm_mask_odd;
5137 tree perm3_mask_low, perm3_mask_high;
5138 gimple *perm_stmt;
5139 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5140 unsigned int i, j, log_length = exact_log2 (length);
5141 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5142 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5144 result_chain->quick_grow (length);
5145 memcpy (result_chain->address (), dr_chain.address (),
5146 length * sizeof (tree));
5148 if (length == 3)
5150 unsigned int k;
5152 for (k = 0; k < 3; k++)
5154 for (i = 0; i < nelt; i++)
5155 if (3 * i + k < 2 * nelt)
5156 sel[i] = 3 * i + k;
5157 else
5158 sel[i] = 0;
5159 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
5161 for (i = 0, j = 0; i < nelt; i++)
5162 if (3 * i + k < 2 * nelt)
5163 sel[i] = i;
5164 else
5165 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5167 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
5169 first_vect = dr_chain[0];
5170 second_vect = dr_chain[1];
5172 /* Create interleaving stmt (low part of):
5173 low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5174 ...}> */
5175 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5176 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5177 second_vect, perm3_mask_low);
5178 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5180 /* Create interleaving stmt (high part of):
5181 high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5182 ...}> */
5183 first_vect = data_ref;
5184 second_vect = dr_chain[2];
5185 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5186 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5187 second_vect, perm3_mask_high);
5188 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5189 (*result_chain)[k] = data_ref;
5192 else
5194 /* If length is not equal to 3 then only power of 2 is supported. */
5195 gcc_assert (exact_log2 (length) != -1);
5197 for (i = 0; i < nelt; ++i)
5198 sel[i] = i * 2;
5199 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
5201 for (i = 0; i < nelt; ++i)
5202 sel[i] = i * 2 + 1;
5203 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
5205 for (i = 0; i < log_length; i++)
5207 for (j = 0; j < length; j += 2)
5209 first_vect = dr_chain[j];
5210 second_vect = dr_chain[j+1];
5212 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5213 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5214 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5215 first_vect, second_vect,
5216 perm_mask_even);
5217 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5218 (*result_chain)[j/2] = data_ref;
5220 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5221 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5222 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5223 first_vect, second_vect,
5224 perm_mask_odd);
5225 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5226 (*result_chain)[j/2+length/2] = data_ref;
5228 memcpy (dr_chain.address (), result_chain->address (),
5229 length * sizeof (tree));
5234 /* Function vect_shift_permute_load_chain.
5236 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
5237 sequence of stmts to reorder the input data accordingly.
5238 Return the final references for loads in RESULT_CHAIN.
5239 Return true if successful, false otherwise.
5241 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5242 The input is 3 vectors each containing 8 elements. We assign a
5243 number to each element, the input sequence is:
5245 1st vec: 0 1 2 3 4 5 6 7
5246 2nd vec: 8 9 10 11 12 13 14 15
5247 3rd vec: 16 17 18 19 20 21 22 23
5249 The output sequence should be:
5251 1st vec: 0 3 6 9 12 15 18 21
5252 2nd vec: 1 4 7 10 13 16 19 22
5253 3rd vec: 2 5 8 11 14 17 20 23
5255 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5257 First we shuffle all 3 vectors to get correct elements order:
5259 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5260 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5261 3rd vec: (16 19 22) (17 20 23) (18 21)
5263 Next we unite and shift vector 3 times:
5265 1st step:
5266 shift right by 6 the concatenation of:
5267 "1st vec" and "2nd vec"
5268 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5269 "2nd vec" and "3rd vec"
5270 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5271 "3rd vec" and "1st vec"
5272 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5273 | New vectors |
5275 So that now new vectors are:
5277 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5278 2nd vec: (10 13) (16 19 22) (17 20 23)
5279 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5281 2nd step:
5282 shift right by 5 the concatenation of:
5283 "1st vec" and "3rd vec"
5284 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5285 "2nd vec" and "1st vec"
5286 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5287 "3rd vec" and "2nd vec"
5288 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5289 | New vectors |
5291 So that now new vectors are:
5293 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5294 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5295 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5297 3rd step:
5298 shift right by 5 the concatenation of:
5299 "1st vec" and "1st vec"
5300 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5301 shift right by 3 the concatenation of:
5302 "2nd vec" and "2nd vec"
5303 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5304 | New vectors |
5306 So that now all vectors are READY:
5307 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5308 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5309 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5311 This algorithm is faster than one in vect_permute_load_chain if:
5312 1. "shift of a concatination" is faster than general permutation.
5313 This is usually so.
5314 2. The TARGET machine can't execute vector instructions in parallel.
5315 This is because each step of the algorithm depends on the previous one.
5316 The algorithm in vect_permute_load_chain is much more parallel.
5318 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
5321 static bool
5322 vect_shift_permute_load_chain (vec<tree> dr_chain,
5323 unsigned int length,
5324 gimple *stmt,
5325 gimple_stmt_iterator *gsi,
5326 vec<tree> *result_chain)
5328 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5329 tree perm2_mask1, perm2_mask2, perm3_mask;
5330 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5331 gimple *perm_stmt;
5333 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5334 unsigned int i;
5335 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5336 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5337 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5338 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5340 result_chain->quick_grow (length);
5341 memcpy (result_chain->address (), dr_chain.address (),
5342 length * sizeof (tree));
5344 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
5346 unsigned int j, log_length = exact_log2 (length);
5347 for (i = 0; i < nelt / 2; ++i)
5348 sel[i] = i * 2;
5349 for (i = 0; i < nelt / 2; ++i)
5350 sel[nelt / 2 + i] = i * 2 + 1;
5351 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5353 if (dump_enabled_p ())
5354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5355 "shuffle of 2 fields structure is not \
5356 supported by target\n");
5357 return false;
5359 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
5361 for (i = 0; i < nelt / 2; ++i)
5362 sel[i] = i * 2 + 1;
5363 for (i = 0; i < nelt / 2; ++i)
5364 sel[nelt / 2 + i] = i * 2;
5365 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5367 if (dump_enabled_p ())
5368 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5369 "shuffle of 2 fields structure is not \
5370 supported by target\n");
5371 return false;
5373 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
5375 /* Generating permutation constant to shift all elements.
5376 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5377 for (i = 0; i < nelt; i++)
5378 sel[i] = nelt / 2 + i;
5379 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5381 if (dump_enabled_p ())
5382 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5383 "shift permutation is not supported by target\n");
5384 return false;
5386 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5388 /* Generating permutation constant to select vector from 2.
5389 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5390 for (i = 0; i < nelt / 2; i++)
5391 sel[i] = i;
5392 for (i = nelt / 2; i < nelt; i++)
5393 sel[i] = nelt + i;
5394 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5396 if (dump_enabled_p ())
5397 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5398 "select is not supported by target\n");
5399 return false;
5401 select_mask = vect_gen_perm_mask_checked (vectype, sel);
5403 for (i = 0; i < log_length; i++)
5405 for (j = 0; j < length; j += 2)
5407 first_vect = dr_chain[j];
5408 second_vect = dr_chain[j + 1];
5410 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5411 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5412 first_vect, first_vect,
5413 perm2_mask1);
5414 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5415 vect[0] = data_ref;
5417 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5418 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5419 second_vect, second_vect,
5420 perm2_mask2);
5421 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5422 vect[1] = data_ref;
5424 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5425 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5426 vect[0], vect[1], shift1_mask);
5427 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5428 (*result_chain)[j/2 + length/2] = data_ref;
5430 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5431 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5432 vect[0], vect[1], select_mask);
5433 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5434 (*result_chain)[j/2] = data_ref;
5436 memcpy (dr_chain.address (), result_chain->address (),
5437 length * sizeof (tree));
5439 return true;
5441 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5443 unsigned int k = 0, l = 0;
5445 /* Generating permutation constant to get all elements in right order.
5446 For vector length 8 it is {0 3 6 1 4 7 2 5}. */

5447 for (i = 0; i < nelt; i++)
5449 if (3 * k + (l % 3) >= nelt)
5451 k = 0;
5452 l += (3 - (nelt % 3));
5454 sel[i] = 3 * k + (l % 3);
5455 k++;
5457 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5459 if (dump_enabled_p ())
5460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5461 "shuffle of 3 fields structure is not \
5462 supported by target\n");
5463 return false;
5465 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
5467 /* Generating permutation constant to shift all elements.
5468 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5469 for (i = 0; i < nelt; i++)
5470 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5471 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5473 if (dump_enabled_p ())
5474 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5475 "shift permutation is not supported by target\n");
5476 return false;
5478 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5480 /* Generating permutation constant to shift all elements.
5481 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5482 for (i = 0; i < nelt; i++)
5483 sel[i] = 2 * (nelt / 3) + 1 + i;
5484 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5486 if (dump_enabled_p ())
5487 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5488 "shift permutation is not supported by target\n");
5489 return false;
5491 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
5493 /* Generating permutation constant to shift all elements.
5494 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5495 for (i = 0; i < nelt; i++)
5496 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5497 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5499 if (dump_enabled_p ())
5500 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5501 "shift permutation is not supported by target\n");
5502 return false;
5504 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
5506 /* Generating permutation constant to shift all elements.
5507 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5508 for (i = 0; i < nelt; i++)
5509 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5510 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5512 if (dump_enabled_p ())
5513 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5514 "shift permutation is not supported by target\n");
5515 return false;
5517 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
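      /* For illustration, with an 8-element vector type the masks built
	 above are
	   perm3_mask  = {0 3 6 1 4 7 2 5}
	   shift1_mask = {6 7 8 9 10 11 12 13}
	   shift2_mask = {5 6 7 8 9 10 11 12}
	   shift3_mask = {3 4 5 6 7 8 9 10}
	   shift4_mask = {5 6 7 8 9 10 11 12}
	 and the three loops plus the two final permutations below turn the
	 input chain
	   {a0 b0 c0 a1 b1 c1 a2 b2}, {c2 a3 b3 c3 a4 b4 c4 a5},
	   {b5 c5 a6 b6 c6 a7 b7 c7}
	 into {a0 ... a7}, {b0 ... b7} and {c0 ... c7} in RESULT_CHAIN[0],
	 RESULT_CHAIN[1] and RESULT_CHAIN[2] respectively.  */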
5519 for (k = 0; k < 3; k++)
5521 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5522 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5523 dr_chain[k], dr_chain[k],
5524 perm3_mask);
5525 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5526 vect[k] = data_ref;
5529 for (k = 0; k < 3; k++)
5531 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5532 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5533 vect[k % 3], vect[(k + 1) % 3],
5534 shift1_mask);
5535 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5536 vect_shift[k] = data_ref;
5539 for (k = 0; k < 3; k++)
5541 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5542 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5543 vect_shift[(4 - k) % 3],
5544 vect_shift[(3 - k) % 3],
5545 shift2_mask);
5546 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5547 vect[k] = data_ref;
5550 (*result_chain)[3 - (nelt % 3)] = vect[2];
5552 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5553 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5554 vect[0], shift3_mask);
5555 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5556 (*result_chain)[nelt % 3] = data_ref;
5558 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5559 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5560 vect[1], shift4_mask);
5561 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5562 (*result_chain)[0] = data_ref;
5563 return true;
5565 return false;
5568 /* Function vect_transform_grouped_load.
5570 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5571 to perform their permutation and ascribe the resulting vectorized statements to
5572 the scalar statements.
5575 void
5576 vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
5577 gimple_stmt_iterator *gsi)
5579 machine_mode mode;
5580 vec<tree> result_chain = vNULL;
5582 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5583 RESULT_CHAIN is the output of vect_permute_load_chain; it contains permuted
5584 vectors that are ready for vector computation. */
5585 result_chain.create (size);
5587 /* If the reassociation width for the vector type is 2 or greater, the target
5588 machine can execute 2 or more vector instructions in parallel.  Otherwise
5589 try to get the chain for the load group using vect_shift_permute_load_chain. */
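     /* Because the condition below short-circuits, the shift-based scheme
	is attempted only when the reassociation width for VEC_PERM_EXPR is
	not greater than 1 and SIZE is not a power of two; if the attempt
	fails, or in any other case, the generic permute chain is built
	instead.  */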
5590 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5591 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5592 || exact_log2 (size) != -1
5593 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5594 gsi, &result_chain))
5595 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5596 vect_record_grouped_load_vectors (stmt, result_chain);
5597 result_chain.release ();
5600 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5601 generated as part of the vectorization of STMT. Assign the statement
5602 for each vector to the associated scalar statement. */
5604 void
5605 vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
5607 gimple *first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5608 gimple *next_stmt, *new_stmt;
5609 unsigned int i, gap_count;
5610 tree tmp_data_ref;
5612 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5613 Since we scan the chain starting from its first node, their order
5614 corresponds to the order of data-refs in RESULT_CHAIN. */
5615 next_stmt = first_stmt;
5616 gap_count = 1;
5617 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5619 if (!next_stmt)
5620 break;
5622 /* Skip the gaps.  Loads created for the gaps will be removed by the dead
5623 code elimination pass later.  No need to check for the first stmt in
5624 the group, since it always exists.
5625 GROUP_GAP is the number of steps in elements from the previous
5626 access (if there is no gap GROUP_GAP is 1). We skip loads that
5627 correspond to the gaps. */
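	  /* For example, assuming the group loads fields 0 and 2 of a
	     three-field structure, GROUP_GAP of the second statement is 2:
	     the first vector of RESULT_CHAIN is assigned to the first
	     statement, the second vector, which was loaded only to fill the
	     gap, is skipped here and later removed as dead code, and the
	     third vector is assigned to the second statement.  */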
5628 if (next_stmt != first_stmt
5629 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5631 gap_count++;
5632 continue;
5635 while (next_stmt)
5637 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5638 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5639 copies, and we put the new vector statement in the first available
5640 RELATED_STMT. */
5641 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5642 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5643 else
5645 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5647 gimple *prev_stmt =
5648 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5649 gimple *rel_stmt =
5650 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5651 while (rel_stmt)
5653 prev_stmt = rel_stmt;
5654 rel_stmt =
5655 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5658 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5659 new_stmt;
5663 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5664 gap_count = 1;
5665 /* If NEXT_STMT accesses the same DR as the previous statement,
5666 put the same TMP_DATA_REF as its vectorized statement; otherwise
5667 get the next data-ref from RESULT_CHAIN. */
5668 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5669 break;
5674 /* Function vect_can_force_dr_alignment_p.
5676 Returns whether the alignment of a DECL can be forced to be aligned
5677 on an ALIGNMENT-bit boundary. */
5679 bool
5680 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5682 if (TREE_CODE (decl) != VAR_DECL)
5683 return false;
5685 if (decl_in_symtab_p (decl)
5686 && !symtab_node::get (decl)->can_increase_alignment_p ())
5687 return false;
5689 if (TREE_STATIC (decl))
5690 return (alignment <= MAX_OFILE_ALIGNMENT);
5691 else
5692 return (alignment <= MAX_STACK_ALIGNMENT);
5696 /* Return whether the data reference DR is supported with respect to its
5697 alignment.
5698 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5699 if it is aligned, i.e., check if it is possible to vectorize it with different
5700 alignment. */
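   /* Possible results, as selected by the checks below:
	dr_aligned: the access is known to be aligned;
	dr_unaligned_supported: the target can perform the misaligned access
	  directly;
	dr_explicit_realign_optimized: the explicit realignment scheme with
	  the realignment token computed once in the loop preheader;
	dr_explicit_realign: the explicit realignment scheme without that
	  optimization;
	dr_unaligned_unsupported: the access cannot be vectorized with this
	  misalignment.  */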
5702 enum dr_alignment_support
5703 vect_supportable_dr_alignment (struct data_reference *dr,
5704 bool check_aligned_accesses)
5706 gimple *stmt = DR_STMT (dr);
5707 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5708 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5709 machine_mode mode = TYPE_MODE (vectype);
5710 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5711 struct loop *vect_loop = NULL;
5712 bool nested_in_vect_loop = false;
5714 if (aligned_access_p (dr) && !check_aligned_accesses)
5715 return dr_aligned;
5717 /* For now assume all conditional loads/stores support unaligned
5718 access without any special code. */
5719 if (is_gimple_call (stmt)
5720 && gimple_call_internal_p (stmt)
5721 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5722 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5723 return dr_unaligned_supported;
5725 if (loop_vinfo)
5727 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5728 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5731 /* Possibly unaligned access. */
5733 /* We can choose between using the implicit realignment scheme (generating
5734 a misaligned_move stmt) and the explicit realignment scheme (generating
5735 aligned loads with a REALIGN_LOAD). There are two variants to the
5736 explicit realignment scheme: optimized, and unoptimized.
5737 We can optimize the realignment only if the step between consecutive
5738 vector loads is equal to the vector size. Since the vector memory
5739 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5740 is guaranteed that the misalignment amount remains the same throughout the
5741 execution of the vectorized loop. Therefore, we can create the
5742 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5743 at the loop preheader.
5745 However, in the case of outer-loop vectorization, when vectorizing a
5746 memory access in the inner-loop nested within the LOOP that is now being
5747 vectorized, while it is guaranteed that the misalignment of the
5748 vectorized memory access will remain the same in different outer-loop
5749 iterations, it is *not* guaranteed that it will remain the same throughout
5750 the execution of the inner-loop. This is because the inner-loop advances
5751 with the original scalar step (and not in steps of VS). If the inner-loop
5752 step happens to be a multiple of VS, then the misalignment remains fixed
5753 and we can use the optimized realignment scheme. For example:
5755 for (i=0; i<N; i++)
5756 for (j=0; j<M; j++)
5757 s += a[i+j];
5759 When vectorizing the i-loop in the above example, the step between
5760 consecutive vector loads is 1, and so the misalignment does not remain
5761 fixed across the execution of the inner-loop, and the realignment cannot
5762 be optimized (as illustrated in the following pseudo vectorized loop):
5764 for (i=0; i<N; i+=4)
5765 for (j=0; j<M; j++){
5766 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5767 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5768 // (assuming that we start from an aligned address).
5771 We therefore have to use the unoptimized realignment scheme:
5773 for (i=0; i<N; i+=4)
5774 for (j=k; j<M; j+=4)
5775 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5776 // that the misalignment of the initial address is
5777 // 0).
5779 The loop can then be vectorized as follows:
5781 for (k=0; k<4; k++){
5782 rt = get_realignment_token (&vp[k]);
5783 for (i=0; i<N; i+=4){
5784 v1 = vp[i+k];
5785 for (j=k; j<M; j+=4){
5786 v2 = vp[i+j+VS-1];
5787 va = REALIGN_LOAD <v1,v2,rt>;
5788 vs += va;
5789 v1 = v2;
5792 } */
5794 if (DR_IS_READ (dr))
5796 bool is_packed = false;
5797 tree type = (TREE_TYPE (DR_REF (dr)));
5799 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
5800 && (!targetm.vectorize.builtin_mask_for_load
5801 || targetm.vectorize.builtin_mask_for_load ()))
5803 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5804 if ((nested_in_vect_loop
5805 && (TREE_INT_CST_LOW (DR_STEP (dr))
5806 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5807 || !loop_vinfo)
5808 return dr_explicit_realign;
5809 else
5810 return dr_explicit_realign_optimized;
5812 if (!known_alignment_for_access_p (dr))
5813 is_packed = not_size_aligned (DR_REF (dr));
5815 if ((TYPE_USER_ALIGN (type) && !is_packed)
5816 || targetm.vectorize.
5817 support_vector_misalignment (mode, type,
5818 DR_MISALIGNMENT (dr), is_packed))
5819 /* Can't software pipeline the loads, but can at least do them. */
5820 return dr_unaligned_supported;
5822 else
5824 bool is_packed = false;
5825 tree type = (TREE_TYPE (DR_REF (dr)));
5827 if (!known_alignment_for_access_p (dr))
5828 is_packed = not_size_aligned (DR_REF (dr));
5830 if ((TYPE_USER_ALIGN (type) && !is_packed)
5831 || targetm.vectorize.
5832 support_vector_misalignment (mode, type,
5833 DR_MISALIGNMENT (dr), is_packed))
5834 return dr_unaligned_supported;
5837 /* Unsupported. */
5838 return dr_unaligned_unsupported;