2015-03-05 Richard Biener <rguenther@suse.de>
[official-gcc.git] / gcc / tree-vect-data-refs.c
blob ffe83e2b2f33d353e1a5abf97381da4c72f95b27
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tm_p.h"
40 #include "target.h"
41 #include "predict.h"
42 #include "hard-reg-set.h"
43 #include "function.h"
44 #include "dominance.h"
45 #include "cfg.h"
46 #include "basic-block.h"
47 #include "gimple-pretty-print.h"
48 #include "tree-ssa-alias.h"
49 #include "internal-fn.h"
50 #include "tree-eh.h"
51 #include "gimple-expr.h"
52 #include "is-a.h"
53 #include "gimple.h"
54 #include "gimplify.h"
55 #include "gimple-iterator.h"
56 #include "gimplify-me.h"
57 #include "gimple-ssa.h"
58 #include "tree-phinodes.h"
59 #include "ssa-iterators.h"
60 #include "stringpool.h"
61 #include "tree-ssanames.h"
62 #include "tree-ssa-loop-ivopts.h"
63 #include "tree-ssa-loop-manip.h"
64 #include "tree-ssa-loop.h"
65 #include "cfgloop.h"
66 #include "tree-chrec.h"
67 #include "tree-scalar-evolution.h"
68 #include "tree-vectorizer.h"
69 #include "diagnostic-core.h"
70 #include "hash-map.h"
71 #include "plugin-api.h"
72 #include "ipa-ref.h"
73 #include "cgraph.h"
74 /* Need to include rtl.h, expr.h, etc. for optabs. */
75 #include "hashtab.h"
76 #include "rtl.h"
77 #include "flags.h"
78 #include "statistics.h"
79 #include "real.h"
80 #include "fixed-value.h"
81 #include "insn-config.h"
82 #include "expmed.h"
83 #include "dojump.h"
84 #include "explow.h"
85 #include "calls.h"
86 #include "emit-rtl.h"
87 #include "varasm.h"
88 #include "stmt.h"
89 #include "expr.h"
90 #include "insn-codes.h"
91 #include "optabs.h"
92 #include "builtins.h"
94 /* Return true if load- or store-lanes optab OPTAB is implemented for
95 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
97 static bool
98 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
99 tree vectype, unsigned HOST_WIDE_INT count)
101 machine_mode mode, array_mode;
102 bool limit_p;
104 mode = TYPE_MODE (vectype);
105 limit_p = !targetm.array_mode_supported_p (mode, count);
106 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
107 MODE_INT, limit_p);
109 if (array_mode == BLKmode)
111 if (dump_enabled_p ())
112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
113 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
114 GET_MODE_NAME (mode), count);
115 return false;
118 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
120 if (dump_enabled_p ())
121 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
122 "cannot use %s<%s><%s>\n", name,
123 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
124 return false;
127 if (dump_enabled_p ())
128 dump_printf_loc (MSG_NOTE, vect_location,
129 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
130 GET_MODE_NAME (mode));
132 return true;
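/* For instance, with VECTYPE V4SI and COUNT 2 this looks for a 256-bit
   integer array mode and, if the target provides one, checks whether the
   corresponding load-/store-lanes handler exists for that (array mode,
   vector mode) pair.  A sketch of the intent only; the exact modes depend
   on the target.  */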
136 /* Return the smallest scalar part of STMT.
137 This is used to determine the vectype of the stmt. We generally set the
138 vectype according to the type of the result (lhs). For stmts whose
139 result-type is different than the type of the arguments (e.g., demotion,
140 promotion), vectype will be reset appropriately (later). Note that we have
141 to visit the smallest datatype in this function, because that determines the
142 VF. If the smallest datatype in the loop is present only as the rhs of a
143 promotion operation - we'd miss it.
144 Such a case, where a variable of this datatype does not appear in the lhs
145 anywhere in the loop, can only occur if it's an invariant: e.g.:
146 'int_x = (int) short_inv', which we'd expect to have been optimized away by
147 invariant motion. However, we cannot rely on invariant motion to always
148 take invariants out of the loop, and so in the case of promotion we also
149 have to check the rhs.
150 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
151 types. */
153 tree
154 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
155 HOST_WIDE_INT *rhs_size_unit)
157 tree scalar_type = gimple_expr_type (stmt);
158 HOST_WIDE_INT lhs, rhs;
160 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
162 if (is_gimple_assign (stmt)
163 && (gimple_assign_cast_p (stmt)
164 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
165 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
166 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
168 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
170 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
171 if (rhs < lhs)
172 scalar_type = rhs_type;
175 *lhs_size_unit = lhs;
176 *rhs_size_unit = rhs;
177 return scalar_type;
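/* For example, given the promotion statement

     int_x = (int) short_y;

   the lhs has size 4 and the rhs has size 2, so the smaller short type is
   returned; it is that type which determines the VF.  */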
181 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
182 tested at run-time. Return TRUE if DDR was successfully inserted.
183 Return false if versioning is not supported. */
185 static bool
186 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
188 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
190 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
191 return false;
193 if (dump_enabled_p ())
195 dump_printf_loc (MSG_NOTE, vect_location,
196 "mark for run-time aliasing test between ");
197 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
198 dump_printf (MSG_NOTE, " and ");
199 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
200 dump_printf (MSG_NOTE, "\n");
203 if (optimize_loop_nest_for_size_p (loop))
205 if (dump_enabled_p ())
206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
207 "versioning not supported when optimizing"
208 " for size.\n");
209 return false;
212 /* FORNOW: We don't support versioning with outer-loop vectorization. */
213 if (loop->inner)
215 if (dump_enabled_p ())
216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
217 "versioning not yet supported for outer-loops.\n");
218 return false;
221 /* FORNOW: We don't support creating runtime alias tests for non-constant
222 step. */
223 if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
224 || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
226 if (dump_enabled_p ())
227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
228 "versioning not yet supported for non-constant "
229 "step\n");
230 return false;
233 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
234 return true;
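/* The DDRs collected here are later expanded into pairwise overlap tests
   guarding the vectorized loop version, conceptually of the form

     if (a_end <= b_start || b_end <= a_start)
       ... vectorized loop ...
     else
       ... scalar loop ...

   (a sketch only; the actual check is built elsewhere in the vectorizer).  */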
238 /* Function vect_analyze_data_ref_dependence.
240 Return TRUE if there (might) exist a dependence between a memory-reference
241 DRA and a memory-reference DRB.  When versioning for alias can check the
242 dependence at run-time, return FALSE. Adjust *MAX_VF according to
243 the data dependence. */
245 static bool
246 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
247 loop_vec_info loop_vinfo, int *max_vf)
249 unsigned int i;
250 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
251 struct data_reference *dra = DDR_A (ddr);
252 struct data_reference *drb = DDR_B (ddr);
253 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
254 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
255 lambda_vector dist_v;
256 unsigned int loop_depth;
258 /* In loop analysis all data references should be vectorizable. */
259 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
260 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
261 gcc_unreachable ();
263 /* Independent data accesses. */
264 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
265 return false;
267 if (dra == drb
268 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
269 return false;
271 /* Even if we have an anti-dependence then, as the vectorized loop covers at
272 least two scalar iterations, there is always also a true dependence.
273 As the vectorizer does not re-order loads and stores we can ignore
274 the anti-dependence if TBAA can disambiguate both DRs similar to the
275 case with known negative distance anti-dependences (positive
276 distance anti-dependences would violate TBAA constraints). */
277 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
278 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
279 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
280 get_alias_set (DR_REF (drb))))
281 return false;
283 /* Unknown data dependence. */
284 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
286 /* If user asserted safelen consecutive iterations can be
287 executed concurrently, assume independence. */
288 if (loop->safelen >= 2)
290 if (loop->safelen < *max_vf)
291 *max_vf = loop->safelen;
292 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
293 return false;
296 if (STMT_VINFO_GATHER_P (stmtinfo_a)
297 || STMT_VINFO_GATHER_P (stmtinfo_b))
299 if (dump_enabled_p ())
301 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
302 "versioning for alias not supported for: "
303 "can't determine dependence between ");
304 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
305 DR_REF (dra));
306 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
307 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
308 DR_REF (drb));
309 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
311 return true;
314 if (dump_enabled_p ())
316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
317 "versioning for alias required: "
318 "can't determine dependence between ");
319 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
320 DR_REF (dra));
321 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
322 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
323 DR_REF (drb));
324 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
327 /* Add to list of ddrs that need to be tested at run-time. */
328 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
331 /* Known data dependence. */
332 if (DDR_NUM_DIST_VECTS (ddr) == 0)
334 /* If user asserted safelen consecutive iterations can be
335 executed concurrently, assume independence. */
336 if (loop->safelen >= 2)
338 if (loop->safelen < *max_vf)
339 *max_vf = loop->safelen;
340 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
341 return false;
344 if (STMT_VINFO_GATHER_P (stmtinfo_a)
345 || STMT_VINFO_GATHER_P (stmtinfo_b))
347 if (dump_enabled_p ())
349 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
350 "versioning for alias not supported for: "
351 "bad dist vector for ");
352 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
353 DR_REF (dra));
354 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
355 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
356 DR_REF (drb));
357 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
359 return true;
362 if (dump_enabled_p ())
364 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
365 "versioning for alias required: "
366 "bad dist vector for ");
367 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
368 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
369 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
370 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
372 /* Add to list of ddrs that need to be tested at run-time. */
373 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
376 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
377 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
379 int dist = dist_v[loop_depth];
381 if (dump_enabled_p ())
382 dump_printf_loc (MSG_NOTE, vect_location,
383 "dependence distance = %d.\n", dist);
385 if (dist == 0)
387 if (dump_enabled_p ())
389 dump_printf_loc (MSG_NOTE, vect_location,
390 "dependence distance == 0 between ");
391 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
392 dump_printf (MSG_NOTE, " and ");
393 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
394 dump_printf (MSG_NOTE, "\n");
397 /* When we perform grouped accesses and perform implicit CSE
398 by detecting equal accesses and doing disambiguation with
399 runtime alias tests like for
400 .. = a[i];
401 .. = a[i+1];
402 a[i] = ..;
403 a[i+1] = ..;
404 *p = ..;
405 .. = a[i];
406 .. = a[i+1];
407 where we will end up loading { a[i], a[i+1] } once, make
408 sure that inserting group loads before the first load and
409 stores after the last store will do the right thing.
410 Similar for groups like
411 a[i] = ...;
412 ... = a[i];
413 a[i+1] = ...;
414 where loads from the group interleave with the store. */
415 if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
416 || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
418 gimple earlier_stmt;
419 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
420 if (DR_IS_WRITE
421 (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
423 if (dump_enabled_p ())
424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
425 "READ_WRITE dependence in interleaving."
426 "\n");
427 return true;
431 continue;
434 if (dist > 0 && DDR_REVERSED_P (ddr))
436 /* If DDR_REVERSED_P the order of the data-refs in DDR was
437 reversed (to make distance vector positive), and the actual
438 distance is negative. */
439 if (dump_enabled_p ())
440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
441 "dependence distance negative.\n");
442 /* Record a negative dependence distance to later limit the
443 amount of stmt copying / unrolling we can perform.
444 Only need to handle read-after-write dependence. */
445 if (DR_IS_READ (drb)
446 && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
447 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
448 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
449 continue;
452 if (abs (dist) >= 2
453 && abs (dist) < *max_vf)
455 /* The dependence distance requires reduction of the maximal
456 vectorization factor. */
457 *max_vf = abs (dist);
458 if (dump_enabled_p ())
459 dump_printf_loc (MSG_NOTE, vect_location,
460 "adjusting maximal vectorization factor to %i\n",
461 *max_vf);
464 if (abs (dist) >= *max_vf)
466 /* Dependence distance does not create dependence, as far as
467 vectorization is concerned, in this case. */
468 if (dump_enabled_p ())
469 dump_printf_loc (MSG_NOTE, vect_location,
470 "dependence distance >= VF.\n");
471 continue;
474 if (dump_enabled_p ())
476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
477 "not vectorized, possible dependence "
478 "between data-refs ");
479 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
480 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
481 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
482 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
485 return true;
488 return false;
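/* As an illustration of the distance handling above: for

     for (i = 0; i < N; i++)
       a[i + 3] = a[i] + 1;

   the read-after-write distance is 3, so *max_vf is lowered to 3 and the
   loop can still be vectorized with a small enough vectorization factor,
   whereas a distance of 1 would make the loop unvectorizable.  */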
491 /* Function vect_analyze_data_ref_dependences.
493 Examine all the data references in the loop, and make sure there do not
494 exist any data dependences between them. Set *MAX_VF according to
495 the maximum vectorization factor the data dependences allow. */
497 bool
498 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
500 unsigned int i;
501 struct data_dependence_relation *ddr;
503 if (dump_enabled_p ())
504 dump_printf_loc (MSG_NOTE, vect_location,
505 "=== vect_analyze_data_ref_dependences ===\n");
507 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
508 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
509 &LOOP_VINFO_DDRS (loop_vinfo),
510 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
511 return false;
513 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
514 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
515 return false;
517 return true;
521 /* Function vect_slp_analyze_data_ref_dependence.
523 Return TRUE if there (might) exist a dependence between a memory-reference
524 DRA and a memory-reference DRB that prohibits SLP (basic-block)
525 vectorization.  Unlike the loop variant above, there is no run-time
526 alias versioning and no *MAX_VF to adjust here. */
528 static bool
529 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
531 struct data_reference *dra = DDR_A (ddr);
532 struct data_reference *drb = DDR_B (ddr);
534 /* We need to check dependences of statements marked as unvectorizable
535 as well; they can still prohibit vectorization. */
537 /* Independent data accesses. */
538 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
539 return false;
541 if (dra == drb)
542 return false;
544 /* Read-read is OK. */
545 if (DR_IS_READ (dra) && DR_IS_READ (drb))
546 return false;
548 /* If dra and drb are part of the same interleaving chain consider
549 them independent. */
550 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
551 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
552 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
553 return false;
555 /* Unknown data dependence. */
556 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
558 if (dump_enabled_p ())
560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
561 "can't determine dependence between ");
562 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
563 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
564 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
565 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
568 else if (dump_enabled_p ())
570 dump_printf_loc (MSG_NOTE, vect_location,
571 "determined dependence between ");
572 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
573 dump_printf (MSG_NOTE, " and ");
574 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
575 dump_printf (MSG_NOTE, "\n");
578 /* We do not vectorize basic blocks with write-write dependencies. */
579 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
580 return true;
582 /* If we have a read-write dependence, check that the load comes before the
583 store.  When we vectorize basic blocks, a vector load can only be placed
584 before its corresponding scalar load, and a vector store only after its
585 corresponding scalar store.  So the order of the accesses is preserved
586 when the load is before the store. */
587 gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
588 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
590 /* That only holds for load-store pairs taking part in vectorization. */
591 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
592 && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
593 return false;
596 return true;
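/* For instance, in a straight-line block

     x = *p;
     *q = y;

   where p may alias q, the earlier statement is the load, so vectorization
   keeps the load before the store and the pair is accepted; with the
   statements in the opposite order the dependence is rejected.  */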
600 /* Function vect_slp_analyze_data_ref_dependences.
602 Examine all the data references in the basic-block, and make sure there
603 do not exist any data dependences between them that would prohibit
604 SLP vectorization. */
606 bool
607 vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
609 struct data_dependence_relation *ddr;
610 unsigned int i;
612 if (dump_enabled_p ())
613 dump_printf_loc (MSG_NOTE, vect_location,
614 "=== vect_slp_analyze_data_ref_dependences ===\n");
616 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
617 &BB_VINFO_DDRS (bb_vinfo),
618 vNULL, true))
619 return false;
621 FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
622 if (vect_slp_analyze_data_ref_dependence (ddr))
623 return false;
625 return true;
629 /* Function vect_compute_data_ref_alignment
631 Compute the misalignment of the data reference DR.
633 Output:
634 1. If during the misalignment computation it is found that the data reference
635 cannot be vectorized then false is returned.
636 2. DR_MISALIGNMENT (DR) is defined.
638 FOR NOW: No analysis is actually performed. Misalignment is calculated
639 only for trivial cases. TODO. */
641 static bool
642 vect_compute_data_ref_alignment (struct data_reference *dr)
644 gimple stmt = DR_STMT (dr);
645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
647 struct loop *loop = NULL;
648 tree ref = DR_REF (dr);
649 tree vectype;
650 tree base, base_addr;
651 bool base_aligned;
652 tree misalign;
653 tree aligned_to;
654 unsigned HOST_WIDE_INT alignment;
656 if (dump_enabled_p ())
657 dump_printf_loc (MSG_NOTE, vect_location,
658 "vect_compute_data_ref_alignment:\n");
660 if (loop_vinfo)
661 loop = LOOP_VINFO_LOOP (loop_vinfo);
663 /* Initialize misalignment to unknown. */
664 SET_DR_MISALIGNMENT (dr, -1);
666 /* Strided loads perform only component accesses, misalignment information
667 is irrelevant for them. */
668 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
669 return true;
671 misalign = DR_INIT (dr);
672 aligned_to = DR_ALIGNED_TO (dr);
673 base_addr = DR_BASE_ADDRESS (dr);
674 vectype = STMT_VINFO_VECTYPE (stmt_info);
676 /* In case the dataref is in an inner-loop of the loop that is being
677 vectorized (LOOP), we use the base and misalignment information
678 relative to the outer-loop (LOOP). This is ok only if the misalignment
679 stays the same throughout the execution of the inner-loop, which is why
680 we have to check that the stride of the dataref in the inner-loop is a
681 multiple of the vector size. */
682 if (loop && nested_in_vect_loop_p (loop, stmt))
684 tree step = DR_STEP (dr);
685 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
687 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
689 if (dump_enabled_p ())
690 dump_printf_loc (MSG_NOTE, vect_location,
691 "inner step divides the vector-size.\n");
692 misalign = STMT_VINFO_DR_INIT (stmt_info);
693 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
694 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
696 else
698 if (dump_enabled_p ())
699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
700 "inner step doesn't divide the vector-size.\n");
701 misalign = NULL_TREE;
705 /* Similarly, if we're doing basic-block vectorization, we can only use
706 base and misalignment information relative to an innermost loop if the
707 misalignment stays the same throughout the execution of the loop.
708 As above, this is the case if the stride of the dataref is a multiple
709 of the vector size. */
710 if (!loop)
712 tree step = DR_STEP (dr);
713 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
715 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
717 if (dump_enabled_p ())
718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
719 "SLP: step doesn't divide the vector-size.\n");
720 misalign = NULL_TREE;
724 alignment = TYPE_ALIGN_UNIT (vectype);
726 if ((compare_tree_int (aligned_to, alignment) < 0)
727 || !misalign)
729 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "Unknown alignment for access: ");
733 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
734 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
736 return true;
739 /* To look at alignment of the base we have to preserve an inner MEM_REF
740 as that carries alignment information of the actual access. */
741 base = ref;
742 while (handled_component_p (base))
743 base = TREE_OPERAND (base, 0);
744 if (TREE_CODE (base) == MEM_REF)
745 base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
746 build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
748 if (get_object_alignment (base) >= TYPE_ALIGN (vectype))
749 base_aligned = true;
750 else
751 base_aligned = false;
753 if (!base_aligned)
755 /* Strip an inner MEM_REF to a bare decl if possible. */
756 if (TREE_CODE (base) == MEM_REF
757 && integer_zerop (TREE_OPERAND (base, 1))
758 && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
759 base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
761 /* Do not change the alignment of global variables here if
762 flag_section_anchors is enabled as we already generated
763 RTL for other functions. Most global variables should
764 have been aligned during the IPA increase_alignment pass. */
765 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
766 || (TREE_STATIC (base) && flag_section_anchors))
768 if (dump_enabled_p ())
770 dump_printf_loc (MSG_NOTE, vect_location,
771 "can't force alignment of ref: ");
772 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
773 dump_printf (MSG_NOTE, "\n");
775 return true;
778 /* Force the alignment of the decl.
779 NOTE: This is the only change to the code we make during
780 the analysis phase, before deciding to vectorize the loop. */
781 if (dump_enabled_p ())
783 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
784 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
785 dump_printf (MSG_NOTE, "\n");
788 ((dataref_aux *)dr->aux)->base_decl = base;
789 ((dataref_aux *)dr->aux)->base_misaligned = true;
792 /* If this is a backward running DR then the first access in the larger
793 vectype is actually N-1 elements before the address in the DR.
794 Adjust misalign accordingly. */
795 if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
797 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
798 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
799 otherwise we wouldn't be here. */
800 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
801 /* PLUS because DR_STEP was negative. */
802 misalign = size_binop (PLUS_EXPR, misalign, offset);
805 SET_DR_MISALIGNMENT (dr,
806 wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
808 if (dump_enabled_p ())
810 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
811 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
812 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
813 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
816 return true;
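/* E.g. for a backward V4SI access with DR_STEP -4 the vector load really
   starts 3 elements (12 bytes) below the address recorded in the DR, so
   3 * -4 = -12 is folded into the misalignment before the final modulo
   above.  */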
820 /* Function vect_compute_data_refs_alignment
822 Compute the misalignment of data references in the loop.
823 Return FALSE if a data reference is found that cannot be vectorized. */
825 static bool
826 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
827 bb_vec_info bb_vinfo)
829 vec<data_reference_p> datarefs;
830 struct data_reference *dr;
831 unsigned int i;
833 if (loop_vinfo)
834 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
835 else
836 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
838 FOR_EACH_VEC_ELT (datarefs, i, dr)
839 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
840 && !vect_compute_data_ref_alignment (dr))
842 if (bb_vinfo)
844 /* Mark unsupported statement as unvectorizable. */
845 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
846 continue;
848 else
849 return false;
852 return true;
856 /* Function vect_update_misalignment_for_peel
858 DR - the data reference whose misalignment is to be adjusted.
859 DR_PEEL - the data reference whose misalignment is being made
860 zero in the vector loop by the peel.
861 NPEEL - the number of iterations in the peel loop if the misalignment
862 of DR_PEEL is known at compile time. */
864 static void
865 vect_update_misalignment_for_peel (struct data_reference *dr,
866 struct data_reference *dr_peel, int npeel)
868 unsigned int i;
869 vec<dr_p> same_align_drs;
870 struct data_reference *current_dr;
871 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
872 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
873 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
874 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
876 /* For interleaved data accesses the step in the loop must be multiplied by
877 the size of the interleaving group. */
878 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
879 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
880 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
881 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
883 /* It can be assumed that the data refs with the same alignment as dr_peel
884 are aligned in the vector loop. */
885 same_align_drs
886 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
887 FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
889 if (current_dr != dr)
890 continue;
891 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
892 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
893 SET_DR_MISALIGNMENT (dr, 0);
894 return;
897 if (known_alignment_for_access_p (dr)
898 && known_alignment_for_access_p (dr_peel))
900 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
901 int misal = DR_MISALIGNMENT (dr);
902 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
903 misal += negative ? -npeel * dr_size : npeel * dr_size;
904 misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
905 SET_DR_MISALIGNMENT (dr, misal);
906 return;
909 if (dump_enabled_p ())
910 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
911 SET_DR_MISALIGNMENT (dr, -1);
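/* For example, peeling NPEEL = 2 iterations for a 4-byte access with a
   known misalignment of 8 against a 16-byte vector alignment yields
   (8 + 2 * 4) & 15 == 0, i.e. that access becomes aligned as well.  */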
915 /* Function vect_verify_datarefs_alignment
917 Return TRUE if all data references in the loop can be
918 handled with respect to alignment. */
920 bool
921 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
923 vec<data_reference_p> datarefs;
924 struct data_reference *dr;
925 enum dr_alignment_support supportable_dr_alignment;
926 unsigned int i;
928 if (loop_vinfo)
929 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
930 else
931 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
933 FOR_EACH_VEC_ELT (datarefs, i, dr)
935 gimple stmt = DR_STMT (dr);
936 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
938 if (!STMT_VINFO_RELEVANT_P (stmt_info))
939 continue;
941 /* For interleaving, only the alignment of the first access matters.
942 Skip statements marked as not vectorizable. */
943 if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
944 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
945 || !STMT_VINFO_VECTORIZABLE (stmt_info))
946 continue;
948 /* Strided loads perform only component accesses, alignment is
949 irrelevant for them. */
950 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
951 continue;
953 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
954 if (!supportable_dr_alignment)
956 if (dump_enabled_p ())
958 if (DR_IS_READ (dr))
959 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
960 "not vectorized: unsupported unaligned load.");
961 else
962 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
963 "not vectorized: unsupported unaligned "
964 "store.");
966 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
967 DR_REF (dr));
968 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
970 return false;
972 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
973 dump_printf_loc (MSG_NOTE, vect_location,
974 "Vectorizing an unaligned access.\n");
976 return true;
979 /* Given a memory reference EXP return whether its alignment is less
980 than its size. */
982 static bool
983 not_size_aligned (tree exp)
985 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
986 return true;
988 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
989 > get_object_alignment (exp));
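/* For instance, a 32-bit integer access through a pointer known to be only
   1-byte aligned has TYPE_SIZE 32 bits but object alignment 8 bits, so it
   is considered not size-aligned.  */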
992 /* Function vector_alignment_reachable_p
994 Return true if vector alignment for DR is reachable by peeling
995 a few loop iterations. Return false otherwise. */
997 static bool
998 vector_alignment_reachable_p (struct data_reference *dr)
1000 gimple stmt = DR_STMT (dr);
1001 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1002 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1004 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1006 /* For interleaved accesses we peel only if the number of iterations in
1007 the prologue loop (VF - misalignment, in elements) is a multiple of
1008 the interleaving group size. */
1009 int elem_size, mis_in_elements;
1010 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1012 /* FORNOW: handle only known alignment. */
1013 if (!known_alignment_for_access_p (dr))
1014 return false;
1016 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1017 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1019 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
1020 return false;
1023 /* If the misalignment is known at compile time then allow peeling
1024 only if natural alignment is reachable through peeling. */
1025 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1027 HOST_WIDE_INT elmsize =
1028 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1029 if (dump_enabled_p ())
1031 dump_printf_loc (MSG_NOTE, vect_location,
1032 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1033 dump_printf (MSG_NOTE,
1034 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
1036 if (DR_MISALIGNMENT (dr) % elmsize)
1038 if (dump_enabled_p ())
1039 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1040 "data size does not divide the misalignment.\n");
1041 return false;
1045 if (!known_alignment_for_access_p (dr))
1047 tree type = TREE_TYPE (DR_REF (dr));
1048 bool is_packed = not_size_aligned (DR_REF (dr));
1049 if (dump_enabled_p ())
1050 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1051 "Unknown misalignment, is_packed = %d\n",is_packed);
1052 if ((TYPE_USER_ALIGN (type) && !is_packed)
1053 || targetm.vectorize.vector_alignment_reachable (type, is_packed))
1054 return true;
1055 else
1056 return false;
1059 return true;
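/* E.g. for V4SI (4 elements), a misalignment of 2 elements and an
   interleaving group of size 3, the prologue would need 2 peeled
   iterations, which is not a multiple of 3, so alignment is not
   reachable by peeling.  */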
1063 /* Calculate the cost of the memory access represented by DR. */
1065 static void
1066 vect_get_data_access_cost (struct data_reference *dr,
1067 unsigned int *inside_cost,
1068 unsigned int *outside_cost,
1069 stmt_vector_for_cost *body_cost_vec)
1071 gimple stmt = DR_STMT (dr);
1072 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1073 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1074 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1075 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1076 int ncopies = vf / nunits;
1078 if (DR_IS_READ (dr))
1079 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1080 NULL, body_cost_vec, false);
1081 else
1082 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1084 if (dump_enabled_p ())
1085 dump_printf_loc (MSG_NOTE, vect_location,
1086 "vect_get_data_access_cost: inside_cost = %d, "
1087 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1091 /* Insert DR into peeling hash table with NPEEL as key. */
1093 static void
1094 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1095 int npeel)
1097 struct _vect_peel_info elem, *slot;
1098 _vect_peel_info **new_slot;
1099 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1101 elem.npeel = npeel;
1102 slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
1103 if (slot)
1104 slot->count++;
1105 else
1107 slot = XNEW (struct _vect_peel_info);
1108 slot->npeel = npeel;
1109 slot->dr = dr;
1110 slot->count = 1;
1111 new_slot
1112 = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
1113 *new_slot = slot;
1116 if (!supportable_dr_alignment
1117 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1118 slot->count += VECT_MAX_COST;
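/* Boosting the count by VECT_MAX_COST for a DR whose misalignment is not
   otherwise supportable makes the most-frequent selection below strongly
   prefer peeling for that DR when the cost model is unlimited.  */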
1122 /* Traverse the peeling hash table to find the peeling option that aligns
1123 the maximum number of data accesses. */
1125 static int
1126 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1127 _vect_peel_extended_info *max)
1129 vect_peel_info elem = *slot;
1131 if (elem->count > max->peel_info.count
1132 || (elem->count == max->peel_info.count
1133 && max->peel_info.npeel > elem->npeel))
1135 max->peel_info.npeel = elem->npeel;
1136 max->peel_info.count = elem->count;
1137 max->peel_info.dr = elem->dr;
1140 return 1;
1144 /* Traverse peeling hash table and calculate cost for each peeling option.
1145 Find the one with the lowest cost. */
1147 static int
1148 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1149 _vect_peel_extended_info *min)
1151 vect_peel_info elem = *slot;
1152 int save_misalignment, dummy;
1153 unsigned int inside_cost = 0, outside_cost = 0, i;
1154 gimple stmt = DR_STMT (elem->dr);
1155 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1156 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1157 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1158 struct data_reference *dr;
1159 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
1160 int single_iter_cost;
1162 prologue_cost_vec.create (2);
1163 body_cost_vec.create (2);
1164 epilogue_cost_vec.create (2);
1166 FOR_EACH_VEC_ELT (datarefs, i, dr)
1168 stmt = DR_STMT (dr);
1169 stmt_info = vinfo_for_stmt (stmt);
1170 /* For interleaving, only the alignment of the first access
1171 matters. */
1172 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1173 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1174 continue;
1176 save_misalignment = DR_MISALIGNMENT (dr);
1177 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1178 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1179 &body_cost_vec);
1180 SET_DR_MISALIGNMENT (dr, save_misalignment);
1183 single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
1184 outside_cost += vect_get_known_peeling_cost
1185 (loop_vinfo, elem->npeel, &dummy,
1186 /* ??? We use this cost as number of stmts with scalar_stmt cost,
1187 thus divide by that. This introduces rounding errors, thus better
1188 introduce a new cost kind (raw_cost? scalar_iter_cost?). */
1189 single_iter_cost / vect_get_stmt_cost (scalar_stmt),
1190 &prologue_cost_vec, &epilogue_cost_vec);
1192 /* Prologue and epilogue costs are added to the target model later.
1193 These costs depend only on the scalar iteration cost, the
1194 number of peeling iterations finally chosen, and the number of
1195 misaligned statements. So discard the information found here. */
1196 prologue_cost_vec.release ();
1197 epilogue_cost_vec.release ();
1199 if (inside_cost < min->inside_cost
1200 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1202 min->inside_cost = inside_cost;
1203 min->outside_cost = outside_cost;
1204 min->body_cost_vec.release ();
1205 min->body_cost_vec = body_cost_vec;
1206 min->peel_info.dr = elem->dr;
1207 min->peel_info.npeel = elem->npeel;
1209 else
1210 body_cost_vec.release ();
1212 return 1;
1216 /* Choose best peeling option by traversing peeling hash table and either
1217 choosing an option with the lowest cost (if cost model is enabled) or the
1218 option that aligns as many accesses as possible. */
1220 static struct data_reference *
1221 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1222 unsigned int *npeel,
1223 stmt_vector_for_cost *body_cost_vec)
1225 struct _vect_peel_extended_info res;
1227 res.peel_info.dr = NULL;
1228 res.body_cost_vec = stmt_vector_for_cost ();
1230 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1232 res.inside_cost = INT_MAX;
1233 res.outside_cost = INT_MAX;
1234 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1235 ->traverse <_vect_peel_extended_info *,
1236 vect_peeling_hash_get_lowest_cost> (&res);
1238 else
1240 res.peel_info.count = 0;
1241 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1242 ->traverse <_vect_peel_extended_info *,
1243 vect_peeling_hash_get_most_frequent> (&res);
1246 *npeel = res.peel_info.npeel;
1247 *body_cost_vec = res.body_cost_vec;
1248 return res.peel_info.dr;
1252 /* Function vect_enhance_data_refs_alignment
1254 This pass will use loop versioning and loop peeling in order to enhance
1255 the alignment of data references in the loop.
1257 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1258 original loop is to be vectorized. Any other loops that are created by
1259 the transformations performed in this pass - are not supposed to be
1260 vectorized. This restriction will be relaxed.
1262 This pass will require a cost model to guide it whether to apply peeling
1263 or versioning or a combination of the two. For example, the scheme that
1264 Intel uses when given a loop with several memory accesses is as follows:
1265 choose one memory access ('p') whose alignment you want to force by doing
1266 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1267 other accesses are not necessarily aligned, or (2) use loop versioning to
1268 generate one loop in which all accesses are aligned, and another loop in
1269 which only 'p' is necessarily aligned.
1271 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1272 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1273 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1275 Devising a cost model is the most critical aspect of this work. It will
1276 guide us on which access to peel for, whether to use loop versioning, how
1277 many versions to create, etc. The cost model will probably consist of
1278 generic considerations as well as target specific considerations (on
1279 powerpc for example, misaligned stores are more painful than misaligned
1280 loads).
1282 Here are the general steps involved in alignment enhancements:
1284 -- original loop, before alignment analysis:
1285 for (i=0; i<N; i++){
1286 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1287 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1290 -- After vect_compute_data_refs_alignment:
1291 for (i=0; i<N; i++){
1292 x = q[i]; # DR_MISALIGNMENT(q) = 3
1293 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1296 -- Possibility 1: we do loop versioning:
1297 if (p is aligned) {
1298 for (i=0; i<N; i++){ # loop 1A
1299 x = q[i]; # DR_MISALIGNMENT(q) = 3
1300 p[i] = y; # DR_MISALIGNMENT(p) = 0
1303 else {
1304 for (i=0; i<N; i++){ # loop 1B
1305 x = q[i]; # DR_MISALIGNMENT(q) = 3
1306 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1310 -- Possibility 2: we do loop peeling:
1311 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1312 x = q[i];
1313 p[i] = y;
1315 for (i = 3; i < N; i++){ # loop 2A
1316 x = q[i]; # DR_MISALIGNMENT(q) = 0
1317 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1320 -- Possibility 3: combination of loop peeling and versioning:
1321 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1322 x = q[i];
1323 p[i] = y;
1325 if (p is aligned) {
1326 for (i = 3; i<N; i++){ # loop 3A
1327 x = q[i]; # DR_MISALIGNMENT(q) = 0
1328 p[i] = y; # DR_MISALIGNMENT(p) = 0
1331 else {
1332 for (i = 3; i<N; i++){ # loop 3B
1333 x = q[i]; # DR_MISALIGNMENT(q) = 0
1334 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1338 These loops are later passed to loop_transform to be vectorized. The
1339 vectorizer will use the alignment information to guide the transformation
1340 (whether to generate regular loads/stores, or with special handling for
1341 misalignment). */
1343 bool
1344 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1346 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1347 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1348 enum dr_alignment_support supportable_dr_alignment;
1349 struct data_reference *dr0 = NULL, *first_store = NULL;
1350 struct data_reference *dr;
1351 unsigned int i, j;
1352 bool do_peeling = false;
1353 bool do_versioning = false;
1354 bool stat;
1355 gimple stmt;
1356 stmt_vec_info stmt_info;
1357 unsigned int npeel = 0;
1358 bool all_misalignments_unknown = true;
1359 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1360 unsigned possible_npeel_number = 1;
1361 tree vectype;
1362 unsigned int nelements, mis, same_align_drs_max = 0;
1363 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
1365 if (dump_enabled_p ())
1366 dump_printf_loc (MSG_NOTE, vect_location,
1367 "=== vect_enhance_data_refs_alignment ===\n");
1369 /* While cost model enhancements are expected in the future, the high level
1370 view of the code at this time is as follows:
1372 A) If there is a misaligned access then see if peeling to align
1373 this access can make all data references satisfy
1374 vect_supportable_dr_alignment. If so, update data structures
1375 as needed and return true.
1377 B) If peeling wasn't possible and there is a data reference with an
1378 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1379 then see if loop versioning checks can be used to make all data
1380 references satisfy vect_supportable_dr_alignment. If so, update
1381 data structures as needed and return true.
1383 C) If neither peeling nor versioning were successful then return false if
1384 any data reference does not satisfy vect_supportable_dr_alignment.
1386 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1388 Note, Possibility 3 above (which is peeling and versioning together) is not
1389 being done at this time. */
1391 /* (1) Peeling to force alignment. */
1393 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1394 Considerations:
1395 + How many accesses will become aligned due to the peeling
1396 - How many accesses will become unaligned due to the peeling,
1397 and the cost of misaligned accesses.
1398 - The cost of peeling (the extra runtime checks, the increase
1399 in code size). */
1401 FOR_EACH_VEC_ELT (datarefs, i, dr)
1403 stmt = DR_STMT (dr);
1404 stmt_info = vinfo_for_stmt (stmt);
1406 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1407 continue;
1409 /* For interleaving, only the alignment of the first access
1410 matters. */
1411 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1412 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1413 continue;
1415 /* For invariant accesses there is nothing to enhance. */
1416 if (integer_zerop (DR_STEP (dr)))
1417 continue;
1419 /* Strided loads perform only component accesses, alignment is
1420 irrelevant for them. */
1421 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1422 continue;
1424 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1425 do_peeling = vector_alignment_reachable_p (dr);
1426 if (do_peeling)
1428 if (known_alignment_for_access_p (dr))
1430 unsigned int npeel_tmp;
1431 bool negative = tree_int_cst_compare (DR_STEP (dr),
1432 size_zero_node) < 0;
1434 /* Save info about DR in the hash table. */
1435 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1436 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1437 = new hash_table<peel_info_hasher> (1);
1439 vectype = STMT_VINFO_VECTYPE (stmt_info);
1440 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1441 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1442 TREE_TYPE (DR_REF (dr))));
1443 npeel_tmp = (negative
1444 ? (mis - nelements) : (nelements - mis))
1445 & (nelements - 1);
1447 /* For multiple types, it is possible that the bigger type access
1448 will have more than one peeling option. E.g., a loop with two
1449 types: one of size (vector size / 4), and the other one of
1450 size (vector size / 8). The vectorization factor will be 8. If both
1451 accesses are misaligned by 3, the first one needs one scalar
1452 iteration to be aligned, and the second one needs 5. But the
1453 first one will also be aligned by peeling 5 scalar
1454 iterations, and in that case both accesses will be aligned.
1455 Hence, except for the immediate peeling amount, we also want
1456 to try to add a full vector size, while we don't exceed the
1457 vectorization factor.
1458 We do this automatically for the cost model, since we calculate the cost
1459 for every peeling option. */
1460 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1461 possible_npeel_number = vf / nelements;
1463 /* Handle the aligned case. We may decide to align some other
1464 access, making DR unaligned. */
1465 if (DR_MISALIGNMENT (dr) == 0)
1467 npeel_tmp = 0;
1468 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1469 possible_npeel_number++;
1472 for (j = 0; j < possible_npeel_number; j++)
1474 gcc_assert (npeel_tmp <= vf);
1475 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1476 npeel_tmp += nelements;
1479 all_misalignments_unknown = false;
1480 /* Data-ref that was chosen for the case that all the
1481 misalignments are unknown is not relevant anymore, since we
1482 have a data-ref with known alignment. */
1483 dr0 = NULL;
1485 else
1487 /* If we don't know any misalignment values, we prefer
1488 peeling for the data-ref that has the maximum number of data-refs
1489 with the same alignment, unless the target prefers to align
1490 stores over loads. */
1491 if (all_misalignments_unknown)
1493 unsigned same_align_drs
1494 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1495 if (!dr0
1496 || same_align_drs_max < same_align_drs)
1498 same_align_drs_max = same_align_drs;
1499 dr0 = dr;
1501 /* For data-refs with the same number of related
1502 accesses prefer the one where the misalign
1503 computation will be invariant in the outermost loop. */
1504 else if (same_align_drs_max == same_align_drs)
1506 struct loop *ivloop0, *ivloop;
1507 ivloop0 = outermost_invariant_loop_for_expr
1508 (loop, DR_BASE_ADDRESS (dr0));
1509 ivloop = outermost_invariant_loop_for_expr
1510 (loop, DR_BASE_ADDRESS (dr));
1511 if ((ivloop && !ivloop0)
1512 || (ivloop && ivloop0
1513 && flow_loop_nested_p (ivloop, ivloop0)))
1514 dr0 = dr;
1517 if (!first_store && DR_IS_WRITE (dr))
1518 first_store = dr;
1521 /* If there are both known and unknown misaligned accesses in the
1522 loop, we choose peeling amount according to the known
1523 accesses. */
1524 if (!supportable_dr_alignment)
1526 dr0 = dr;
1527 if (!first_store && DR_IS_WRITE (dr))
1528 first_store = dr;
1532 else
1534 if (!aligned_access_p (dr))
1536 if (dump_enabled_p ())
1537 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1538 "vector alignment may not be reachable\n");
1539 break;
1544 /* Check if we can possibly peel the loop. */
1545 if (!vect_can_advance_ivs_p (loop_vinfo)
1546 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1547 do_peeling = false;
1549 /* If we don't know how many times the peeling loop will run
1550 assume it will run VF-1 times and disable peeling if the remaining
1551 iters are less than the vectorization factor. */
1552 if (do_peeling
1553 && all_misalignments_unknown
1554 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1555 && (LOOP_VINFO_INT_NITERS (loop_vinfo)
1556 < 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
1557 do_peeling = false;
1559 if (do_peeling
1560 && all_misalignments_unknown
1561 && vect_supportable_dr_alignment (dr0, false))
1563 /* Check if the target requires to prefer stores over loads, i.e., if
1564 misaligned stores are more expensive than misaligned loads (taking
1565 drs with same alignment into account). */
1566 if (first_store && DR_IS_READ (dr0))
1568 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1569 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1570 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1571 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1572 stmt_vector_for_cost dummy;
1573 dummy.create (2);
1575 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1576 &dummy);
1577 vect_get_data_access_cost (first_store, &store_inside_cost,
1578 &store_outside_cost, &dummy);
1580 dummy.release ();
1582 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1583 aligning the load DR0). */
1584 load_inside_penalty = store_inside_cost;
1585 load_outside_penalty = store_outside_cost;
1586 for (i = 0;
1587 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1588 DR_STMT (first_store))).iterate (i, &dr);
1589 i++)
1590 if (DR_IS_READ (dr))
1592 load_inside_penalty += load_inside_cost;
1593 load_outside_penalty += load_outside_cost;
1595 else
1597 load_inside_penalty += store_inside_cost;
1598 load_outside_penalty += store_outside_cost;
1601 /* Calculate the penalty for leaving DR0 unaligned (by
1602 aligning the FIRST_STORE). */
1603 store_inside_penalty = load_inside_cost;
1604 store_outside_penalty = load_outside_cost;
1605 for (i = 0;
1606 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1607 DR_STMT (dr0))).iterate (i, &dr);
1608 i++)
1609 if (DR_IS_READ (dr))
1611 store_inside_penalty += load_inside_cost;
1612 store_outside_penalty += load_outside_cost;
1614 else
1616 store_inside_penalty += store_inside_cost;
1617 store_outside_penalty += store_outside_cost;
1620 if (load_inside_penalty > store_inside_penalty
1621 || (load_inside_penalty == store_inside_penalty
1622 && load_outside_penalty > store_outside_penalty))
1623 dr0 = first_store;
1626 /* In case there are only loads with different unknown misalignments, use
1627 peeling only if it may help to align other accesses in the loop. */
1628 if (!first_store
1629 && !STMT_VINFO_SAME_ALIGN_REFS (
1630 vinfo_for_stmt (DR_STMT (dr0))).length ()
1631 && vect_supportable_dr_alignment (dr0, false)
1632 != dr_unaligned_supported)
1633 do_peeling = false;
1636 if (do_peeling && !dr0)
1638 /* Peeling is possible, but no data access strictly requires alignment in
1639 order to be supported. So we try to choose the best possible peeling. */
1641 /* We should get here only if there are drs with known misalignment. */
1642 gcc_assert (!all_misalignments_unknown);
1644 /* Choose the best peeling from the hash table. */
1645 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
1646 &body_cost_vec);
1647 if (!dr0 || !npeel)
1648 do_peeling = false;
1650 /* If peeling by npeel will result in a remaining loop not iterating
1651 enough to be vectorized then do not peel. */
1652 if (do_peeling
1653 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1654 && (LOOP_VINFO_INT_NITERS (loop_vinfo)
1655 < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
1656 do_peeling = false;
1659 if (do_peeling)
1661 stmt = DR_STMT (dr0);
1662 stmt_info = vinfo_for_stmt (stmt);
1663 vectype = STMT_VINFO_VECTYPE (stmt_info);
1664 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1666 if (known_alignment_for_access_p (dr0))
1668 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1669 size_zero_node) < 0;
1670 if (!npeel)
1672 /* Since it's known at compile time, compute the number of
1673 iterations in the peeled loop (the peeling factor) for use in
1674 updating DR_MISALIGNMENT values. The peeling factor is the
1675 vectorization factor minus the misalignment as an element
1676 count. */
1677 mis = DR_MISALIGNMENT (dr0);
1678 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1679 npeel = ((negative ? mis - nelements : nelements - mis)
1680 & (nelements - 1));
1683 /* For interleaved data access every iteration accesses all the
1684 members of the group, therefore we divide the number of iterations
1685 by the group size. */
1686 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1687 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1688 npeel /= GROUP_SIZE (stmt_info);
1690 if (dump_enabled_p ())
1691 dump_printf_loc (MSG_NOTE, vect_location,
1692 "Try peeling by %d\n", npeel);
1695 /* Ensure that all data refs can be vectorized after the peel. */
1696 FOR_EACH_VEC_ELT (datarefs, i, dr)
1698 int save_misalignment;
1700 if (dr == dr0)
1701 continue;
1703 stmt = DR_STMT (dr);
1704 stmt_info = vinfo_for_stmt (stmt);
1705 /* For interleaving, only the alignment of the first access
1706 matters. */
1707 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1708 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1709 continue;
1711 /* Strided loads perform only component accesses, alignment is
1712 irrelevant for them. */
1713 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1714 continue;
1716 save_misalignment = DR_MISALIGNMENT (dr);
1717 vect_update_misalignment_for_peel (dr, dr0, npeel);
1718 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1719 SET_DR_MISALIGNMENT (dr, save_misalignment);
1721 if (!supportable_dr_alignment)
1723 do_peeling = false;
1724 break;
1728 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1730 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1731 if (!stat)
1732 do_peeling = false;
1733 else
1735 body_cost_vec.release ();
1736 return stat;
1740 if (do_peeling)
1742 unsigned max_allowed_peel
1743 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1744 if (max_allowed_peel != (unsigned)-1)
1746 unsigned max_peel = npeel;
1747 if (max_peel == 0)
1749 gimple dr_stmt = DR_STMT (dr0);
1750 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1751 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1752 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1754 if (max_peel > max_allowed_peel)
1756 do_peeling = false;
1757 if (dump_enabled_p ())
1758 dump_printf_loc (MSG_NOTE, vect_location,
1759 "Disable peeling, max peels reached: %d\n", max_peel);
1764 if (do_peeling)
1766 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1767 If the misalignment of DR_i is identical to that of dr0 then set
1768 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1769 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1770 by the peeling factor times the element size of DR_i (MOD the
1771 vectorization factor times the size). Otherwise, the
1772 misalignment of DR_i must be set to unknown. */
1773 FOR_EACH_VEC_ELT (datarefs, i, dr)
1774 if (dr != dr0)
1775 vect_update_misalignment_for_peel (dr, dr0, npeel);
1777 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1778 if (npeel)
1779 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1780 else
1781 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1782 = DR_MISALIGNMENT (dr0);
1783 SET_DR_MISALIGNMENT (dr0, 0);
1784 if (dump_enabled_p ())
1786 dump_printf_loc (MSG_NOTE, vect_location,
1787 "Alignment of access forced using peeling.\n");
1788 dump_printf_loc (MSG_NOTE, vect_location,
1789 "Peeling for alignment will be applied.\n");
1791 /* The inside-loop cost will be accounted for in vectorizable_load
1792 and vectorizable_store correctly with adjusted alignments.
1793 Drop the body_cost_vec on the floor here. */
1794 body_cost_vec.release ();
1796 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1797 gcc_assert (stat);
1798 return stat;
1802 body_cost_vec.release ();
1804 /* (2) Versioning to force alignment. */
1806 /* Try versioning if:
1807 1) optimize loop for speed
1808 2) there is at least one unsupported misaligned data ref with an unknown
1809 misalignment, and
1810 3) all misaligned data refs with a known misalignment are supported, and
1811 4) the number of runtime alignment checks is within reason. */
1813 do_versioning =
1814 optimize_loop_nest_for_speed_p (loop)
1815 && (!loop->inner); /* FORNOW */
1817 if (do_versioning)
1819 FOR_EACH_VEC_ELT (datarefs, i, dr)
1821 stmt = DR_STMT (dr);
1822 stmt_info = vinfo_for_stmt (stmt);
1824 /* For interleaving, only the alignment of the first access
1825 matters. */
1826 if (aligned_access_p (dr)
1827 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1828 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1829 continue;
1831 /* Strided loads perform only component accesses, alignment is
1832 irrelevant for them. */
1833 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1834 continue;
1836 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1838 if (!supportable_dr_alignment)
1840 gimple stmt;
1841 int mask;
1842 tree vectype;
1844 if (known_alignment_for_access_p (dr)
1845 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1846 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1848 do_versioning = false;
1849 break;
1852 stmt = DR_STMT (dr);
1853 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1854 gcc_assert (vectype);
1856 /* The rightmost bits of an aligned address must be zeros.
1857 Construct the mask needed for this test. For example,
1858 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1859 mask must be 15 = 0xf. */
1860 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1862 /* FORNOW: use the same mask to test all potentially unaligned
1863 references in the loop. The vectorizer currently supports
1864 a single vector size, see the reference to
1865 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1866 vectorization factor is computed. */
1867 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1868 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1869 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1870 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1871 DR_STMT (dr));
1875 /* Versioning requires at least one misaligned data reference. */
1876 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1877 do_versioning = false;
1878 else if (!do_versioning)
1879 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
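   /* Sketch of what the collected mask and statements are used for
      (hedged; the actual condition is built later by the loop-versioning
      code, not here): with LOOP_VINFO_PTR_MASK == 15 and two recorded
      addresses p and q, the runtime guard is conceptually

        if ((((uintptr_t) p | (uintptr_t) q) & 15) == 0)
          ... run the vectorized loop ...
        else
          ... run the scalar loop ...

      i.e. every potentially misaligned address must have its low bits
      clear for the vector version to be taken.  */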
1882 if (do_versioning)
1884 vec<gimple> may_misalign_stmts
1885 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1886 gimple stmt;
1888 /* It can now be assumed that the data references in the statements
1889 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1890 of the loop being vectorized. */
1891 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1893 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1894 dr = STMT_VINFO_DATA_REF (stmt_info);
1895 SET_DR_MISALIGNMENT (dr, 0);
1896 if (dump_enabled_p ())
1897 dump_printf_loc (MSG_NOTE, vect_location,
1898 "Alignment of access forced using versioning.\n");
1901 if (dump_enabled_p ())
1902 dump_printf_loc (MSG_NOTE, vect_location,
1903 "Versioning for alignment will be applied.\n");
1905 /* Peeling and versioning can't be done together at this time. */
1906 gcc_assert (! (do_peeling && do_versioning));
1908 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1909 gcc_assert (stat);
1910 return stat;
1913 /* This point is reached if neither peeling nor versioning is being done. */
1914 gcc_assert (! (do_peeling || do_versioning));
1916 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1917 return stat;
1921 /* Function vect_find_same_alignment_drs.
1923 Update group and alignment relations according to the chosen
1924 vectorization factor. */
1926 static void
1927 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1928 loop_vec_info loop_vinfo)
1930 unsigned int i;
1931 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1932 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1933 struct data_reference *dra = DDR_A (ddr);
1934 struct data_reference *drb = DDR_B (ddr);
1935 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1936 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1937 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1938 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1939 lambda_vector dist_v;
1940 unsigned int loop_depth;
1942 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1943 return;
1945 if (dra == drb)
1946 return;
1948 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1949 return;
1951 /* Loop-based vectorization and known data dependence. */
1952 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1953 return;
1955 /* Data-dependence analysis reports a distance vector of zero
1956 for data-references that overlap only in the first iteration
1957 but whose steps have different signs (see PR45764).
1958 So as a sanity check require equal DR_STEP. */
1959 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1960 return;
1962 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1963 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1965 int dist = dist_v[loop_depth];
1967 if (dump_enabled_p ())
1968 dump_printf_loc (MSG_NOTE, vect_location,
1969 "dependence distance = %d.\n", dist);
1971 /* Same loop iteration. */
1972 if (dist == 0
1973 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1975 /* Two references with distance zero have the same alignment. */
1976 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1977 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
1978 if (dump_enabled_p ())
1980 dump_printf_loc (MSG_NOTE, vect_location,
1981 "accesses have the same alignment.\n");
1982 dump_printf (MSG_NOTE,
1983 "dependence distance modulo vf == 0 between ");
1984 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1985 dump_printf (MSG_NOTE, " and ");
1986 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1987 dump_printf (MSG_NOTE, "\n");
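   /* Illustrative example (an assumption, not from this file): for
      "a[i + 4] = a[i] + 1" with 4-byte elements and a vectorization
      factor of 4, the dependence distance between the read and the
      write is 4; 4 % 4 == 0 and the element sizes match, so the two
      data-refs are recorded as having the same alignment.  */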
1994 /* Function vect_analyze_data_refs_alignment
1996 Analyze the alignment of the data-references in the loop.
1997 Return FALSE if a data reference is found that cannot be vectorized. */
1999 bool
2000 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
2001 bb_vec_info bb_vinfo)
2003 if (dump_enabled_p ())
2004 dump_printf_loc (MSG_NOTE, vect_location,
2005 "=== vect_analyze_data_refs_alignment ===\n");
2007 /* Mark groups of data references with same alignment using
2008 data dependence information. */
2009 if (loop_vinfo)
2011 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2012 struct data_dependence_relation *ddr;
2013 unsigned int i;
2015 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2016 vect_find_same_alignment_drs (ddr, loop_vinfo);
2019 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2021 if (dump_enabled_p ())
2022 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2023 "not vectorized: can't calculate alignment "
2024 "for data ref.\n");
2025 return false;
2028 return true;
2032 /* Analyze groups of accesses: check that DR belongs to a group of
2033 accesses of legal size, step, etc. Detect gaps, single element
2034 interleaving, and other special cases. Set grouped access info.
2035 Collect groups of strided stores for further use in SLP analysis. */
2037 static bool
2038 vect_analyze_group_access (struct data_reference *dr)
2040 tree step = DR_STEP (dr);
2041 tree scalar_type = TREE_TYPE (DR_REF (dr));
2042 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2043 gimple stmt = DR_STMT (dr);
2044 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2045 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2046 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2047 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2048 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2049 bool slp_impossible = false;
2050 struct loop *loop = NULL;
2052 if (loop_vinfo)
2053 loop = LOOP_VINFO_LOOP (loop_vinfo);
2055 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2056 size of the interleaving group (including gaps). */
2057 groupsize = absu_hwi (dr_step) / type_size;
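  /* E.g. (illustrative): 8-byte doubles accessed with DR_STEP 32 give
     groupsize = 32 / 8 = 4, i.e. the group spans four double-sized
     slots per scalar iteration, counting any gaps.  */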
2059   /* A non-consecutive access is possible only if it is part of an interleaving group. */
2060 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2062       /* Check if this DR is part of an interleaving group, and is a single
2063 element of the group that is accessed in the loop. */
2065 /* Gaps are supported only for loads. STEP must be a multiple of the type
2066 size. The size of the group must be a power of 2. */
2067 if (DR_IS_READ (dr)
2068 && (dr_step % type_size) == 0
2069 && groupsize > 0
2070 && exact_log2 (groupsize) != -1)
2072 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2073 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2074 if (dump_enabled_p ())
2076 dump_printf_loc (MSG_NOTE, vect_location,
2077 "Detected single element interleaving ");
2078 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2079 dump_printf (MSG_NOTE, " step ");
2080 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2081 dump_printf (MSG_NOTE, "\n");
2084 if (loop_vinfo)
2086 if (dump_enabled_p ())
2087 dump_printf_loc (MSG_NOTE, vect_location,
2088 "Data access with gaps requires scalar "
2089 "epilogue loop\n");
2090 if (loop->inner)
2092 if (dump_enabled_p ())
2093 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2094 "Peeling for outer loop is not"
2095 " supported\n");
2096 return false;
2099 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2102 return true;
2105 if (dump_enabled_p ())
2107 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2108 "not consecutive access ");
2109 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2110 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2113 if (bb_vinfo)
2115 /* Mark the statement as unvectorizable. */
2116 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2117 return true;
2120 return false;
2123 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2125 /* First stmt in the interleaving chain. Check the chain. */
2126 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2127 struct data_reference *data_ref = dr;
2128 unsigned int count = 1;
2129 tree prev_init = DR_INIT (data_ref);
2130 gimple prev = stmt;
2131 HOST_WIDE_INT diff, gaps = 0;
2132 unsigned HOST_WIDE_INT count_in_bytes;
2134 while (next)
2136           /* Skip same data-refs.  In case two or more stmts share a
2137              data-ref (supported only for loads), we vectorize only the first
2138 stmt, and the rest get their vectorized loads from the first
2139 one. */
2140 if (!tree_int_cst_compare (DR_INIT (data_ref),
2141 DR_INIT (STMT_VINFO_DATA_REF (
2142 vinfo_for_stmt (next)))))
2144 if (DR_IS_WRITE (data_ref))
2146 if (dump_enabled_p ())
2147 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2148 "Two store stmts share the same dr.\n");
2149 return false;
2152 /* For load use the same data-ref load. */
2153 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2155 prev = next;
2156 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2157 continue;
2160 prev = next;
2161 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2163 /* All group members have the same STEP by construction. */
2164 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2166 /* Check that the distance between two accesses is equal to the type
2167 size. Otherwise, we have gaps. */
2168 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2169 - TREE_INT_CST_LOW (prev_init)) / type_size;
2170 if (diff != 1)
2172 /* FORNOW: SLP of accesses with gaps is not supported. */
2173 slp_impossible = true;
2174 if (DR_IS_WRITE (data_ref))
2176 if (dump_enabled_p ())
2177 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2178 "interleaved store with gaps\n");
2179 return false;
2182 gaps += diff - 1;
2185 last_accessed_element += diff;
2187 /* Store the gap from the previous member of the group. If there is no
2188 gap in the access, GROUP_GAP is always 1. */
2189 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2191 prev_init = DR_INIT (data_ref);
2192 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2193 /* Count the number of data-refs in the chain. */
2194 count++;
2197       /* COUNT is the number of accesses found; multiply it by the size of
2198 the type to get COUNT_IN_BYTES. */
2199 count_in_bytes = type_size * count;
2201 /* Check that the size of the interleaving (including gaps) is not
2202 greater than STEP. */
2203 if (dr_step != 0
2204 && absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
2206 if (dump_enabled_p ())
2208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2209 "interleaving size is greater than step for ");
2210 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2211 DR_REF (dr));
2212 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2214 return false;
2217 /* Check that the size of the interleaving is equal to STEP for stores,
2218 i.e., that there are no gaps. */
2219 if (dr_step != 0
2220 && absu_hwi (dr_step) != count_in_bytes)
2222 if (DR_IS_READ (dr))
2224 slp_impossible = true;
2225               /* There is a gap after the last load in the group.  This gap is the
2226                  difference between the groupsize and the number of elements.
2227 When there is no gap, this difference should be 0. */
2228 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
2230 else
2232 if (dump_enabled_p ())
2233 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2234 "interleaved store with gaps\n");
2235 return false;
2239 /* Check that STEP is a multiple of type size. */
2240 if (dr_step != 0
2241 && (dr_step % type_size) != 0)
2243 if (dump_enabled_p ())
2245 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2246 "step is not a multiple of type size: step ");
2247 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
2248 dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
2249 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2250 TYPE_SIZE_UNIT (scalar_type));
2251 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2253 return false;
2256 if (groupsize == 0)
2257 groupsize = count;
2259 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2260 if (dump_enabled_p ())
2261 dump_printf_loc (MSG_NOTE, vect_location,
2262 "Detected interleaving of size %d\n", (int)groupsize);
2264 /* SLP: create an SLP data structure for every interleaving group of
2265          stores for further analysis in vect_analyze_slp. */
2266 if (DR_IS_WRITE (dr) && !slp_impossible)
2268 if (loop_vinfo)
2269 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2270 if (bb_vinfo)
2271 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2274 /* There is a gap in the end of the group. */
2275 if (groupsize - last_accessed_element > 0 && loop_vinfo)
2277 if (dump_enabled_p ())
2278 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2279 "Data access with gaps requires scalar "
2280 "epilogue loop\n");
2281 if (loop->inner)
2283 if (dump_enabled_p ())
2284 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2285 "Peeling for outer loop is not supported\n");
2286 return false;
2289 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2293 return true;
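  /* Illustrative example (hypothetical loop, not from this file):

       for (i = 0; i < n; i++)
         {
           p[2 * i]     = x[i];
           p[2 * i + 1] = y[i];
         }

     with 4-byte int stores produces two data-refs with DR_STEP 8 and
     DR_INITs 0 and 4: count = 2, count_in_bytes = 8 == |step|, so
     there are no gaps, groupsize = 8 / 4 = 2, and the chain is pushed
     to LOOP_VINFO_GROUPED_STORES for later SLP analysis.  */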
2297 /* Analyze the access pattern of the data-reference DR.
2298 In case of non-consecutive accesses call vect_analyze_group_access() to
2299 analyze groups of accesses. */
2301 static bool
2302 vect_analyze_data_ref_access (struct data_reference *dr)
2304 tree step = DR_STEP (dr);
2305 tree scalar_type = TREE_TYPE (DR_REF (dr));
2306 gimple stmt = DR_STMT (dr);
2307 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2308 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2309 struct loop *loop = NULL;
2311 if (loop_vinfo)
2312 loop = LOOP_VINFO_LOOP (loop_vinfo);
2314 if (loop_vinfo && !step)
2316 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "bad data-ref access in loop\n");
2319 return false;
2322   /* Allow invariant loads in loops that are not nested. */
2323 if (loop_vinfo && integer_zerop (step))
2325 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2326 if (nested_in_vect_loop_p (loop, stmt))
2328 if (dump_enabled_p ())
2329 dump_printf_loc (MSG_NOTE, vect_location,
2330 "zero step in inner loop of nest\n");
2331 return false;
2333 return DR_IS_READ (dr);
2336 if (loop && nested_in_vect_loop_p (loop, stmt))
2338 /* Interleaved accesses are not yet supported within outer-loop
2339 vectorization for references in the inner-loop. */
2340 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2342 /* For the rest of the analysis we use the outer-loop step. */
2343 step = STMT_VINFO_DR_STEP (stmt_info);
2344 if (integer_zerop (step))
2346 if (dump_enabled_p ())
2347 dump_printf_loc (MSG_NOTE, vect_location,
2348 "zero step in outer loop.\n");
2349 if (DR_IS_READ (dr))
2350 return true;
2351 else
2352 return false;
2356 /* Consecutive? */
2357 if (TREE_CODE (step) == INTEGER_CST)
2359 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2360 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2361 || (dr_step < 0
2362 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2364 /* Mark that it is not interleaving. */
2365 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2366 return true;
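      /* E.g. (illustrative): a plain forward walk over 4-byte ints has
         DR_STEP 4 == TYPE_SIZE_UNIT, so it is consecutive; a backward
         walk has DR_STEP -4, which the negative-step test above also
         accepts.  */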
2370 if (loop && nested_in_vect_loop_p (loop, stmt))
2372 if (dump_enabled_p ())
2373 dump_printf_loc (MSG_NOTE, vect_location,
2374 "grouped access in outer loop.\n");
2375 return false;
2378 /* Assume this is a DR handled by non-constant strided load case. */
2379 if (TREE_CODE (step) != INTEGER_CST)
2380 return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
2382 /* Not consecutive access - check if it's a part of interleaving group. */
2383 return vect_analyze_group_access (dr);
2388 /* A helper function used in the comparator function to sort data
2389 references. T1 and T2 are two data references to be compared.
2390 The function returns -1, 0, or 1. */
2392 static int
2393 compare_tree (tree t1, tree t2)
2395 int i, cmp;
2396 enum tree_code code;
2397 char tclass;
2399 if (t1 == t2)
2400 return 0;
2401 if (t1 == NULL)
2402 return -1;
2403 if (t2 == NULL)
2404 return 1;
2407 if (TREE_CODE (t1) != TREE_CODE (t2))
2408 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2410 code = TREE_CODE (t1);
2411 switch (code)
2413 /* For const values, we can just use hash values for comparisons. */
2414 case INTEGER_CST:
2415 case REAL_CST:
2416 case FIXED_CST:
2417 case STRING_CST:
2418 case COMPLEX_CST:
2419 case VECTOR_CST:
2421 hashval_t h1 = iterative_hash_expr (t1, 0);
2422 hashval_t h2 = iterative_hash_expr (t2, 0);
2423 if (h1 != h2)
2424 return h1 < h2 ? -1 : 1;
2425 break;
2428 case SSA_NAME:
2429 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2430 if (cmp != 0)
2431 return cmp;
2433 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2434 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2435 break;
2437 default:
2438 tclass = TREE_CODE_CLASS (code);
2440 /* For var-decl, we could compare their UIDs. */
2441 if (tclass == tcc_declaration)
2443 if (DECL_UID (t1) != DECL_UID (t2))
2444 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2445 break;
2448 /* For expressions with operands, compare their operands recursively. */
2449 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2451 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2452 if (cmp != 0)
2453 return cmp;
2457 return 0;
2461 /* Compare two data-references DRA and DRB to group them into chunks
2462 suitable for grouping. */
2464 static int
2465 dr_group_sort_cmp (const void *dra_, const void *drb_)
2467 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2468 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2469 int cmp;
2471 /* Stabilize sort. */
2472 if (dra == drb)
2473 return 0;
2475 /* Ordering of DRs according to base. */
2476 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2478 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2479 if (cmp != 0)
2480 return cmp;
2483 /* And according to DR_OFFSET. */
2484 if (!dr_equal_offsets_p (dra, drb))
2486 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2487 if (cmp != 0)
2488 return cmp;
2491 /* Put reads before writes. */
2492 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2493 return DR_IS_READ (dra) ? -1 : 1;
2495 /* Then sort after access size. */
2496 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2497 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2499 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2500 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2501 if (cmp != 0)
2502 return cmp;
2505 /* And after step. */
2506 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2508 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2509 if (cmp != 0)
2510 return cmp;
2513 /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2514 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2515 if (cmp == 0)
2516 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2517 return cmp;
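/* Usage note (illustrative): for three 4-byte reads a[i], a[i+1] and
   b[i] with step 4, this comparator keeps the two a[] refs adjacent,
   ordered by DR_INIT (0 before 4); the b[] ref sorts before or after
   them depending on how compare_tree orders the two base addresses.
   This adjacency is what lets the caller build interleaving chains in
   a single linear scan.  */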
2520 /* Function vect_analyze_data_ref_accesses.
2522 Analyze the access pattern of all the data references in the loop.
2524 FORNOW: the only access pattern that is considered vectorizable is a
2525 simple step 1 (consecutive) access.
2527 FORNOW: handle only arrays and pointer accesses. */
2529 bool
2530 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2532 unsigned int i;
2533 vec<data_reference_p> datarefs;
2534 struct data_reference *dr;
2536 if (dump_enabled_p ())
2537 dump_printf_loc (MSG_NOTE, vect_location,
2538 "=== vect_analyze_data_ref_accesses ===\n");
2540 if (loop_vinfo)
2541 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2542 else
2543 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2545 if (datarefs.is_empty ())
2546 return true;
2548 /* Sort the array of datarefs to make building the interleaving chains
2549 linear. Don't modify the original vector's order, it is needed for
2550 determining what dependencies are reversed. */
2551 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2552 datarefs_copy.qsort (dr_group_sort_cmp);
2554 /* Build the interleaving chains. */
2555 for (i = 0; i < datarefs_copy.length () - 1;)
2557 data_reference_p dra = datarefs_copy[i];
2558 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2559 stmt_vec_info lastinfo = NULL;
2560 for (i = i + 1; i < datarefs_copy.length (); ++i)
2562 data_reference_p drb = datarefs_copy[i];
2563 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2565 /* ??? Imperfect sorting (non-compatible types, non-modulo
2566          accesses, same accesses) can lead to a group being artificially
2567          split here, as we don't just skip over those.  If it really
2568          matters we can push those to a worklist and re-iterate
2569          over them.  Then we can just skip ahead to the next DR here. */
2571 /* Check that the data-refs have same first location (except init)
2572 and they are both either store or load (not load and store,
2573 not masked loads or stores). */
2574 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2575 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2576 DR_BASE_ADDRESS (drb), 0)
2577 || !dr_equal_offsets_p (dra, drb)
2578 || !gimple_assign_single_p (DR_STMT (dra))
2579 || !gimple_assign_single_p (DR_STMT (drb)))
2580 break;
2582 /* Check that the data-refs have the same constant size and step. */
2583 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2584 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2585 if (!tree_fits_uhwi_p (sza)
2586 || !tree_fits_uhwi_p (szb)
2587 || !tree_int_cst_equal (sza, szb)
2588 || !tree_fits_shwi_p (DR_STEP (dra))
2589 || !tree_fits_shwi_p (DR_STEP (drb))
2590 || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
2591 break;
2593 /* Do not place the same access in the interleaving chain twice. */
2594 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2595 break;
2597 /* Check the types are compatible.
2598 ??? We don't distinguish this during sorting. */
2599 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2600 TREE_TYPE (DR_REF (drb))))
2601 break;
2603 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2604 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2605 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2606 gcc_assert (init_a < init_b);
2608 /* If init_b == init_a + the size of the type * k, we have an
2609 interleaving, and DRA is accessed before DRB. */
2610 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2611 if ((init_b - init_a) % type_size_a != 0)
2612 break;
2614          /* Require that the step (if not zero) is greater than the difference
2615             between the data-refs' inits; this splits groups into suitable sizes. */
2616 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2617 if (step != 0 && step <= (init_b - init_a))
2618 break;
2620 if (dump_enabled_p ())
2622 dump_printf_loc (MSG_NOTE, vect_location,
2623 "Detected interleaving ");
2624 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2625 dump_printf (MSG_NOTE, " and ");
2626 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2627 dump_printf (MSG_NOTE, "\n");
2630 /* Link the found element into the group list. */
2631 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2633 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2634 lastinfo = stmtinfo_a;
2636 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2637 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2638 lastinfo = stmtinfo_b;
2642 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2643 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2644 && !vect_analyze_data_ref_access (dr))
2646 if (dump_enabled_p ())
2647 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2648 "not vectorized: complicated access pattern.\n");
2650 if (bb_vinfo)
2652 /* Mark the statement as not vectorizable. */
2653 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2654 continue;
2656 else
2658 datarefs_copy.release ();
2659 return false;
2663 datarefs_copy.release ();
2664 return true;
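/* Illustrative example of the chain-building rules above (hypothetical
   refs, not from this file): two 4-byte stores with the same base,
   DR_STEP 8 and DR_INITs 0 and 4 satisfy (4 - 0) % 4 == 0 and
   8 > 4 - 0, so they are linked into one group.  Had the second init
   been 8, the "step <= init_b - init_a" test would split them into
   separate (individually consecutive) accesses.  */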
2668 /* Operator == between two dr_with_seg_len objects.
2670 This equality operator is used to make sure two data refs
2671    are the same, so that we will consider combining the
2672    aliasing checks of the two pairs of data-dependent data
2673 refs. */
2675 static bool
2676 operator == (const dr_with_seg_len& d1,
2677 const dr_with_seg_len& d2)
2679 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2680 DR_BASE_ADDRESS (d2.dr), 0)
2681 && compare_tree (d1.offset, d2.offset) == 0
2682 && compare_tree (d1.seg_len, d2.seg_len) == 0;
2685 /* Function comp_dr_with_seg_len_pair.
2687 Comparison function for sorting objects of dr_with_seg_len_pair_t
2688 so that we can combine aliasing checks in one scan. */
2690 static int
2691 comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
2693 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2694 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2696 const dr_with_seg_len &p11 = p1->first,
2697 &p12 = p1->second,
2698 &p21 = p2->first,
2699 &p22 = p2->second;
2701   /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
2702      if a and c have the same base address and step, and b and d have the same
2703      address and step.  Therefore, if either a&c or b&d don't have the same address
2704      and step, we don't care about the order of those two pairs after sorting. */
2705 int comp_res;
2707 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2708 DR_BASE_ADDRESS (p21.dr))) != 0)
2709 return comp_res;
2710 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2711 DR_BASE_ADDRESS (p22.dr))) != 0)
2712 return comp_res;
2713 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2714 return comp_res;
2715 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2716 return comp_res;
2717 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2718 return comp_res;
2719 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
2720 return comp_res;
2722 return 0;
2725 /* Function vect_vfa_segment_size.
2727 Create an expression that computes the size of segment
2728    that will be accessed for a data reference.  The function takes into
2729 account that realignment loads may access one more vector.
2731 Input:
2732 DR: The data reference.
2733 LENGTH_FACTOR: segment length to consider.
2735 Return an expression whose value is the size of segment which will be
2736 accessed by DR. */
2738 static tree
2739 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2741 tree segment_length;
2743 if (integer_zerop (DR_STEP (dr)))
2744 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2745 else
2746 segment_length = size_binop (MULT_EXPR,
2747 fold_convert (sizetype, DR_STEP (dr)),
2748 fold_convert (sizetype, length_factor));
2750 if (vect_supportable_dr_alignment (dr, false)
2751 == dr_explicit_realign_optimized)
2753 tree vector_size = TYPE_SIZE_UNIT
2754 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2756 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2758 return segment_length;
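  /* E.g. (illustrative): a unit-stride 4-byte access with LENGTH_FACTOR
     equal to a vectorization factor of 4 gets a 16-byte segment; if the
     access uses dr_explicit_realign_optimized, one more vector size
     (16 bytes for V4SI) is added on top of that.  */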
2761 /* Function vect_prune_runtime_alias_test_list.
2763 Prune a list of ddrs to be tested at run-time by versioning for alias.
2764 Merge several alias checks into one if possible.
2765    Return FALSE if the resulting list of ddrs is longer than allowed by
2766 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2768 bool
2769 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2771 vec<ddr_p> may_alias_ddrs =
2772 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2773 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
2774 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2775 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2776 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2778 ddr_p ddr;
2779 unsigned int i;
2780 tree length_factor;
2782 if (dump_enabled_p ())
2783 dump_printf_loc (MSG_NOTE, vect_location,
2784 "=== vect_prune_runtime_alias_test_list ===\n");
2786 if (may_alias_ddrs.is_empty ())
2787 return true;
2789 /* Basically, for each pair of dependent data refs store_ptr_0
2790 and load_ptr_0, we create an expression:
2792 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2793 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2795 for aliasing checks. However, in some cases we can decrease
2796 the number of checks by combining two checks into one. For
2797 example, suppose we have another pair of data refs store_ptr_0
2798 and load_ptr_1, and if the following condition is satisfied:
2800 load_ptr_0 < load_ptr_1 &&
2801 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2803 (this condition means, in each iteration of vectorized loop,
2804 the accessed memory of store_ptr_0 cannot be between the memory
2805 of load_ptr_0 and load_ptr_1.)
2807      we can then use only the following expression to finish the
2808      aliasing checks between store_ptr_0 & load_ptr_0 and
2809 store_ptr_0 & load_ptr_1:
2811 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2812 || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))
2814      Note that we only consider the case where load_ptr_0 and load_ptr_1 have
2815      the same base address.  */
2817 comp_alias_ddrs.create (may_alias_ddrs.length ());
2819 /* First, we collect all data ref pairs for aliasing checks. */
2820 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2822 struct data_reference *dr_a, *dr_b;
2823 gimple dr_group_first_a, dr_group_first_b;
2824 tree segment_length_a, segment_length_b;
2825 gimple stmt_a, stmt_b;
2827 dr_a = DDR_A (ddr);
2828 stmt_a = DR_STMT (DDR_A (ddr));
2829 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2830 if (dr_group_first_a)
2832 stmt_a = dr_group_first_a;
2833 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2836 dr_b = DDR_B (ddr);
2837 stmt_b = DR_STMT (DDR_B (ddr));
2838 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2839 if (dr_group_first_b)
2841 stmt_b = dr_group_first_b;
2842 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2845 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2846 length_factor = scalar_loop_iters;
2847 else
2848 length_factor = size_int (vect_factor);
2849 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2850 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2852 dr_with_seg_len_pair_t dr_with_seg_len_pair
2853 (dr_with_seg_len (dr_a, segment_length_a),
2854 dr_with_seg_len (dr_b, segment_length_b));
2856 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
2857 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2859 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2862 /* Second, we sort the collected data ref pairs so that we can scan
2863 them once to combine all possible aliasing checks. */
2864 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
2866 /* Third, we scan the sorted dr pairs and check if we can combine
2867 alias checks of two neighbouring dr pairs. */
2868 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2870 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2871 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2872 *dr_b1 = &comp_alias_ddrs[i-1].second,
2873 *dr_a2 = &comp_alias_ddrs[i].first,
2874 *dr_b2 = &comp_alias_ddrs[i].second;
2876 /* Remove duplicate data ref pairs. */
2877 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2879 if (dump_enabled_p ())
2881 dump_printf_loc (MSG_NOTE, vect_location,
2882 "found equal ranges ");
2883 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2884 DR_REF (dr_a1->dr));
2885 dump_printf (MSG_NOTE, ", ");
2886 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2887 DR_REF (dr_b1->dr));
2888 dump_printf (MSG_NOTE, " and ");
2889 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2890 DR_REF (dr_a2->dr));
2891 dump_printf (MSG_NOTE, ", ");
2892 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2893 DR_REF (dr_b2->dr));
2894 dump_printf (MSG_NOTE, "\n");
2897 comp_alias_ddrs.ordered_remove (i--);
2898 continue;
2901 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2903          /* We consider the case where DR_B1 and DR_B2 are the same memref,
2904 and DR_A1 and DR_A2 are two consecutive memrefs. */
2905 if (*dr_a1 == *dr_a2)
2907 std::swap (dr_a1, dr_b1);
2908 std::swap (dr_a2, dr_b2);
2911 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2912 DR_BASE_ADDRESS (dr_a2->dr),
2914 || !tree_fits_shwi_p (dr_a1->offset)
2915 || !tree_fits_shwi_p (dr_a2->offset))
2916 continue;
2918 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2919 - tree_to_shwi (dr_a1->offset));
2922 /* Now we check if the following condition is satisfied:
2924 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2926 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2927 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2928 have to make a best estimation. We can get the minimum value
2929 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2930 then either of the following two conditions can guarantee the
2931 one above:
2933 1: DIFF <= MIN_SEG_LEN_B
2934 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
2938 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2939 ? tree_to_shwi (dr_b1->seg_len)
2940 : vect_factor);
2942 if (diff <= min_seg_len_b
2943 || (tree_fits_shwi_p (dr_a1->seg_len)
2944 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
2946 if (dump_enabled_p ())
2948 dump_printf_loc (MSG_NOTE, vect_location,
2949 "merging ranges for ");
2950 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2951 DR_REF (dr_a1->dr));
2952 dump_printf (MSG_NOTE, ", ");
2953 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2954 DR_REF (dr_b1->dr));
2955 dump_printf (MSG_NOTE, " and ");
2956 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2957 DR_REF (dr_a2->dr));
2958 dump_printf (MSG_NOTE, ", ");
2959 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2960 DR_REF (dr_b2->dr));
2961 dump_printf (MSG_NOTE, "\n");
2964 dr_a1->seg_len = size_binop (PLUS_EXPR,
2965 dr_a2->seg_len, size_int (diff));
2966 comp_alias_ddrs.ordered_remove (i--);
2971 dump_printf_loc (MSG_NOTE, vect_location,
2972 "improved number of alias checks from %d to %d\n",
2973 may_alias_ddrs.length (), comp_alias_ddrs.length ());
2974 if ((int) comp_alias_ddrs.length () >
2975 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2976 return false;
2978 return true;
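/* Worked example of the merging step above (hypothetical numbers):
   suppose DR_B1 and DR_B2 are the same store and DR_A1, DR_A2 are
   loads from one base at offsets 0 and 8 with constant segment
   lengths of 16 bytes.  Then DIFF = 8 <= MIN_SEG_LEN_B = 16, so the
   second pair is removed and DR_A1's segment length becomes
   16 + 8 = 24 bytes, which covers both loads with a single runtime
   alias check.  */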
2981 /* Check whether a non-affine read in stmt is suitable for gather load
2982 and if so, return a builtin decl for that operation. */
2984 tree
2985 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2986 tree *offp, int *scalep)
2988 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2989 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2990 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2991 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2992 tree offtype = NULL_TREE;
2993 tree decl, base, off;
2994 machine_mode pmode;
2995 int punsignedp, pvolatilep;
2997 base = DR_REF (dr);
2998 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
2999 see if we can use the def stmt of the address. */
3000 if (is_gimple_call (stmt)
3001 && gimple_call_internal_p (stmt)
3002 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
3003 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
3004 && TREE_CODE (base) == MEM_REF
3005 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
3006 && integer_zerop (TREE_OPERAND (base, 1))
3007 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
3009 gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3010 if (is_gimple_assign (def_stmt)
3011 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3012 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3015 /* The gather builtins need address of the form
3016 loop_invariant + vector * {1, 2, 4, 8}
3018 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3019 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3020 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3021 multiplications and additions in it. To get a vector, we need
3022 a single SSA_NAME that will be defined in the loop and will
3023 contain everything that is not loop invariant and that can be
3024     vectorized.  The following code attempts to find such a preexisting
3025 SSA_NAME OFF and put the loop invariants into a tree BASE
3026 that can be gimplified before the loop. */
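  /* Hedged example (an assumed source loop, not from this file):

       for (i = 0; i < n; i++)
         out[i] = base[idx[i]];

     The read base[idx[i]] is non-affine; the code below splits it into
     a loop-invariant BASE (essentially &base[0]), an in-loop SSA_NAME
     OFF coming from idx[i], and SCALE == sizeof (*base), which is the
     form the gather builtins expect.  */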
3027 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
3028 &pmode, &punsignedp, &pvolatilep, false);
3029 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
3031 if (TREE_CODE (base) == MEM_REF)
3033 if (!integer_zerop (TREE_OPERAND (base, 1)))
3035 if (off == NULL_TREE)
3037 offset_int moff = mem_ref_offset (base);
3038 off = wide_int_to_tree (sizetype, moff);
3040 else
3041 off = size_binop (PLUS_EXPR, off,
3042 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3044 base = TREE_OPERAND (base, 0);
3046 else
3047 base = build_fold_addr_expr (base);
3049 if (off == NULL_TREE)
3050 off = size_zero_node;
3052   /* If base is not loop invariant, then if off is 0 we start with just
3053      the constant offset in the loop-invariant BASE and continue with base
3054      as OFF; otherwise give up.
3055 We could handle that case by gimplifying the addition of base + off
3056 into some SSA_NAME and use that as off, but for now punt. */
3057 if (!expr_invariant_in_loop_p (loop, base))
3059 if (!integer_zerop (off))
3060 return NULL_TREE;
3061 off = base;
3062 base = size_int (pbitpos / BITS_PER_UNIT);
3064 /* Otherwise put base + constant offset into the loop invariant BASE
3065 and continue with OFF. */
3066 else
3068 base = fold_convert (sizetype, base);
3069 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3072   /* OFF at this point may be either an SSA_NAME or some tree expression
3073 from get_inner_reference. Try to peel off loop invariants from it
3074 into BASE as long as possible. */
3075 STRIP_NOPS (off);
3076 while (offtype == NULL_TREE)
3078 enum tree_code code;
3079 tree op0, op1, add = NULL_TREE;
3081 if (TREE_CODE (off) == SSA_NAME)
3083 gimple def_stmt = SSA_NAME_DEF_STMT (off);
3085 if (expr_invariant_in_loop_p (loop, off))
3086 return NULL_TREE;
3088 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3089 break;
3091 op0 = gimple_assign_rhs1 (def_stmt);
3092 code = gimple_assign_rhs_code (def_stmt);
3093 op1 = gimple_assign_rhs2 (def_stmt);
3095 else
3097 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3098 return NULL_TREE;
3099 code = TREE_CODE (off);
3100 extract_ops_from_tree (off, &code, &op0, &op1);
3102 switch (code)
3104 case POINTER_PLUS_EXPR:
3105 case PLUS_EXPR:
3106 if (expr_invariant_in_loop_p (loop, op0))
3108 add = op0;
3109 off = op1;
3110 do_add:
3111 add = fold_convert (sizetype, add);
3112 if (scale != 1)
3113 add = size_binop (MULT_EXPR, add, size_int (scale));
3114 base = size_binop (PLUS_EXPR, base, add);
3115 continue;
3117 if (expr_invariant_in_loop_p (loop, op1))
3119 add = op1;
3120 off = op0;
3121 goto do_add;
3123 break;
3124 case MINUS_EXPR:
3125 if (expr_invariant_in_loop_p (loop, op1))
3127 add = fold_convert (sizetype, op1);
3128 add = size_binop (MINUS_EXPR, size_zero_node, add);
3129 off = op0;
3130 goto do_add;
3132 break;
3133 case MULT_EXPR:
3134 if (scale == 1 && tree_fits_shwi_p (op1))
3136 scale = tree_to_shwi (op1);
3137 off = op0;
3138 continue;
3140 break;
3141 case SSA_NAME:
3142 off = op0;
3143 continue;
3144 CASE_CONVERT:
3145 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3146 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3147 break;
3148 if (TYPE_PRECISION (TREE_TYPE (op0))
3149 == TYPE_PRECISION (TREE_TYPE (off)))
3151 off = op0;
3152 continue;
3154 if (TYPE_PRECISION (TREE_TYPE (op0))
3155 < TYPE_PRECISION (TREE_TYPE (off)))
3157 off = op0;
3158 offtype = TREE_TYPE (off);
3159 STRIP_NOPS (off);
3160 continue;
3162 break;
3163 default:
3164 break;
3166 break;
3169 /* If at the end OFF still isn't a SSA_NAME or isn't
3170 defined in the loop, punt. */
3171 if (TREE_CODE (off) != SSA_NAME
3172 || expr_invariant_in_loop_p (loop, off))
3173 return NULL_TREE;
3175 if (offtype == NULL_TREE)
3176 offtype = TREE_TYPE (off);
3178 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3179 offtype, scale);
3180 if (decl == NULL_TREE)
3181 return NULL_TREE;
3183 if (basep)
3184 *basep = base;
3185 if (offp)
3186 *offp = off;
3187 if (scalep)
3188 *scalep = scale;
3189 return decl;
3192 /* Function vect_analyze_data_refs.
3194 Find all the data references in the loop or basic block.
3196 The general structure of the analysis of data refs in the vectorizer is as
3197 follows:
3198 1- vect_analyze_data_refs(loop/bb): call
3199 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3200 in the loop/bb and their dependences.
3201 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3202 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3203 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3207 bool
3208 vect_analyze_data_refs (loop_vec_info loop_vinfo,
3209 bb_vec_info bb_vinfo,
3210 int *min_vf, unsigned *n_stmts)
3212 struct loop *loop = NULL;
3213 basic_block bb = NULL;
3214 unsigned int i;
3215 vec<data_reference_p> datarefs;
3216 struct data_reference *dr;
3217 tree scalar_type;
3219 if (dump_enabled_p ())
3220 dump_printf_loc (MSG_NOTE, vect_location,
3221 "=== vect_analyze_data_refs ===\n");
3223 if (loop_vinfo)
3225 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3227 loop = LOOP_VINFO_LOOP (loop_vinfo);
3228 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3229 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
3231 if (dump_enabled_p ())
3232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3233 "not vectorized: loop contains function calls"
3234 " or data references that cannot be analyzed\n");
3235 return false;
3238 for (i = 0; i < loop->num_nodes; i++)
3240 gimple_stmt_iterator gsi;
3242 for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3244 gimple stmt = gsi_stmt (gsi);
3245 if (is_gimple_debug (stmt))
3246 continue;
3247 ++*n_stmts;
3248 if (!find_data_references_in_stmt (loop, stmt, &datarefs))
3250 if (is_gimple_call (stmt) && loop->safelen)
3252 tree fndecl = gimple_call_fndecl (stmt), op;
3253 if (fndecl != NULL_TREE)
3255 struct cgraph_node *node = cgraph_node::get (fndecl);
3256 if (node != NULL && node->simd_clones != NULL)
3258 unsigned int j, n = gimple_call_num_args (stmt);
3259 for (j = 0; j < n; j++)
3261 op = gimple_call_arg (stmt, j);
3262 if (DECL_P (op)
3263 || (REFERENCE_CLASS_P (op)
3264 && get_base_address (op)))
3265 break;
3267 op = gimple_call_lhs (stmt);
3268 /* Ignore #pragma omp declare simd functions
3269 if they don't have data references in the
3270 call stmt itself. */
3271 if (j == n
3272 && !(op
3273 && (DECL_P (op)
3274 || (REFERENCE_CLASS_P (op)
3275 && get_base_address (op)))))
3276 continue;
3280 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3281 if (dump_enabled_p ())
3282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3283 "not vectorized: loop contains function "
3284 "calls or data references that cannot "
3285 "be analyzed\n");
3286 return false;
3291 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3293 else
3295 gimple_stmt_iterator gsi;
3297 bb = BB_VINFO_BB (bb_vinfo);
3298 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3300 gimple stmt = gsi_stmt (gsi);
3301 if (is_gimple_debug (stmt))
3302 continue;
3303 ++*n_stmts;
3304 if (!find_data_references_in_stmt (NULL, stmt,
3305 &BB_VINFO_DATAREFS (bb_vinfo)))
3307 /* Mark the rest of the basic-block as unvectorizable. */
3308 for (; !gsi_end_p (gsi); gsi_next (&gsi))
3310 stmt = gsi_stmt (gsi);
3311 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
3313 break;
3317 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
3320 /* Go through the data-refs, check that the analysis succeeded. Update
3321 pointer from stmt_vec_info struct to DR and vectype. */
3323 FOR_EACH_VEC_ELT (datarefs, i, dr)
3325 gimple stmt;
3326 stmt_vec_info stmt_info;
3327 tree base, offset, init;
3328 bool gather = false;
3329 bool simd_lane_access = false;
3330 int vf;
3332 again:
3333 if (!dr || !DR_REF (dr))
3335 if (dump_enabled_p ())
3336 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3337 "not vectorized: unhandled data-ref\n");
3338 return false;
3341 stmt = DR_STMT (dr);
3342 stmt_info = vinfo_for_stmt (stmt);
3344 /* Discard clobbers from the dataref vector. We will remove
3345 clobber stmts during vectorization. */
3346 if (gimple_clobber_p (stmt))
3348 free_data_ref (dr);
3349 if (i == datarefs.length () - 1)
3351 datarefs.pop ();
3352 break;
3354 datarefs.ordered_remove (i);
3355 dr = datarefs[i];
3356 goto again;
3359 /* Check that analysis of the data-ref succeeded. */
3360 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3361 || !DR_STEP (dr))
3363 bool maybe_gather
3364 = DR_IS_READ (dr)
3365 && !TREE_THIS_VOLATILE (DR_REF (dr))
3366 && targetm.vectorize.builtin_gather != NULL;
3367 bool maybe_simd_lane_access
3368 = loop_vinfo && loop->simduid;
3370 /* If target supports vector gather loads, or if this might be
3371             a SIMD lane access, see if they can be used. */
3372 if (loop_vinfo
3373 && (maybe_gather || maybe_simd_lane_access)
3374 && !nested_in_vect_loop_p (loop, stmt))
3376 struct data_reference *newdr
3377 = create_data_ref (NULL, loop_containing_stmt (stmt),
3378 DR_REF (dr), stmt, true);
3379 gcc_assert (newdr != NULL && DR_REF (newdr));
3380 if (DR_BASE_ADDRESS (newdr)
3381 && DR_OFFSET (newdr)
3382 && DR_INIT (newdr)
3383 && DR_STEP (newdr)
3384 && integer_zerop (DR_STEP (newdr)))
3386 if (maybe_simd_lane_access)
3388 tree off = DR_OFFSET (newdr);
3389 STRIP_NOPS (off);
3390 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3391 && TREE_CODE (off) == MULT_EXPR
3392 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3394 tree step = TREE_OPERAND (off, 1);
3395 off = TREE_OPERAND (off, 0);
3396 STRIP_NOPS (off);
3397 if (CONVERT_EXPR_P (off)
3398 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3399 0)))
3400 < TYPE_PRECISION (TREE_TYPE (off)))
3401 off = TREE_OPERAND (off, 0);
3402 if (TREE_CODE (off) == SSA_NAME)
3404 gimple def = SSA_NAME_DEF_STMT (off);
3405 tree reft = TREE_TYPE (DR_REF (newdr));
3406 if (is_gimple_call (def)
3407 && gimple_call_internal_p (def)
3408 && (gimple_call_internal_fn (def)
3409 == IFN_GOMP_SIMD_LANE))
3411 tree arg = gimple_call_arg (def, 0);
3412 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3413 arg = SSA_NAME_VAR (arg);
3414 if (arg == loop->simduid
3415 /* For now. */
3416 && tree_int_cst_equal
3417 (TYPE_SIZE_UNIT (reft),
3418 step))
3420 DR_OFFSET (newdr) = ssize_int (0);
3421 DR_STEP (newdr) = step;
3422 DR_ALIGNED_TO (newdr)
3423 = size_int (BIGGEST_ALIGNMENT);
3424 dr = newdr;
3425 simd_lane_access = true;
3431 if (!simd_lane_access && maybe_gather)
3433 dr = newdr;
3434 gather = true;
3437 if (!gather && !simd_lane_access)
3438 free_data_ref (newdr);
3441 if (!gather && !simd_lane_access)
3443 if (dump_enabled_p ())
3445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3446 "not vectorized: data ref analysis "
3447 "failed ");
3448 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3449 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3452 if (bb_vinfo)
3453 break;
3455 return false;
3459 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3461 if (dump_enabled_p ())
3462 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3463 "not vectorized: base addr of dr is a "
3464 "constant\n");
3466 if (bb_vinfo)
3467 break;
3469 if (gather || simd_lane_access)
3470 free_data_ref (dr);
3471 return false;
3474 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3476 if (dump_enabled_p ())
3478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3479 "not vectorized: volatile type ");
3480 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3481 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3484 if (bb_vinfo)
3485 break;
3487 return false;
3490 if (stmt_can_throw_internal (stmt))
3492 if (dump_enabled_p ())
3494 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3495 "not vectorized: statement can throw an "
3496 "exception ");
3497 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3498 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3501 if (bb_vinfo)
3502 break;
3504 if (gather || simd_lane_access)
3505 free_data_ref (dr);
3506 return false;
3509 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3510 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3512 if (dump_enabled_p ())
3514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3515 "not vectorized: statement is bitfield "
3516 "access ");
3517 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3518 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3521 if (bb_vinfo)
3522 break;
3524 if (gather || simd_lane_access)
3525 free_data_ref (dr);
3526 return false;
3529 base = unshare_expr (DR_BASE_ADDRESS (dr));
3530 offset = unshare_expr (DR_OFFSET (dr));
3531 init = unshare_expr (DR_INIT (dr));
3533 if (is_gimple_call (stmt)
3534 && (!gimple_call_internal_p (stmt)
3535 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3536 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3538 if (dump_enabled_p ())
3540 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3541 "not vectorized: dr in a call ");
3542 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3543 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3546 if (bb_vinfo)
3547 break;
3549 if (gather || simd_lane_access)
3550 free_data_ref (dr);
3551 return false;
3554 /* Update DR field in stmt_vec_info struct. */
3556 /* If the dataref is in an inner-loop of the loop that is considered for
3557        vectorization, we also want to analyze the access relative to
3558 the outer-loop (DR contains information only relative to the
3559 inner-most enclosing loop). We do that by building a reference to the
3560 first location accessed by the inner-loop, and analyze it relative to
3561 the outer-loop. */
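      /* Illustrative example (hypothetical nest): when vectorizing the
         outer loop of

           for (i = 0; i < n; i++)
             for (j = 0; j < m; j++)
               ... a[i][j] ...

         the DR itself describes a[i][j] with a step of sizeof (a[0][0])
         relative to the inner loop, while the STMT_VINFO_DR_* fields
         computed below describe the first inner access a[i][0], whose
         step relative to the outer loop is the row size sizeof (a[0]).  */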
3562 if (loop && nested_in_vect_loop_p (loop, stmt))
3564 tree outer_step, outer_base, outer_init;
3565 HOST_WIDE_INT pbitsize, pbitpos;
3566 tree poffset;
3567 machine_mode pmode;
3568 int punsignedp, pvolatilep;
3569 affine_iv base_iv, offset_iv;
3570 tree dinit;
3572 /* Build a reference to the first location accessed by the
3573 inner-loop: *(BASE+INIT). (The first location is actually
3574 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3575 tree inner_base = build_fold_indirect_ref
3576 (fold_build_pointer_plus (base, init));
3578 if (dump_enabled_p ())
3580 dump_printf_loc (MSG_NOTE, vect_location,
3581 "analyze in outer-loop: ");
3582 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3583 dump_printf (MSG_NOTE, "\n");
3586 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3587 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3588 gcc_assert (outer_base != NULL_TREE);
3590 if (pbitpos % BITS_PER_UNIT != 0)
3592 if (dump_enabled_p ())
3593 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3594 "failed: bit offset alignment.\n");
3595 return false;
3598 outer_base = build_fold_addr_expr (outer_base);
3599 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3600 &base_iv, false))
3602 if (dump_enabled_p ())
3603 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3604 "failed: evolution of base is not affine.\n");
3605 return false;
3608 if (offset)
3610 if (poffset)
3611 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3612 poffset);
3613 else
3614 poffset = offset;
3617 if (!poffset)
3619 offset_iv.base = ssize_int (0);
3620 offset_iv.step = ssize_int (0);
3622 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3623 &offset_iv, false))
3625 if (dump_enabled_p ())
3626 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3627 "evolution of offset is not affine.\n");
3628 return false;
3631 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3632 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3633 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3634 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3635 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3637 outer_step = size_binop (PLUS_EXPR,
3638 fold_convert (ssizetype, base_iv.step),
3639 fold_convert (ssizetype, offset_iv.step));
3641 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3642 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3643 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3644 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3645 STMT_VINFO_DR_OFFSET (stmt_info) =
3646 fold_convert (ssizetype, offset_iv.base);
3647 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3648 size_int (highest_pow2_factor (offset_iv.base));
3650 if (dump_enabled_p ())
3652 dump_printf_loc (MSG_NOTE, vect_location,
3653 "\touter base_address: ");
3654 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3655 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3656 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3657 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3658 STMT_VINFO_DR_OFFSET (stmt_info));
3659 dump_printf (MSG_NOTE,
3660 "\n\touter constant offset from base address: ");
3661 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3662 STMT_VINFO_DR_INIT (stmt_info));
3663 dump_printf (MSG_NOTE, "\n\touter step: ");
3664 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3665 STMT_VINFO_DR_STEP (stmt_info));
3666 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3667 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3668 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3669 dump_printf (MSG_NOTE, "\n");
3673 if (STMT_VINFO_DATA_REF (stmt_info))
3675 if (dump_enabled_p ())
3677 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3678 "not vectorized: more than one data ref "
3679 "in stmt: ");
3680 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3681 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3684 if (bb_vinfo)
3685 break;
3687 if (gather || simd_lane_access)
3688 free_data_ref (dr);
3689 return false;
3692 STMT_VINFO_DATA_REF (stmt_info) = dr;
3693 if (simd_lane_access)
3695 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3696 free_data_ref (datarefs[i]);
3697 datarefs[i] = dr;
3700 /* Set vectype for STMT. */
3701 scalar_type = TREE_TYPE (DR_REF (dr));
3702 STMT_VINFO_VECTYPE (stmt_info)
3703 = get_vectype_for_scalar_type (scalar_type);
3704 if (!STMT_VINFO_VECTYPE (stmt_info))
3706 if (dump_enabled_p ())
3708 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3709 "not vectorized: no vectype for stmt: ");
3710 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3711 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3712 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3713 scalar_type);
3714 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3717 if (bb_vinfo)
3718 break;
3720 if (gather || simd_lane_access)
3722 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3723 if (gather)
3724 free_data_ref (dr);
3726 return false;
3728 else
3730 if (dump_enabled_p ())
3732 dump_printf_loc (MSG_NOTE, vect_location,
3733 "got vectype for stmt: ");
3734 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3735 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3736 STMT_VINFO_VECTYPE (stmt_info));
3737 dump_printf (MSG_NOTE, "\n");
3741 /* Adjust the minimal vectorization factor according to the
3742 vector type. */
3743 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3744 if (vf > *min_vf)
3745 *min_vf = vf;
3747 if (gather)
3749 tree off;
3751 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3752 if (gather
3753 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3754 gather = false;
3755 if (!gather)
3757 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3758 free_data_ref (dr);
3759 if (dump_enabled_p ())
3761 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3762 "not vectorized: not suitable for gather "
3763 "load ");
3764 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3765 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3767 return false;
3770 datarefs[i] = dr;
3771 STMT_VINFO_GATHER_P (stmt_info) = true;
3773 else if (loop_vinfo
3774 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3776 if (nested_in_vect_loop_p (loop, stmt)
3777 || !DR_IS_READ (dr))
3779 if (dump_enabled_p ())
3781 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3782 "not vectorized: not suitable for strided "
3783 "load ");
3784 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3785 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3787 return false;
3789 STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
3793 /* If, when trying to vectorize a basic-block, we stopped the analysis at
3794 the first dataref we could not analyze, mark the rest of the datarefs
3795 as not vectorizable and truncate the vector of datarefs. That
3796 avoids spending useless time in analyzing their dependence. */
3797 if (i != datarefs.length ())
3799 gcc_assert (bb_vinfo != NULL);
3800 for (unsigned j = i; j < datarefs.length (); ++j)
3802 data_reference_p dr = datarefs[j];
3803 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3804 free_data_ref (dr);
3806 datarefs.truncate (i);
3809 return true;
3813 /* Function vect_get_new_vect_var.
3815 Returns a name for a new variable. The current naming scheme prepends
3816 the prefix "vect_", "stmp_" or "vectp_" (depending on the value of
3817 VAR_KIND) to the name of vectorizer generated variables, appending NAME
3818 to that prefix if it is provided. */
3820 tree
3821 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3823 const char *prefix;
3824 tree new_vect_var;
3826 switch (var_kind)
3828 case vect_simple_var:
3829 prefix = "vect";
3830 break;
3831 case vect_scalar_var:
3832 prefix = "stmp";
3833 break;
3834 case vect_pointer_var:
3835 prefix = "vectp";
3836 break;
3837 default:
3838 gcc_unreachable ();
3841 if (name)
3843 char* tmp = concat (prefix, "_", name, NULL);
3844 new_vect_var = create_tmp_reg (type, tmp);
3845 free (tmp);
3847 else
3848 new_vect_var = create_tmp_reg (type, prefix);
3850 return new_vect_var;
3854 /* Function vect_create_addr_base_for_vector_ref.
3856 Create an expression that computes the address of the first memory location
3857 that will be accessed for a data reference.
3859 Input:
3860 STMT: The statement containing the data reference.
3861 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3862 OFFSET: Optional. If supplied, it is added to the initial address.
3863 LOOP: Specifies relative to which loop-nest the address should be computed.
3864 For example, when the dataref is in an inner-loop nested in an
3865 outer-loop that is now being vectorized, LOOP can be either the
3866 outer-loop, or the inner-loop. The first memory location accessed
3867 by the following dataref ('in' points to short):
3869 for (i=0; i<N; i++)
3870 for (j=0; j<M; j++)
3871 s += in[i+j]
3873 is as follows:
3874 if LOOP=i_loop: &in (relative to i_loop)
3875 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3876 BYTE_OFFSET: Optional, defaults to NULL. If supplied, it is added to the
3877 initial address. Unlike OFFSET, which is a number of elements to
3878 be added, BYTE_OFFSET is measured in bytes.
3880 Output:
3881 1. Return an SSA_NAME whose value is the address of the memory location of
3882 the first vector of the data reference.
3883 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3884 these statement(s) which define the returned SSA_NAME.
3886 FORNOW: We are only handling array accesses with step 1. */
3888 tree
3889 vect_create_addr_base_for_vector_ref (gimple stmt,
3890 gimple_seq *new_stmt_list,
3891 tree offset,
3892 struct loop *loop,
3893 tree byte_offset)
3895 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3896 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3897 tree data_ref_base;
3898 const char *base_name;
3899 tree addr_base;
3900 tree dest;
3901 gimple_seq seq = NULL;
3902 tree base_offset;
3903 tree init;
3904 tree vect_ptr_type;
3905 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3906 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3908 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3910 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3912 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3914 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3915 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3916 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3918 else
3920 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3921 base_offset = unshare_expr (DR_OFFSET (dr));
3922 init = unshare_expr (DR_INIT (dr));
3925 if (loop_vinfo)
3926 base_name = get_name (data_ref_base);
3927 else
3929 base_offset = ssize_int (0);
3930 init = ssize_int (0);
3931 base_name = get_name (DR_REF (dr));
3934 /* Create base_offset */
3935 base_offset = size_binop (PLUS_EXPR,
3936 fold_convert (sizetype, base_offset),
3937 fold_convert (sizetype, init));
3939 if (offset)
3941 offset = fold_build2 (MULT_EXPR, sizetype,
3942 fold_convert (sizetype, offset), step);
3943 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3944 base_offset, offset);
3946 if (byte_offset)
3948 byte_offset = fold_convert (sizetype, byte_offset);
3949 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3950 base_offset, byte_offset);
3953 /* base + base_offset */
3954 if (loop_vinfo)
3955 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3956 else
3958 addr_base = build1 (ADDR_EXPR,
3959 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3960 unshare_expr (DR_REF (dr)));
3963 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3964 addr_base = fold_convert (vect_ptr_type, addr_base);
3965 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3966 addr_base = force_gimple_operand (addr_base, &seq, false, dest);
3967 gimple_seq_add_seq (new_stmt_list, seq);
3969 if (DR_PTR_INFO (dr)
3970 && TREE_CODE (addr_base) == SSA_NAME)
3972 duplicate_ssa_name_ptr_info (addr_base, DR_PTR_INFO (dr));
3973 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3974 int misalign = DR_MISALIGNMENT (dr);
3975 if (offset || byte_offset || (misalign == -1))
3976 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3977 else
3978 set_ptr_info_alignment (SSA_NAME_PTR_INFO (addr_base), align, misalign);
3981 if (dump_enabled_p ())
3983 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3984 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3985 dump_printf (MSG_NOTE, "\n");
3988 return addr_base;
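/* Illustrative sketch only (hypothetical names, never compiled): the byte
   arithmetic built above, written as plain C.  OFFSET counts elements of
   size STEP bytes and BYTE_OFFSET counts bytes, matching the function
   comment.  */
#if 0
static char *
example_addr_base (char *data_ref_base, long init, long offset,
                   long step, long byte_offset)
{
  /* base_offset = init + offset * step + byte_offset, as composed with
     size_binop/fold_build2 above; the final addition corresponds to
     fold_build_pointer_plus.  */
  long base_offset = init + offset * step + byte_offset;
  return data_ref_base + base_offset;
}
#endif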
3992 /* Function vect_create_data_ref_ptr.
3994 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3995 location accessed in the loop by STMT, along with the def-use update
3996 chain to appropriately advance the pointer through the loop iterations.
3997 Also set aliasing information for the pointer. This pointer is used by
3998 the callers to this function to create a memory reference expression for
3999 vector load/store access.
4001 Input:
4002 1. STMT: a stmt that references memory. Expected to be of the form
4003 GIMPLE_ASSIGN <name, data-ref> or
4004 GIMPLE_ASSIGN <data-ref, name>.
4005 2. AGGR_TYPE: the type of the reference, which should be either a vector
4006 or an array.
4007 3. AT_LOOP: the loop where the vector memref is to be created.
4008 4. OFFSET (optional): an offset to be added to the initial address accessed
4009 by the data-ref in STMT.
4010 5. BSI: location where the new stmts are to be placed if there is no loop
4011 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4012 pointing to the initial address.
4013 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4014 to the initial address accessed by the data-ref in STMT. This is
4015 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4016 in bytes.
4018 Output:
4019 1. Declare a new ptr to vector_type, and have it point to the base of the
4020 data reference (the initial address accessed by the data reference).
4021 For example, for vector of type V8HI, the following code is generated:
4023 v8hi *ap;
4024 ap = (v8hi *)initial_address;
4026 if OFFSET is not supplied:
4027 initial_address = &a[init];
4028 if OFFSET is supplied:
4029 initial_address = &a[init + OFFSET];
4030 if BYTE_OFFSET is supplied:
4031 initial_address = &a[init] + BYTE_OFFSET;
4033 Return the initial_address in INITIAL_ADDRESS.
4035 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4036 update the pointer in each iteration of the loop.
4038 Return the increment stmt that updates the pointer in PTR_INCR.
4040 3. Set INV_P to true if the access pattern of the data reference in the
4041 vectorized loop is invariant. Set it to false otherwise.
4043 4. Return the pointer. */
4045 tree
4046 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
4047 tree offset, tree *initial_address,
4048 gimple_stmt_iterator *gsi, gimple *ptr_incr,
4049 bool only_init, bool *inv_p, tree byte_offset)
4051 const char *base_name;
4052 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4053 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4054 struct loop *loop = NULL;
4055 bool nested_in_vect_loop = false;
4056 struct loop *containing_loop = NULL;
4057 tree aggr_ptr_type;
4058 tree aggr_ptr;
4059 tree new_temp;
4060 gimple vec_stmt;
4061 gimple_seq new_stmt_list = NULL;
4062 edge pe = NULL;
4063 basic_block new_bb;
4064 tree aggr_ptr_init;
4065 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4066 tree aptr;
4067 gimple_stmt_iterator incr_gsi;
4068 bool insert_after;
4069 tree indx_before_incr, indx_after_incr;
4070 gimple incr;
4071 tree step;
4072 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4074 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4075 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4077 if (loop_vinfo)
4079 loop = LOOP_VINFO_LOOP (loop_vinfo);
4080 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4081 containing_loop = (gimple_bb (stmt))->loop_father;
4082 pe = loop_preheader_edge (loop);
4084 else
4086 gcc_assert (bb_vinfo);
4087 only_init = true;
4088 *ptr_incr = NULL;
4091 /* Check the step (evolution) of the load in LOOP, and record
4092 whether it's invariant. */
4093 if (nested_in_vect_loop)
4094 step = STMT_VINFO_DR_STEP (stmt_info);
4095 else
4096 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
4098 if (integer_zerop (step))
4099 *inv_p = true;
4100 else
4101 *inv_p = false;
4103 /* Create an expression for the first address accessed by this load
4104 in LOOP. */
4105 base_name = get_name (DR_BASE_ADDRESS (dr));
4107 if (dump_enabled_p ())
4109 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4110 dump_printf_loc (MSG_NOTE, vect_location,
4111 "create %s-pointer variable to type: ",
4112 get_tree_code_name (TREE_CODE (aggr_type)));
4113 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4114 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4115 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4116 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4117 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4118 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4119 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4120 else
4121 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4122 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4123 dump_printf (MSG_NOTE, "\n");
4126 /* (1) Create the new aggregate-pointer variable.
4127 Vector and array types inherit the alias set of their component
4128 type by default so we need to use a ref-all pointer if the data
4129 reference does not conflict with the created aggregated data
4130 reference because it is not addressable. */
4131 bool need_ref_all = false;
4132 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4133 get_alias_set (DR_REF (dr))))
4134 need_ref_all = true;
4135 /* Likewise for any of the data references in the stmt group. */
4136 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4138 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4141 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4142 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4143 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4144 get_alias_set (DR_REF (sdr))))
4146 need_ref_all = true;
4147 break;
4149 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4151 while (orig_stmt);
4153 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4154 need_ref_all);
4155 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4158 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4159 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4160 def-use update cycles for the pointer: one relative to the outer-loop
4161 (LOOP), which is what steps (3) and (4) below do. The other is relative
4162 to the inner-loop (which is the inner-most loop containing the dataref),
4163 and this is done by step (5) below.
4165 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4166 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4167 redundant. Steps (3),(4) create the following:
4169 vp0 = &base_addr;
4170 LOOP: vp1 = phi(vp0,vp2)
4173 vp2 = vp1 + step
4174 goto LOOP
4176 If there is an inner-loop nested in loop, then step (5) will also be
4177 applied, and an additional update in the inner-loop will be created:
4179 vp0 = &base_addr;
4180 LOOP: vp1 = phi(vp0,vp2)
4182 inner: vp3 = phi(vp1,vp4)
4183 vp4 = vp3 + inner_step
4184 if () goto inner
4186 vp2 = vp1 + step
4187 if () goto LOOP */
4189 /* (2) Calculate the initial address of the aggregate-pointer, and set
4190 the aggregate-pointer to point to it before the loop. */
4192 /* Create: (&(base[init_val+offset]) + byte_offset) in the loop preheader. */
4194 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4195 offset, loop, byte_offset);
4196 if (new_stmt_list)
4198 if (pe)
4200 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4201 gcc_assert (!new_bb);
4203 else
4204 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4207 *initial_address = new_temp;
4209 /* Create: p = (aggr_type *) initial_base */
4210 if (TREE_CODE (new_temp) != SSA_NAME
4211 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
4213 vec_stmt = gimple_build_assign (aggr_ptr,
4214 fold_convert (aggr_ptr_type, new_temp));
4215 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
4216 /* Copy the points-to information if it exists. */
4217 if (DR_PTR_INFO (dr))
4218 duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
4219 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
4220 if (pe)
4222 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
4223 gcc_assert (!new_bb);
4225 else
4226 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
4228 else
4229 aggr_ptr_init = new_temp;
4231 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4232 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4233 inner-loop nested in LOOP (during outer-loop vectorization). */
4235 /* No update in loop is required. */
4236 if (only_init && (!loop_vinfo || at_loop == loop))
4237 aptr = aggr_ptr_init;
4238 else
4240 /* The step of the aggregate pointer is the type size. */
4241 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4242 /* One exception to the above is when the scalar step of the load in
4243 LOOP is zero. In this case the step here is also zero. */
4244 if (*inv_p)
4245 iv_step = size_zero_node;
4246 else if (tree_int_cst_sgn (step) == -1)
4247 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4249 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4251 create_iv (aggr_ptr_init,
4252 fold_convert (aggr_ptr_type, iv_step),
4253 aggr_ptr, loop, &incr_gsi, insert_after,
4254 &indx_before_incr, &indx_after_incr);
4255 incr = gsi_stmt (incr_gsi);
4256 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4258 /* Copy the points-to information if it exists. */
4259 if (DR_PTR_INFO (dr))
4261 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
4262 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
4264 if (ptr_incr)
4265 *ptr_incr = incr;
4267 aptr = indx_before_incr;
4270 if (!nested_in_vect_loop || only_init)
4271 return aptr;
4274 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4275 nested in LOOP, if it exists. */
4277 gcc_assert (nested_in_vect_loop);
4278 if (!only_init)
4280 standard_iv_increment_position (containing_loop, &incr_gsi,
4281 &insert_after);
4282 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4283 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4284 &indx_after_incr);
4285 incr = gsi_stmt (incr_gsi);
4286 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4288 /* Copy the points-to information if it exists. */
4289 if (DR_PTR_INFO (dr))
4291 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
4292 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
4294 if (ptr_incr)
4295 *ptr_incr = incr;
4297 return indx_before_incr;
4299 else
4300 gcc_unreachable ();
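/* Illustrative sketch only (hypothetical names, never compiled): the
   pointer induction variable that steps (2)-(4) above create, modeled as a
   plain C loop.  Each vectorized iteration advances the pointer by the
   size of AGGR_TYPE (one vector); when the access is invariant the step is
   zero and the pointer stays put.  */
#if 0
static void
example_data_ref_ptr (const float *initial_address, float *out,
                      unsigned int vec_units, unsigned int niters)
{
  const float *aggr_ptr = initial_address;    /* vp0 = &base_addr          */
  for (unsigned int i = 0; i < niters; i++)   /* LOOP: vp1 = phi(vp0,vp2)  */
    {
      out[i] = aggr_ptr[0];                   /* vector access through vp1 */
      aggr_ptr += vec_units;                  /* vp2 = vp1 + step          */
    }
}
#endif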
4304 /* Function bump_vector_ptr
4306 Increment a pointer (to a vector type) by vector-size. If requested,
4307 i.e. if PTR-INCR is given, then also connect the new increment stmt
4308 to the existing def-use update-chain of the pointer, by modifying
4309 the PTR_INCR as illustrated below:
4311 The pointer def-use update-chain before this function:
4312 DATAREF_PTR = phi (p_0, p_2)
4313 ....
4314 PTR_INCR: p_2 = DATAREF_PTR + step
4316 The pointer def-use update-chain after this function:
4317 DATAREF_PTR = phi (p_0, p_2)
4318 ....
4319 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4320 ....
4321 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4323 Input:
4324 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4325 in the loop.
4326 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4327 the loop. The increment amount across iterations is expected
4328 to be vector_size.
4329 BSI - location where the new update stmt is to be placed.
4330 STMT - the original scalar memory-access stmt that is being vectorized.
4331 BUMP - optional. The offset by which to bump the pointer. If not given,
4332 the offset is assumed to be vector_size.
4334 Output: Return NEW_DATAREF_PTR as illustrated above.
4338 tree
4339 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
4340 gimple stmt, tree bump)
4342 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4343 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4344 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4345 tree update = TYPE_SIZE_UNIT (vectype);
4346 gassign *incr_stmt;
4347 ssa_op_iter iter;
4348 use_operand_p use_p;
4349 tree new_dataref_ptr;
4351 if (bump)
4352 update = bump;
4354 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4355 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4356 dataref_ptr, update);
4357 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4359 /* Copy the points-to information if it exists. */
4360 if (DR_PTR_INFO (dr))
4362 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4363 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4366 if (!ptr_incr)
4367 return new_dataref_ptr;
4369 /* Update the vector-pointer's cross-iteration increment. */
4370 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4372 tree use = USE_FROM_PTR (use_p);
4374 if (use == dataref_ptr)
4375 SET_USE (use_p, new_dataref_ptr);
4376 else
4377 gcc_assert (tree_int_cst_compare (use, update) == 0);
4380 return new_dataref_ptr;
4384 /* Function vect_create_destination_var.
4386 Create a new temporary of type VECTYPE. */
4388 tree
4389 vect_create_destination_var (tree scalar_dest, tree vectype)
4391 tree vec_dest;
4392 const char *name;
4393 char *new_name;
4394 tree type;
4395 enum vect_var_kind kind;
4397 kind = vectype ? vect_simple_var : vect_scalar_var;
4398 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4400 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4402 name = get_name (scalar_dest);
4403 if (name)
4404 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4405 else
4406 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4407 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4408 free (new_name);
4410 return vec_dest;
4413 /* Function vect_grouped_store_supported.
4415 Returns TRUE if interleave high and interleave low permutations
4416 are supported, and FALSE otherwise. */
4418 bool
4419 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4421 machine_mode mode = TYPE_MODE (vectype);
4423 /* vect_permute_store_chain requires the group size to be equal to 3 or
4424 be a power of two. */
4425 if (count != 3 && exact_log2 (count) == -1)
4427 if (dump_enabled_p ())
4428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4429 "the size of the group of accesses"
4430 " is not a power of 2 or not eqaul to 3\n");
4431 return false;
4434 /* Check that the permutation is supported. */
4435 if (VECTOR_MODE_P (mode))
4437 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4438 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4440 if (count == 3)
4442 unsigned int j0 = 0, j1 = 0, j2 = 0;
4443 unsigned int i, j;
4445 for (j = 0; j < 3; j++)
4447 int nelt0 = ((3 - j) * nelt) % 3;
4448 int nelt1 = ((3 - j) * nelt + 1) % 3;
4449 int nelt2 = ((3 - j) * nelt + 2) % 3;
4450 for (i = 0; i < nelt; i++)
4452 if (3 * i + nelt0 < nelt)
4453 sel[3 * i + nelt0] = j0++;
4454 if (3 * i + nelt1 < nelt)
4455 sel[3 * i + nelt1] = nelt + j1++;
4456 if (3 * i + nelt2 < nelt)
4457 sel[3 * i + nelt2] = 0;
4459 if (!can_vec_perm_p (mode, false, sel))
4461 if (dump_enabled_p ())
4462 dump_printf (MSG_MISSED_OPTIMIZATION,
4463 "permutaion op not supported by target.\n");
4464 return false;
4467 for (i = 0; i < nelt; i++)
4469 if (3 * i + nelt0 < nelt)
4470 sel[3 * i + nelt0] = 3 * i + nelt0;
4471 if (3 * i + nelt1 < nelt)
4472 sel[3 * i + nelt1] = 3 * i + nelt1;
4473 if (3 * i + nelt2 < nelt)
4474 sel[3 * i + nelt2] = nelt + j2++;
4476 if (!can_vec_perm_p (mode, false, sel))
4478 if (dump_enabled_p ())
4479 dump_printf (MSG_MISSED_OPTIMIZATION,
4480 "permutaion op not supported by target.\n");
4481 return false;
4484 return true;
4486 else
4488 /* If length is not equal to 3 then only power of 2 is supported. */
4489 gcc_assert (exact_log2 (count) != -1);
4491 for (i = 0; i < nelt / 2; i++)
4493 sel[i * 2] = i;
4494 sel[i * 2 + 1] = i + nelt;
4496 if (can_vec_perm_p (mode, false, sel))
4498 for (i = 0; i < nelt; i++)
4499 sel[i] += nelt / 2;
4500 if (can_vec_perm_p (mode, false, sel))
4501 return true;
4506 if (dump_enabled_p ())
4507 dump_printf (MSG_MISSED_OPTIMIZATION,
4508 "permutaion op not supported by target.\n");
4509 return false;
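/* Illustrative sketch only (hypothetical helper, never compiled): the
   interleave-high/low selectors whose support is checked above for the
   power-of-two case.  SEL_HIGH interleaves the first halves of the two
   input vectors, SEL_LOW their second halves; for NELT == 8 this gives
   {0,8,1,9,2,10,3,11} and {4,12,5,13,6,14,7,15}.  */
#if 0
static void
example_store_interleave_sel (unsigned char *sel_high,
                              unsigned char *sel_low, unsigned int nelt)
{
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      sel_high[i * 2] = i;
      sel_high[i * 2 + 1] = i + nelt;
      sel_low[i * 2] = i + nelt / 2;
      sel_low[i * 2 + 1] = i + nelt / 2 + nelt;
    }
}
#endif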
4513 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4514 type VECTYPE. */
4516 bool
4517 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4519 return vect_lanes_optab_supported_p ("vec_store_lanes",
4520 vec_store_lanes_optab,
4521 vectype, count);
4525 /* Function vect_permute_store_chain.
4527 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4528 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4529 the data correctly for the stores. Return the final references for stores
4530 in RESULT_CHAIN.
4532 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4533 The input is 4 vectors each containing 8 elements. We assign a number to
4534 each element, the input sequence is:
4536 1st vec: 0 1 2 3 4 5 6 7
4537 2nd vec: 8 9 10 11 12 13 14 15
4538 3rd vec: 16 17 18 19 20 21 22 23
4539 4th vec: 24 25 26 27 28 29 30 31
4541 The output sequence should be:
4543 1st vec: 0 8 16 24 1 9 17 25
4544 2nd vec: 2 10 18 26 3 11 19 27
4545 3rd vec: 4 12 20 28 5 13 21 29
4546 4th vec: 6 14 22 30 7 15 23 31
4548 i.e., we interleave the contents of the four vectors in their order.
4550 We use interleave_high/low instructions to create such output. The input of
4551 each interleave_high/low operation is two vectors:
4552 1st vec 2nd vec
4553 0 1 2 3 4 5 6 7
4554 the even elements of the result vector are obtained left-to-right from the
4555 high/low elements of the first vector. The odd elements of the result are
4556 obtained left-to-right from the high/low elements of the second vector.
4557 The output of interleave_high will be: 0 4 1 5
4558 and of interleave_low: 2 6 3 7
4561 The permutation is done in log LENGTH stages. In each stage interleave_high
4562 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4563 where the first argument is taken from the first half of DR_CHAIN and the
4564 second argument from its second half.
4565 In our example,
4567 I1: interleave_high (1st vec, 3rd vec)
4568 I2: interleave_low (1st vec, 3rd vec)
4569 I3: interleave_high (2nd vec, 4th vec)
4570 I4: interleave_low (2nd vec, 4th vec)
4572 The output for the first stage is:
4574 I1: 0 16 1 17 2 18 3 19
4575 I2: 4 20 5 21 6 22 7 23
4576 I3: 8 24 9 25 10 26 11 27
4577 I4: 12 28 13 29 14 30 15 31
4579 The output of the second stage, i.e. the final result is:
4581 I1: 0 8 16 24 1 9 17 25
4582 I2: 2 10 18 26 3 11 19 27
4583 I3: 4 12 20 28 5 13 21 29
4584 I4: 6 14 22 30 7 15 23 31. */
4586 void
4587 vect_permute_store_chain (vec<tree> dr_chain,
4588 unsigned int length,
4589 gimple stmt,
4590 gimple_stmt_iterator *gsi,
4591 vec<tree> *result_chain)
4593 tree vect1, vect2, high, low;
4594 gimple perm_stmt;
4595 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4596 tree perm_mask_low, perm_mask_high;
4597 tree data_ref;
4598 tree perm3_mask_low, perm3_mask_high;
4599 unsigned int i, n, log_length = exact_log2 (length);
4600 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4601 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4603 result_chain->quick_grow (length);
4604 memcpy (result_chain->address (), dr_chain.address (),
4605 length * sizeof (tree));
4607 if (length == 3)
4609 unsigned int j0 = 0, j1 = 0, j2 = 0;
4611 for (j = 0; j < 3; j++)
4613 int nelt0 = ((3 - j) * nelt) % 3;
4614 int nelt1 = ((3 - j) * nelt + 1) % 3;
4615 int nelt2 = ((3 - j) * nelt + 2) % 3;
4617 for (i = 0; i < nelt; i++)
4619 if (3 * i + nelt0 < nelt)
4620 sel[3 * i + nelt0] = j0++;
4621 if (3 * i + nelt1 < nelt)
4622 sel[3 * i + nelt1] = nelt + j1++;
4623 if (3 * i + nelt2 < nelt)
4624 sel[3 * i + nelt2] = 0;
4626 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4628 for (i = 0; i < nelt; i++)
4630 if (3 * i + nelt0 < nelt)
4631 sel[3 * i + nelt0] = 3 * i + nelt0;
4632 if (3 * i + nelt1 < nelt)
4633 sel[3 * i + nelt1] = 3 * i + nelt1;
4634 if (3 * i + nelt2 < nelt)
4635 sel[3 * i + nelt2] = nelt + j2++;
4637 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4639 vect1 = dr_chain[0];
4640 vect2 = dr_chain[1];
4642 /* Create interleaving stmt:
4643 low = VEC_PERM_EXPR <vect1, vect2,
4644 {j, nelt, *, j + 1, nelt + j + 1, *,
4645 j + 2, nelt + j + 2, *, ...}> */
4646 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4647 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4648 vect2, perm3_mask_low);
4649 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4651 vect1 = data_ref;
4652 vect2 = dr_chain[2];
4653 /* Create interleaving stmt:
4654 low = VEC_PERM_EXPR <vect1, vect2,
4655 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4656 6, 7, nelt + j + 2, ...}> */
4657 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4658 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4659 vect2, perm3_mask_high);
4660 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4661 (*result_chain)[j] = data_ref;
4664 else
4666 /* If length is not equal to 3 then only power of 2 is supported. */
4667 gcc_assert (exact_log2 (length) != -1);
4669 for (i = 0, n = nelt / 2; i < n; i++)
4671 sel[i * 2] = i;
4672 sel[i * 2 + 1] = i + nelt;
4674 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4676 for (i = 0; i < nelt; i++)
4677 sel[i] += nelt / 2;
4678 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4680 for (i = 0, n = log_length; i < n; i++)
4682 for (j = 0; j < length/2; j++)
4684 vect1 = dr_chain[j];
4685 vect2 = dr_chain[j+length/2];
4687 /* Create interleaving stmt:
4688 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4689 ...}> */
4690 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4691 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4692 vect2, perm_mask_high);
4693 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4694 (*result_chain)[2*j] = high;
4696 /* Create interleaving stmt:
4697 low = VEC_PERM_EXPR <vect1, vect2,
4698 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4699 ...}> */
4700 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4701 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4702 vect2, perm_mask_low);
4703 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4704 (*result_chain)[2*j+1] = low;
4706 memcpy (dr_chain.address (), result_chain->address (),
4707 length * sizeof (tree));
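/* Illustrative sketch only (hypothetical names, never compiled): one
   interleave stage of the algorithm above, applied to plain arrays.
   HIGH interleaves the first halves of V1 and V2 (mask {0, nelt, 1,
   nelt+1, ...}); LOW interleaves their second halves (mask {nelt/2,
   3*nelt/2, ...}).  Applying log2(LENGTH) such stages yields the output
   order shown in the function comment.  */
#if 0
static void
example_interleave_step (const short *v1, const short *v2,
                         short *high, short *low, unsigned int nelt)
{
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      high[2 * i] = v1[i];
      high[2 * i + 1] = v2[i];
      low[2 * i] = v1[nelt / 2 + i];
      low[2 * i + 1] = v2[nelt / 2 + i];
    }
}
#endif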
4712 /* Function vect_setup_realignment
4714 This function is called when vectorizing an unaligned load using
4715 the dr_explicit_realign[_optimized] scheme.
4716 This function generates the following code at the loop prolog:
4718 p = initial_addr;
4719 x msq_init = *(floor(p)); # prolog load
4720 realignment_token = call target_builtin;
4721 loop:
4722 x msq = phi (msq_init, ---)
4724 The stmts marked with x are generated only for the case of
4725 dr_explicit_realign_optimized.
4727 The code above sets up a new (vector) pointer, pointing to the first
4728 location accessed by STMT, and a "floor-aligned" load using that pointer.
4729 It also generates code to compute the "realignment-token" (if the relevant
4730 target hook was defined), and creates a phi-node at the loop-header bb
4731 whose arguments are the result of the prolog-load (created by this
4732 function) and the result of a load that takes place in the loop (to be
4733 created by the caller to this function).
4735 For the case of dr_explicit_realign_optimized:
4736 The caller to this function uses the phi-result (msq) to create the
4737 realignment code inside the loop, and sets up the missing phi argument,
4738 as follows:
4739 loop:
4740 msq = phi (msq_init, lsq)
4741 lsq = *(floor(p')); # load in loop
4742 result = realign_load (msq, lsq, realignment_token);
4744 For the case of dr_explicit_realign:
4745 loop:
4746 msq = *(floor(p)); # load in loop
4747 p' = p + (VS-1);
4748 lsq = *(floor(p')); # load in loop
4749 result = realign_load (msq, lsq, realignment_token);
4751 Input:
4752 STMT - (scalar) load stmt to be vectorized. This load accesses
4753 a memory location that may be unaligned.
4754 BSI - place where new code is to be inserted.
4755 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4756 is used.
4758 Output:
4759 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4760 target hook, if defined.
4761 Return value - the result of the loop-header phi node. */
4763 tree
4764 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4765 tree *realignment_token,
4766 enum dr_alignment_support alignment_support_scheme,
4767 tree init_addr,
4768 struct loop **at_loop)
4770 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4771 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4772 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4773 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4774 struct loop *loop = NULL;
4775 edge pe = NULL;
4776 tree scalar_dest = gimple_assign_lhs (stmt);
4777 tree vec_dest;
4778 gimple inc;
4779 tree ptr;
4780 tree data_ref;
4781 basic_block new_bb;
4782 tree msq_init = NULL_TREE;
4783 tree new_temp;
4784 gphi *phi_stmt;
4785 tree msq = NULL_TREE;
4786 gimple_seq stmts = NULL;
4787 bool inv_p;
4788 bool compute_in_loop = false;
4789 bool nested_in_vect_loop = false;
4790 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4791 struct loop *loop_for_initial_load = NULL;
4793 if (loop_vinfo)
4795 loop = LOOP_VINFO_LOOP (loop_vinfo);
4796 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4799 gcc_assert (alignment_support_scheme == dr_explicit_realign
4800 || alignment_support_scheme == dr_explicit_realign_optimized);
4802 /* We need to generate three things:
4803 1. the misalignment computation
4804 2. the extra vector load (for the optimized realignment scheme).
4805 3. the phi node for the two vectors from which the realignment is
4806 done (for the optimized realignment scheme). */
4808 /* 1. Determine where to generate the misalignment computation.
4810 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4811 calculation will be generated by this function, outside the loop (in the
4812 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4813 caller, inside the loop.
4815 Background: If the misalignment remains fixed throughout the iterations of
4816 the loop, then both realignment schemes are applicable, and also the
4817 misalignment computation can be done outside LOOP. This is because we are
4818 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4819 are a multiple of VS (the Vector Size), and therefore the misalignment in
4820 different vectorized LOOP iterations is always the same.
4821 The problem arises only if the memory access is in an inner-loop nested
4822 inside LOOP, which is now being vectorized using outer-loop vectorization.
4823 This is the only case when the misalignment of the memory access may not
4824 remain fixed throughout the iterations of the inner-loop (as explained in
4825 detail in vect_supportable_dr_alignment). In this case, not only is the
4826 optimized realignment scheme not applicable, but also the misalignment
4827 computation (and generation of the realignment token that is passed to
4828 REALIGN_LOAD) have to be done inside the loop.
4830 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4831 or not, which in turn determines if the misalignment is computed inside
4832 the inner-loop, or outside LOOP. */
4834 if (init_addr != NULL_TREE || !loop_vinfo)
4836 compute_in_loop = true;
4837 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4841 /* 2. Determine where to generate the extra vector load.
4843 For the optimized realignment scheme, instead of generating two vector
4844 loads in each iteration, we generate a single extra vector load in the
4845 preheader of the loop, and in each iteration reuse the result of the
4846 vector load from the previous iteration. In case the memory access is in
4847 an inner-loop nested inside LOOP, which is now being vectorized using
4848 outer-loop vectorization, we need to determine whether this initial vector
4849 load should be generated at the preheader of the inner-loop, or can be
4850 generated at the preheader of LOOP. If the memory access has no evolution
4851 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4852 to be generated inside LOOP (in the preheader of the inner-loop). */
4854 if (nested_in_vect_loop)
4856 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4857 bool invariant_in_outerloop =
4858 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4859 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4861 else
4862 loop_for_initial_load = loop;
4863 if (at_loop)
4864 *at_loop = loop_for_initial_load;
4866 if (loop_for_initial_load)
4867 pe = loop_preheader_edge (loop_for_initial_load);
4869 /* 3. For the case of the optimized realignment, create the first vector
4870 load at the loop preheader. */
4872 if (alignment_support_scheme == dr_explicit_realign_optimized)
4874 /* Create msq_init = *(floor(p1)) in the loop preheader */
4875 gassign *new_stmt;
4877 gcc_assert (!compute_in_loop);
4878 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4879 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4880 NULL_TREE, &init_addr, NULL, &inc,
4881 true, &inv_p);
4882 new_temp = copy_ssa_name (ptr);
4883 new_stmt = gimple_build_assign
4884 (new_temp, BIT_AND_EXPR, ptr,
4885 build_int_cst (TREE_TYPE (ptr),
4886 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4887 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4888 gcc_assert (!new_bb);
4889 data_ref
4890 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4891 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4892 new_stmt = gimple_build_assign (vec_dest, data_ref);
4893 new_temp = make_ssa_name (vec_dest, new_stmt);
4894 gimple_assign_set_lhs (new_stmt, new_temp);
4895 if (pe)
4897 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4898 gcc_assert (!new_bb);
4900 else
4901 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4903 msq_init = gimple_assign_lhs (new_stmt);
4906 /* 4. Create realignment token using a target builtin, if available.
4907 It is done either inside the containing loop, or before LOOP (as
4908 determined above). */
4910 if (targetm.vectorize.builtin_mask_for_load)
4912 gcall *new_stmt;
4913 tree builtin_decl;
4915 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4916 if (!init_addr)
4918 /* Generate the INIT_ADDR computation outside LOOP. */
4919 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4920 NULL_TREE, loop);
4921 if (loop)
4923 pe = loop_preheader_edge (loop);
4924 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4925 gcc_assert (!new_bb);
4927 else
4928 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4931 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4932 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4933 vec_dest =
4934 vect_create_destination_var (scalar_dest,
4935 gimple_call_return_type (new_stmt));
4936 new_temp = make_ssa_name (vec_dest, new_stmt);
4937 gimple_call_set_lhs (new_stmt, new_temp);
4939 if (compute_in_loop)
4940 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4941 else
4943 /* Generate the misalignment computation outside LOOP. */
4944 pe = loop_preheader_edge (loop);
4945 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4946 gcc_assert (!new_bb);
4949 *realignment_token = gimple_call_lhs (new_stmt);
4951 /* The result of the CALL_EXPR to this builtin is determined from
4952 the value of the parameter and no global variables are touched
4953 which makes the builtin a "const" function. Requiring the
4954 builtin to have the "const" attribute makes it unnecessary
4955 to call mark_call_clobbered. */
4956 gcc_assert (TREE_READONLY (builtin_decl));
4959 if (alignment_support_scheme == dr_explicit_realign)
4960 return msq;
4962 gcc_assert (!compute_in_loop);
4963 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4966 /* 5. Create msq = phi <msq_init, lsq> in loop */
4968 pe = loop_preheader_edge (containing_loop);
4969 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4970 msq = make_ssa_name (vec_dest);
4971 phi_stmt = create_phi_node (msq, containing_loop->header);
4972 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4974 return msq;
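/* Illustrative sketch only (hypothetical names, never compiled): the
   effect of the two-aligned-loads realignment scheme set up above,
   modeled at the byte level.  MSQ and LSQ are the two floor-aligned
   vectors surrounding an access misaligned by MIS bytes; the target's
   REALIGN_LOAD combines them under control of the realignment token,
   which for many targets amounts to the selection below.  */
#if 0
static void
example_realign_load (const unsigned char *msq, const unsigned char *lsq,
                      unsigned char *result, unsigned int vecsize,
                      unsigned int mis)
{
  for (unsigned int i = 0; i < vecsize; i++)
    result[i] = (mis + i < vecsize) ? msq[mis + i] : lsq[mis + i - vecsize];
}
#endif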
4978 /* Function vect_grouped_load_supported.
4980 Returns TRUE if even and odd permutations are supported,
4981 and FALSE otherwise. */
4983 bool
4984 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4986 machine_mode mode = TYPE_MODE (vectype);
4988 /* vect_permute_load_chain requires the group size to be equal to 3 or
4989 be a power of two. */
4990 if (count != 3 && exact_log2 (count) == -1)
4992 if (dump_enabled_p ())
4993 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4994 "the size of the group of accesses"
4995 " is not a power of 2 or not equal to 3\n");
4996 return false;
4999 /* Check that the permutation is supported. */
5000 if (VECTOR_MODE_P (mode))
5002 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
5003 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5005 if (count == 3)
5007 unsigned int k;
5008 for (k = 0; k < 3; k++)
5010 for (i = 0; i < nelt; i++)
5011 if (3 * i + k < 2 * nelt)
5012 sel[i] = 3 * i + k;
5013 else
5014 sel[i] = 0;
5015 if (!can_vec_perm_p (mode, false, sel))
5017 if (dump_enabled_p ())
5018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5019 "shuffle of 3 loads is not supported by"
5020 " target\n");
5021 return false;
5023 for (i = 0, j = 0; i < nelt; i++)
5024 if (3 * i + k < 2 * nelt)
5025 sel[i] = i;
5026 else
5027 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5028 if (!can_vec_perm_p (mode, false, sel))
5030 if (dump_enabled_p ())
5031 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5032 "shuffle of 3 loads is not supported by"
5033 " target\n");
5034 return false;
5037 return true;
5039 else
5041 /* If length is not equal to 3 then only power of 2 is supported. */
5042 gcc_assert (exact_log2 (count) != -1);
5043 for (i = 0; i < nelt; i++)
5044 sel[i] = i * 2;
5045 if (can_vec_perm_p (mode, false, sel))
5047 for (i = 0; i < nelt; i++)
5048 sel[i] = i * 2 + 1;
5049 if (can_vec_perm_p (mode, false, sel))
5050 return true;
5055 if (dump_enabled_p ())
5056 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5057 "extract even/odd not supported by target\n");
5058 return false;
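/* Illustrative sketch only (hypothetical helper, never compiled): the
   extract-even/odd selectors whose support is checked above for the
   power-of-two case.  For NELT == 8 they are {0,2,4,6,8,10,12,14} and
   {1,3,5,7,9,11,13,15}, picking the even- and odd-indexed elements of
   the concatenation of the two input vectors.  */
#if 0
static void
example_load_extract_sel (unsigned char *sel_even, unsigned char *sel_odd,
                          unsigned int nelt)
{
  for (unsigned int i = 0; i < nelt; i++)
    {
      sel_even[i] = i * 2;
      sel_odd[i] = i * 2 + 1;
    }
}
#endif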
5061 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5062 type VECTYPE. */
5064 bool
5065 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5067 return vect_lanes_optab_supported_p ("vec_load_lanes",
5068 vec_load_lanes_optab,
5069 vectype, count);
5072 /* Function vect_permute_load_chain.
5074 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5075 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5076 the input data correctly. Return the final references for loads in
5077 RESULT_CHAIN.
5079 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5080 The input is 4 vectors each containing 8 elements. We assign a number to each
5081 element, the input sequence is:
5083 1st vec: 0 1 2 3 4 5 6 7
5084 2nd vec: 8 9 10 11 12 13 14 15
5085 3rd vec: 16 17 18 19 20 21 22 23
5086 4th vec: 24 25 26 27 28 29 30 31
5088 The output sequence should be:
5090 1st vec: 0 4 8 12 16 20 24 28
5091 2nd vec: 1 5 9 13 17 21 25 29
5092 3rd vec: 2 6 10 14 18 22 26 30
5093 4th vec: 3 7 11 15 19 23 27 31
5095 i.e., the first output vector should contain the first elements of each
5096 interleaving group, etc.
5098 We use extract_even/odd instructions to create such output. The input of
5099 each extract_even/odd operation is two vectors
5100 1st vec 2nd vec
5101 0 1 2 3 4 5 6 7
5103 and the output is the vector of extracted even/odd elements. The output of
5104 extract_even will be: 0 2 4 6
5105 and of extract_odd: 1 3 5 7
5108 The permutation is done in log LENGTH stages. In each stage extract_even
5109 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5110 their order. In our example,
5112 E1: extract_even (1st vec, 2nd vec)
5113 E2: extract_odd (1st vec, 2nd vec)
5114 E3: extract_even (3rd vec, 4th vec)
5115 E4: extract_odd (3rd vec, 4th vec)
5117 The output for the first stage will be:
5119 E1: 0 2 4 6 8 10 12 14
5120 E2: 1 3 5 7 9 11 13 15
5121 E3: 16 18 20 22 24 26 28 30
5122 E4: 17 19 21 23 25 27 29 31
5124 In order to proceed and create the correct sequence for the next stage (or
5125 for the correct output, if the second stage is the last one, as in our
5126 example), we first put the output of extract_even operation and then the
5127 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5128 The input for the second stage is:
5130 1st vec (E1): 0 2 4 6 8 10 12 14
5131 2nd vec (E3): 16 18 20 22 24 26 28 30
5132 3rd vec (E2): 1 3 5 7 9 11 13 15
5133 4th vec (E4): 17 19 21 23 25 27 29 31
5135 The output of the second stage:
5137 E1: 0 4 8 12 16 20 24 28
5138 E2: 2 6 10 14 18 22 26 30
5139 E3: 1 5 9 13 17 21 25 29
5140 E4: 3 7 11 15 19 23 27 31
5142 And RESULT_CHAIN after reordering:
5144 1st vec (E1): 0 4 8 12 16 20 24 28
5145 2nd vec (E3): 1 5 9 13 17 21 25 29
5146 3rd vec (E2): 2 6 10 14 18 22 26 30
5147 4th vec (E4): 3 7 11 15 19 23 27 31. */
5149 static void
5150 vect_permute_load_chain (vec<tree> dr_chain,
5151 unsigned int length,
5152 gimple stmt,
5153 gimple_stmt_iterator *gsi,
5154 vec<tree> *result_chain)
5156 tree data_ref, first_vect, second_vect;
5157 tree perm_mask_even, perm_mask_odd;
5158 tree perm3_mask_low, perm3_mask_high;
5159 gimple perm_stmt;
5160 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5161 unsigned int i, j, log_length = exact_log2 (length);
5162 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5163 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5165 result_chain->quick_grow (length);
5166 memcpy (result_chain->address (), dr_chain.address (),
5167 length * sizeof (tree));
5169 if (length == 3)
5171 unsigned int k;
5173 for (k = 0; k < 3; k++)
5175 for (i = 0; i < nelt; i++)
5176 if (3 * i + k < 2 * nelt)
5177 sel[i] = 3 * i + k;
5178 else
5179 sel[i] = 0;
5180 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
5182 for (i = 0, j = 0; i < nelt; i++)
5183 if (3 * i + k < 2 * nelt)
5184 sel[i] = i;
5185 else
5186 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5188 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
5190 first_vect = dr_chain[0];
5191 second_vect = dr_chain[1];
5193 /* Create interleaving stmt (low part of):
5194 low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5195 ...}> */
5196 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5197 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5198 second_vect, perm3_mask_low);
5199 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5201 /* Create interleaving stmt (high part of):
5202 high = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5203 ...}> */
5204 first_vect = data_ref;
5205 second_vect = dr_chain[2];
5206 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5207 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5208 second_vect, perm3_mask_high);
5209 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5210 (*result_chain)[k] = data_ref;
5213 else
5215 /* If length is not equal to 3 then only power of 2 is supported. */
5216 gcc_assert (exact_log2 (length) != -1);
5218 for (i = 0; i < nelt; ++i)
5219 sel[i] = i * 2;
5220 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
5222 for (i = 0; i < nelt; ++i)
5223 sel[i] = i * 2 + 1;
5224 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
5226 for (i = 0; i < log_length; i++)
5228 for (j = 0; j < length; j += 2)
5230 first_vect = dr_chain[j];
5231 second_vect = dr_chain[j+1];
5233 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5234 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5235 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5236 first_vect, second_vect,
5237 perm_mask_even);
5238 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5239 (*result_chain)[j/2] = data_ref;
5241 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5242 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5243 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5244 first_vect, second_vect,
5245 perm_mask_odd);
5246 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5247 (*result_chain)[j/2+length/2] = data_ref;
5249 memcpy (dr_chain.address (), result_chain->address (),
5250 length * sizeof (tree));
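/* Illustrative sketch only (hypothetical names, never compiled): one
   extract-even/odd stage of the algorithm above, applied to plain
   arrays.  EVEN receives the even-indexed elements of the concatenation
   of V1 and V2, ODD the odd-indexed ones; log2(LENGTH) such stages, with
   the reordering described in the function comment, de-interleave the
   load chain.  */
#if 0
static void
example_extract_step (const short *v1, const short *v2,
                      short *even, short *odd, unsigned int nelt)
{
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      even[i] = v1[2 * i];
      even[nelt / 2 + i] = v2[2 * i];
      odd[i] = v1[2 * i + 1];
      odd[nelt / 2 + i] = v2[2 * i + 1];
    }
}
#endif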
5255 /* Function vect_shift_permute_load_chain.
5257 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
5258 sequence of stmts to reorder the input data accordingly.
5259 Return the final references for loads in RESULT_CHAIN.
5260 Return true if successful, false otherwise.
5262 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5263 The input is 3 vectors each containing 8 elements. We assign a
5264 number to each element, the input sequence is:
5266 1st vec: 0 1 2 3 4 5 6 7
5267 2nd vec: 8 9 10 11 12 13 14 15
5268 3rd vec: 16 17 18 19 20 21 22 23
5270 The output sequence should be:
5272 1st vec: 0 3 6 9 12 15 18 21
5273 2nd vec: 1 4 7 10 13 16 19 22
5274 3rd vec: 2 5 8 11 14 17 20 23
5276 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5278 First we shuffle all 3 vectors to get correct elements order:
5280 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5281 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5282 3rd vec: (16 19 22) (17 20 23) (18 21)
5284 Next we unite and shift vector 3 times:
5286 1st step:
5287 shift right by 6 the concatenation of:
5288 "1st vec" and "2nd vec"
5289 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5290 "2nd vec" and "3rd vec"
5291 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5292 "3rd vec" and "1st vec"
5293 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5294 | New vectors |
5296 So that now new vectors are:
5298 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5299 2nd vec: (10 13) (16 19 22) (17 20 23)
5300 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5302 2nd step:
5303 shift right by 5 the concatenation of:
5304 "1st vec" and "3rd vec"
5305 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5306 "2nd vec" and "1st vec"
5307 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5308 "3rd vec" and "2nd vec"
5309 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5310 | New vectors |
5312 So that now new vectors are:
5314 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5315 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5316 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5318 3rd step:
5319 shift right by 5 the concatenation of:
5320 "1st vec" and "1st vec"
5321 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5322 shift right by 3 the concatenation of:
5323 "2nd vec" and "2nd vec"
5324 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5325 | New vectors |
5327 So that now all vectors are READY:
5328 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5329 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5330 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5332 This algorithm is faster than the one in vect_permute_load_chain if:
5333 1. "shift of a concatenation" is faster than general permutation.
5334 This is usually so.
5335 2. The TARGET machine can't execute vector instructions in parallel.
5336 This is because each step of the algorithm depends on previous.
5337 The algorithm in vect_permute_load_chain is much more parallel.
5339 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
5342 static bool
5343 vect_shift_permute_load_chain (vec<tree> dr_chain,
5344 unsigned int length,
5345 gimple stmt,
5346 gimple_stmt_iterator *gsi,
5347 vec<tree> *result_chain)
5349 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5350 tree perm2_mask1, perm2_mask2, perm3_mask;
5351 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5352 gimple perm_stmt;
5354 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5355 unsigned int i;
5356 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5357 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5358 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5359 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5361 result_chain->quick_grow (length);
5362 memcpy (result_chain->address (), dr_chain.address (),
5363 length * sizeof (tree));
5365 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
5367 unsigned int j, log_length = exact_log2 (length);
5368 for (i = 0; i < nelt / 2; ++i)
5369 sel[i] = i * 2;
5370 for (i = 0; i < nelt / 2; ++i)
5371 sel[nelt / 2 + i] = i * 2 + 1;
5372 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5374 if (dump_enabled_p ())
5375 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5376 "shuffle of 2 fields structure is not \
5377 supported by target\n");
5378 return false;
5380 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
5382 for (i = 0; i < nelt / 2; ++i)
5383 sel[i] = i * 2 + 1;
5384 for (i = 0; i < nelt / 2; ++i)
5385 sel[nelt / 2 + i] = i * 2;
5386 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5388 if (dump_enabled_p ())
5389 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5390 "shuffle of 2 fields structure is not \
5391 supported by target\n");
5392 return false;
5394 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
5396 /* Generating permutation constant to shift all elements.
5397 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5398 for (i = 0; i < nelt; i++)
5399 sel[i] = nelt / 2 + i;
5400 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5402 if (dump_enabled_p ())
5403 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5404 "shift permutation is not supported by target\n");
5405 return false;
5407 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5409 /* Generating permutation constant to select vector from 2.
5410 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5411 for (i = 0; i < nelt / 2; i++)
5412 sel[i] = i;
5413 for (i = nelt / 2; i < nelt; i++)
5414 sel[i] = nelt + i;
5415 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5417 if (dump_enabled_p ())
5418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5419 "select is not supported by target\n");
5420 return false;
5422 select_mask = vect_gen_perm_mask_checked (vectype, sel);
5424 for (i = 0; i < log_length; i++)
5426 for (j = 0; j < length; j += 2)
5428 first_vect = dr_chain[j];
5429 second_vect = dr_chain[j + 1];
5431 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5432 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5433 first_vect, first_vect,
5434 perm2_mask1);
5435 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5436 vect[0] = data_ref;
5438 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5439 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5440 second_vect, second_vect,
5441 perm2_mask2);
5442 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5443 vect[1] = data_ref;
5445 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5446 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5447 vect[0], vect[1], shift1_mask);
5448 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5449 (*result_chain)[j/2 + length/2] = data_ref;
5451 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5452 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5453 vect[0], vect[1], select_mask);
5454 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5455 (*result_chain)[j/2] = data_ref;
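/* At this point, for a 2-field interleaving a0 b0 a1 b1 ... (nelt == 8), the
   select has gathered the first-field elements a0..a7 into
   (*result_chain)[j/2] and the shift has gathered the second-field elements
   b0..b7 into (*result_chain)[j/2 + length/2].  */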
5457 memcpy (dr_chain.address (), result_chain->address (),
5458 length * sizeof (tree));
5460 return true;
5462 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5464 unsigned int k = 0, l = 0;
5466 /* Generating permutation constant to get all elements in the right order.
5467 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5468 for (i = 0; i < nelt; i++)
5470 if (3 * k + (l % 3) >= nelt)
5472 k = 0;
5473 l += (3 - (nelt % 3));
5475 sel[i] = 3 * k + (l % 3);
5476 k++;
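/* Whenever 3 * k + (l % 3) would run past the vector, K is reset and L is
   stepped to the next residue class modulo 3, so the selector lists the
   elements of each of the three fields consecutively.  */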
5478 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5480 if (dump_enabled_p ())
5481 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5482 "shuffle of 3 fields structure is not \
5483 supported by target\n");
5484 return false;
5486 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
5488 /* Generating permutation constant to shift all elements.
5489 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5490 for (i = 0; i < nelt; i++)
5491 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5492 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5494 if (dump_enabled_p ())
5495 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5496 "shift permutation is not supported by target\n");
5497 return false;
5499 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5501 /* Generating permutation constant to shift all elements.
5502 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5503 for (i = 0; i < nelt; i++)
5504 sel[i] = 2 * (nelt / 3) + 1 + i;
5505 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5507 if (dump_enabled_p ())
5508 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5509 "shift permutation is not supported by target\n");
5510 return false;
5512 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
5514 /* Generating permutation constant to shift all elements.
5515 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5516 for (i = 0; i < nelt; i++)
5517 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5518 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5520 if (dump_enabled_p ())
5521 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5522 "shift permutation is not supported by target\n");
5523 return false;
5525 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
5527 /* Generating permutation constant to shift all elements.
5528 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5529 for (i = 0; i < nelt; i++)
5530 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5531 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5533 if (dump_enabled_p ())
5534 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5535 "shift permutation is not supported by target\n");
5536 return false;
5538 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
5540 for (k = 0; k < 3; k++)
5542 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5543 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5544 dr_chain[k], dr_chain[k],
5545 perm3_mask);
5546 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5547 vect[k] = data_ref;
5550 for (k = 0; k < 3; k++)
5552 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5553 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5554 vect[k % 3], vect[(k + 1) % 3],
5555 shift1_mask);
5556 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5557 vect_shift[k] = data_ref;
5560 for (k = 0; k < 3; k++)
5562 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5563 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5564 vect_shift[(4 - k) % 3],
5565 vect_shift[(3 - k) % 3],
5566 shift2_mask);
5567 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5568 vect[k] = data_ref;
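/* Distribute the three permuted vectors to the output slots
   3 - (nelt % 3), nelt % 3 and 0; for nelt == 8 these are slots 1, 2 and 0.  */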
5571 (*result_chain)[3 - (nelt % 3)] = vect[2];
5573 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5574 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5575 vect[0], shift3_mask);
5576 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5577 (*result_chain)[nelt % 3] = data_ref;
5579 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5580 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5581 vect[1], shift4_mask);
5582 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5583 (*result_chain)[0] = data_ref;
5584 return true;
5586 return false;
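/* Illustrative sketch, not part of the original source: a standalone program
   (fenced off with #if 0) that rebuilds the selector vectors used by the
   power-of-two branch above and prints them for an 8-element vector.  All
   names below are invented for the example.  */
#if 0
#include <stdio.h>

#define NELT 8

static void
print_sel (const char *name, const unsigned char *sel)
{
  printf ("%s = {", name);
  for (unsigned int i = 0; i < NELT; i++)
    printf (" %u", (unsigned int) sel[i]);
  printf (" }\n");
}

int
main (void)
{
  unsigned char sel[NELT];
  unsigned int i;

  /* perm2_mask1: even elements of a vector, then its odd elements.  */
  for (i = 0; i < NELT / 2; i++)
    sel[i] = i * 2;
  for (i = 0; i < NELT / 2; i++)
    sel[NELT / 2 + i] = i * 2 + 1;
  print_sel ("perm2_mask1", sel);   /* { 0 2 4 6 1 3 5 7 }  */

  /* perm2_mask2: odd elements of a vector, then its even elements.  */
  for (i = 0; i < NELT / 2; i++)
    sel[i] = i * 2 + 1;
  for (i = 0; i < NELT / 2; i++)
    sel[NELT / 2 + i] = i * 2;
  print_sel ("perm2_mask2", sel);   /* { 1 3 5 7 0 2 4 6 }  */

  /* shift1_mask: shift the concatenation of two vectors by half a vector.  */
  for (i = 0; i < NELT; i++)
    sel[i] = NELT / 2 + i;
  print_sel ("shift1_mask", sel);   /* { 4 5 6 7 8 9 10 11 }  */

  /* select_mask: low half of the first vector, high half of the second.  */
  for (i = 0; i < NELT / 2; i++)
    sel[i] = i;
  for (i = NELT / 2; i < NELT; i++)
    sel[i] = NELT + i;
  print_sel ("select_mask", sel);   /* { 0 1 2 3 12 13 14 15 }  */

  return 0;
}
#endif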
5589 /* Function vect_transform_grouped_load.
5591 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5592 to perform their permutation and assign the resulting vectorized statements to
5593 the scalar statements.
5596 void
5597 vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
5598 gimple_stmt_iterator *gsi)
5600 machine_mode mode;
5601 vec<tree> result_chain = vNULL;
5603 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5604 RESULT_CHAIN is the output of vect_permute_load_chain; it contains the permuted
5605 vectors, which are ready for vector computation. */
5606 result_chain.create (size);
5608 /* If the reassociation width for the vector type is 2 or greater, the target
5609 machine can execute 2 or more vector instructions in parallel. Otherwise
5610 try to get the chain for the load group using vect_shift_permute_load_chain. */
5611 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
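/* Note the short-circuit below: vect_shift_permute_load_chain is only
   attempted when the reassociation width is not greater than 1 and SIZE is
   not a power of two; if it fails, we fall back to vect_permute_load_chain.  */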
5612 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5613 || exact_log2 (size) != -1
5614 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5615 gsi, &result_chain))
5616 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5617 vect_record_grouped_load_vectors (stmt, result_chain);
5618 result_chain.release ();
5621 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5622 generated as part of the vectorization of STMT. Assign the statement
5623 for each vector to the associated scalar statement. */
5625 void
5626 vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
5628 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5629 gimple next_stmt, new_stmt;
5630 unsigned int i, gap_count;
5631 tree tmp_data_ref;
5633 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5634 Since we scan the chain starting from its first node, their order
5635 corresponds to the order of data-refs in RESULT_CHAIN. */
5636 next_stmt = first_stmt;
5637 gap_count = 1;
5638 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5640 if (!next_stmt)
5641 break;
5643 /* Skip the gaps. Loads created for the gaps will be removed later by the
5644 dead code elimination pass. No need to check for the first stmt in
5645 the group, since it always exists.
5646 GROUP_GAP is the number of steps in elements from the previous
5647 access (if there is no gap GROUP_GAP is 1). We skip loads that
5648 correspond to the gaps. */
5649 if (next_stmt != first_stmt
5650 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5652 gap_count++;
5653 continue;
5656 while (next_stmt)
5658 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5659 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5660 copies, and we put the new vector statement at the end of the
5661 RELATED_STMT chain (in the first free slot). */
5662 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5663 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5664 else
5666 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5668 gimple prev_stmt =
5669 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5670 gimple rel_stmt =
5671 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5672 while (rel_stmt)
5674 prev_stmt = rel_stmt;
5675 rel_stmt =
5676 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5679 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5680 new_stmt;
5684 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5685 gap_count = 1;
5686 /* If NEXT_STMT accesses the same DR as the previous statement,
5687 put the same TMP_DATA_REF as its vectorized statement; otherwise
5688 get the next data-ref from RESULT_CHAIN. */
5689 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5690 break;
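/* Illustrative sketch, not part of the original source: a minimal model of
   the recording step above.  The first vector copy of a scalar statement
   goes into VEC_STMT; later copies are appended at the tail of its
   RELATED_STMT chain.  The structure and function names are invented for
   the example.  */
#if 0
struct stmt_copy
{
  struct stmt_copy *related;	/* models STMT_VINFO_RELATED_STMT  */
};

struct scalar_stmt
{
  struct stmt_copy *vec_stmt;	/* models STMT_VINFO_VEC_STMT  */
};

/* Record NEW_COPY (whose RELATED field is assumed to be NULL) for S.  */
static void
record_vector_copy (struct scalar_stmt *s, struct stmt_copy *new_copy)
{
  if (!s->vec_stmt)
    s->vec_stmt = new_copy;
  else
    {
      struct stmt_copy *prev = s->vec_stmt;
      while (prev->related)
	prev = prev->related;
      prev->related = new_copy;
    }
}
#endif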
5695 /* Function vect_can_force_dr_alignment_p.
5697 Returns whether the alignment of DECL can be forced to an
5698 ALIGNMENT-bit boundary. */
5700 bool
5701 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5703 if (TREE_CODE (decl) != VAR_DECL)
5704 return false;
5706 /* With -fno-toplevel-reorder we may have already output the constant. */
5707 if (TREE_ASM_WRITTEN (decl))
5708 return false;
5710 /* Constant pool entries may be shared and not properly merged by LTO. */
5711 if (DECL_IN_CONSTANT_POOL (decl))
5712 return false;
5714 if (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl))
5716 symtab_node *snode;
5718 /* We cannot change alignment of symbols that may bind to symbols
5719 in another translation unit that may contain a definition with lower
5720 alignment. */
5721 if (!decl_binds_to_current_def_p (decl))
5722 return false;
5724 /* When compiling a partition, make sure the symbol is not output by another
5725 partition. */
5726 snode = symtab_node::get (decl);
5727 if (flag_ltrans
5728 && (snode->in_other_partition
5729 || snode->get_partitioning_class () == SYMBOL_DUPLICATE))
5730 return false;
5733 /* Do not override the alignment as specified by the ABI when the used
5734 attribute is set. */
5735 if (DECL_PRESERVE_P (decl))
5736 return false;
5738 /* Do not override explicit alignment set by the user when an explicit
5739 section name is also used. This is a common idiom used by many
5740 software projects. */
5741 if (TREE_STATIC (decl)
5742 && DECL_SECTION_NAME (decl) != NULL
5743 && !symtab_node::get (decl)->implicit_section)
5744 return false;
5746 /* If the symbol is an alias, we need to check that the target is OK. */
5747 if (TREE_STATIC (decl))
5749 tree target = symtab_node::get (decl)->ultimate_alias_target ()->decl;
5750 if (target != decl)
5752 if (DECL_PRESERVE_P (target))
5753 return false;
5754 decl = target;
5758 if (TREE_STATIC (decl))
5759 return (alignment <= MAX_OFILE_ALIGNMENT);
5760 else
5761 return (alignment <= MAX_STACK_ALIGNMENT);
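/* Illustration, not part of the original source: file-scope declarations
   that the checks above would typically reject, assuming the usual mapping
   of source attributes to DECL flags ("used" -> DECL_PRESERVE_P, an explicit
   "section" -> DECL_SECTION_NAME without implicit_section).  */
#if 0
static int keep_me[16] __attribute__ ((used));		      /* alignment not forced  */
static int placed[16] __attribute__ ((section (".mydata")));  /* alignment not forced  */
static int plain[16];	      /* may be forced up to MAX_OFILE_ALIGNMENT  */
#endif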
5765 /* Return whether the data reference DR is supported with respect to its
5766 alignment.
5767 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5768 if it is aligned, i.e., check if it is possible to vectorize it with a different
5769 alignment. */
5771 enum dr_alignment_support
5772 vect_supportable_dr_alignment (struct data_reference *dr,
5773 bool check_aligned_accesses)
5775 gimple stmt = DR_STMT (dr);
5776 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5777 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5778 machine_mode mode = TYPE_MODE (vectype);
5779 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5780 struct loop *vect_loop = NULL;
5781 bool nested_in_vect_loop = false;
5783 if (aligned_access_p (dr) && !check_aligned_accesses)
5784 return dr_aligned;
5786 /* For now assume all conditional loads/stores support unaligned
5787 access without any special code. */
5788 if (is_gimple_call (stmt)
5789 && gimple_call_internal_p (stmt)
5790 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5791 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5792 return dr_unaligned_supported;
5794 if (loop_vinfo)
5796 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5797 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5800 /* Possibly unaligned access. */
5802 /* We can choose between using the implicit realignment scheme (generating
5803 a misaligned_move stmt) and the explicit realignment scheme (generating
5804 aligned loads with a REALIGN_LOAD). There are two variants of the
5805 explicit realignment scheme: optimized and unoptimized.
5806 We can optimize the realignment only if the step between consecutive
5807 vector loads is equal to the vector size. Since the vector memory
5808 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5809 is guaranteed that the misalignment amount remains the same throughout the
5810 execution of the vectorized loop. Therefore, we can create the
5811 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5812 at the loop preheader.
5814 However, in the case of outer-loop vectorization, when vectorizing a
5815 memory access in the inner-loop nested within the LOOP that is now being
5816 vectorized, while it is guaranteed that the misalignment of the
5817 vectorized memory access will remain the same in different outer-loop
5818 iterations, it is *not* guaranteed that is will remain the same throughout
5819 the execution of the inner-loop. This is because the inner-loop advances
5820 with the original scalar step (and not in steps of VS). If the inner-loop
5821 step happens to be a multiple of VS, then the misalignment remains fixed
5822 and we can use the optimized realignment scheme. For example:
5824 for (i=0; i<N; i++)
5825 for (j=0; j<M; j++)
5826 s += a[i+j];
5828 When vectorizing the i-loop in the above example, the step between
5829 consecutive vector loads is 1, and so the misalignment does not remain
5830 fixed across the execution of the inner-loop, and the realignment cannot
5831 be optimized (as illustrated in the following pseudo vectorized loop):
5833 for (i=0; i<N; i+=4)
5834 for (j=0; j<M; j++){
5835 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5836 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5837 // (assuming that we start from an aligned address).
5840 We therefore have to use the unoptimized realignment scheme:
5842 for (i=0; i<N; i+=4)
5843 for (j=k; j<M; j+=4)
5844 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5845 // that the misalignment of the initial address is
5846 // 0).
5848 The loop can then be vectorized as follows:
5850 for (k=0; k<4; k++){
5851 rt = get_realignment_token (&vp[k]);
5852 for (i=0; i<N; i+=4){
5853 v1 = vp[i+k];
5854 for (j=k; j<M; j+=4){
5855 v2 = vp[i+j+VS-1];
5856 va = REALIGN_LOAD <v1,v2,rt>;
5857 vs += va;
5858 v1 = v2;
5861 } */
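/* Illustrative sketch, not part of the original source: a scalar model of
   the REALIGN_LOAD operation used by the explicit realignment scheme
   described above.  The element type, VS, and modelling the realignment
   token as a plain element offset K are assumptions made for the example.  */
#if 0
#include <stdio.h>

#define VS 4   /* vector size in elements  */

/* Scalar model of va = REALIGN_LOAD <v1, v2, rt>, where the realignment
   token is represented by the misalignment K in elements (0 <= K < VS).  */
static void
realign_load (const int *v1, const int *v2, unsigned int k, int *va)
{
  for (unsigned int i = 0; i < VS; i++)
    va[i] = (i + k < VS) ? v1[i + k] : v2[i + k - VS];
}

int
main (void)
{
  /* Memory as seen by two aligned vector loads ...  */
  int mem[2 * VS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  unsigned int k = 3;	       /* misalignment of the desired access  */
  int va[VS];

  /* ... reconstructing the misaligned vector mem[k .. k + VS - 1].  */
  realign_load (&mem[0], &mem[VS], k, va);
  for (unsigned int i = 0; i < VS; i++)
    printf ("%d ", va[i]);     /* prints: 3 4 5 6  */
  printf ("\n");
  return 0;
}
#endif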
5863 if (DR_IS_READ (dr))
5865 bool is_packed = false;
5866 tree type = (TREE_TYPE (DR_REF (dr)));
5868 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
5869 && (!targetm.vectorize.builtin_mask_for_load
5870 || targetm.vectorize.builtin_mask_for_load ()))
5872 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5873 if ((nested_in_vect_loop
5874 && (TREE_INT_CST_LOW (DR_STEP (dr))
5875 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5876 || !loop_vinfo)
5877 return dr_explicit_realign;
5878 else
5879 return dr_explicit_realign_optimized;
5881 if (!known_alignment_for_access_p (dr))
5882 is_packed = not_size_aligned (DR_REF (dr));
5884 if ((TYPE_USER_ALIGN (type) && !is_packed)
5885 || targetm.vectorize.
5886 support_vector_misalignment (mode, type,
5887 DR_MISALIGNMENT (dr), is_packed))
5888 /* Can't software pipeline the loads, but can at least do them. */
5889 return dr_unaligned_supported;
5891 else
5893 bool is_packed = false;
5894 tree type = (TREE_TYPE (DR_REF (dr)));
5896 if (!known_alignment_for_access_p (dr))
5897 is_packed = not_size_aligned (DR_REF (dr));
5899 if ((TYPE_USER_ALIGN (type) && !is_packed)
5900 || targetm.vectorize.
5901 support_vector_misalignment (mode, type,
5902 DR_MISALIGNMENT (dr), is_packed))
5903 return dr_unaligned_supported;
5906 /* Unsupported. */
5907 return dr_unaligned_unsupported;