1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tm_p.h"
40 #include "target.h"
41 #include "predict.h"
42 #include "hard-reg-set.h"
43 #include "function.h"
44 #include "dominance.h"
45 #include "cfg.h"
46 #include "basic-block.h"
47 #include "gimple-pretty-print.h"
48 #include "tree-ssa-alias.h"
49 #include "internal-fn.h"
50 #include "tree-eh.h"
51 #include "gimple-expr.h"
52 #include "is-a.h"
53 #include "gimple.h"
54 #include "gimplify.h"
55 #include "gimple-iterator.h"
56 #include "gimplify-me.h"
57 #include "gimple-ssa.h"
58 #include "tree-phinodes.h"
59 #include "ssa-iterators.h"
60 #include "stringpool.h"
61 #include "tree-ssanames.h"
62 #include "tree-ssa-loop-ivopts.h"
63 #include "tree-ssa-loop-manip.h"
64 #include "tree-ssa-loop.h"
65 #include "cfgloop.h"
66 #include "tree-chrec.h"
67 #include "tree-scalar-evolution.h"
68 #include "tree-vectorizer.h"
69 #include "diagnostic-core.h"
70 #include "hash-map.h"
71 #include "plugin-api.h"
72 #include "ipa-ref.h"
73 #include "cgraph.h"
74 /* Need to include rtl.h, expr.h, etc. for optabs. */
75 #include "hashtab.h"
76 #include "rtl.h"
77 #include "flags.h"
78 #include "statistics.h"
79 #include "real.h"
80 #include "fixed-value.h"
81 #include "insn-config.h"
82 #include "expmed.h"
83 #include "dojump.h"
84 #include "explow.h"
85 #include "calls.h"
86 #include "emit-rtl.h"
87 #include "varasm.h"
88 #include "stmt.h"
89 #include "expr.h"
90 #include "insn-codes.h"
91 #include "optabs.h"
92 #include "builtins.h"
94 /* Return true if load- or store-lanes optab OPTAB is implemented for
95 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
97 static bool
98 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
99 tree vectype, unsigned HOST_WIDE_INT count)
101 machine_mode mode, array_mode;
102 bool limit_p;
104 mode = TYPE_MODE (vectype);
105 limit_p = !targetm.array_mode_supported_p (mode, count);
106 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
107 MODE_INT, limit_p);
109 if (array_mode == BLKmode)
111 if (dump_enabled_p ())
112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
113 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
114 GET_MODE_NAME (mode), count);
115 return false;
118 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
120 if (dump_enabled_p ())
121 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
122 "cannot use %s<%s><%s>\n", name,
123 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
124 return false;
127 if (dump_enabled_p ())
128 dump_printf_loc (MSG_NOTE, vect_location,
129 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
130 GET_MODE_NAME (mode));
132 return true;
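/* For example, assuming a target with 128-bit V4SI vectors and a 256-bit
   integer mode: a query for V4SI with COUNT == 2 looks for a
   2 * 128 = 256-bit MODE_INT array mode via mode_for_size and then checks
   convert_optab_handler for that (array mode, vector mode) pair; if either
   step fails, load/store-lanes cannot be used for a group of that size.  */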
136 /* Return the smallest scalar part of STMT.
137 This is used to determine the vectype of the stmt. We generally set the
138 vectype according to the type of the result (lhs). For stmts whose
139 result-type is different than the type of the arguments (e.g., demotion,
140 promotion), vectype will be reset appropriately (later). Note that we have
141 to visit the smallest datatype in this function, because that determines the
142 VF. If the smallest datatype in the loop is present only as the rhs of a
143 promotion operation - we'd miss it.
144 Such a case, where a variable of this datatype does not appear in the lhs
145 anywhere in the loop, can only occur if it's an invariant: e.g.:
146 'int_x = (int) short_inv', which we'd expect to have been optimized away by
147 invariant motion. However, we cannot rely on invariant motion to always
148 take invariants out of the loop, and so in the case of promotion we also
149 have to check the rhs.
150 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
151 types. */
153 tree
154 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
155 HOST_WIDE_INT *rhs_size_unit)
157 tree scalar_type = gimple_expr_type (stmt);
158 HOST_WIDE_INT lhs, rhs;
160 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
162 if (is_gimple_assign (stmt)
163 && (gimple_assign_cast_p (stmt)
164 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
165 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
166 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
168 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
170 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
171 if (rhs < lhs)
172 scalar_type = rhs_type;
175 *lhs_size_unit = lhs;
176 *rhs_size_unit = rhs;
177 return scalar_type;
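/* A worked example with a hypothetical promotion in the loop body:

     int_x = (int) short_a;

   The lhs type has size 4 while the rhs type has size 2, so the function
   returns the 2-byte type and sets *LHS_SIZE_UNIT = 4, *RHS_SIZE_UNIT = 2;
   it is the smaller type that then bounds the vectorization factor.  */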
181 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
182 tested at run-time. Return TRUE if DDR was successfully inserted.
183 Return false if versioning is not supported. */
185 static bool
186 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
188 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
190 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
191 return false;
193 if (dump_enabled_p ())
195 dump_printf_loc (MSG_NOTE, vect_location,
196 "mark for run-time aliasing test between ");
197 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
198 dump_printf (MSG_NOTE, " and ");
199 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
200 dump_printf (MSG_NOTE, "\n");
203 if (optimize_loop_nest_for_size_p (loop))
205 if (dump_enabled_p ())
206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
207 "versioning not supported when optimizing"
208 " for size.\n");
209 return false;
212 /* FORNOW: We don't support versioning with outer-loop vectorization. */
213 if (loop->inner)
215 if (dump_enabled_p ())
216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
217 "versioning not yet supported for outer-loops.\n");
218 return false;
221 /* FORNOW: We don't support creating runtime alias tests for non-constant
222 step. */
223 if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
224 || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
226 if (dump_enabled_p ())
227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
228 "versioning not yet supported for non-constant "
229 "step\n");
230 return false;
233 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
234 return true;
238 /* Function vect_analyze_data_ref_dependence.
240 Return TRUE if there (might) exist a dependence between a memory-reference
241 DRA and a memory-reference DRB. If the dependence can instead be checked
242 at run-time by versioning for alias, return FALSE. Adjust *MAX_VF according
243 to the data dependence. */
245 static bool
246 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
247 loop_vec_info loop_vinfo, int *max_vf)
249 unsigned int i;
250 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
251 struct data_reference *dra = DDR_A (ddr);
252 struct data_reference *drb = DDR_B (ddr);
253 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
254 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
255 lambda_vector dist_v;
256 unsigned int loop_depth;
258 /* In loop analysis all data references should be vectorizable. */
259 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
260 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
261 gcc_unreachable ();
263 /* Independent data accesses. */
264 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
265 return false;
267 if (dra == drb
268 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
269 return false;
271 /* Even if we have an anti-dependence then, as the vectorized loop covers at
272 least two scalar iterations, there is always also a true dependence.
273 As the vectorizer does not re-order loads and stores we can ignore
274 the anti-dependence if TBAA can disambiguate both DRs similar to the
275 case with known negative distance anti-dependences (positive
276 distance anti-dependences would violate TBAA constraints). */
277 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
278 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
279 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
280 get_alias_set (DR_REF (drb))))
281 return false;
283 /* Unknown data dependence. */
284 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
286 /* If user asserted safelen consecutive iterations can be
287 executed concurrently, assume independence. */
288 if (loop->safelen >= 2)
290 if (loop->safelen < *max_vf)
291 *max_vf = loop->safelen;
292 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
293 return false;
296 if (STMT_VINFO_GATHER_P (stmtinfo_a)
297 || STMT_VINFO_GATHER_P (stmtinfo_b))
299 if (dump_enabled_p ())
301 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
302 "versioning for alias not supported for: "
303 "can't determine dependence between ");
304 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
305 DR_REF (dra));
306 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
307 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
308 DR_REF (drb));
309 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
311 return true;
314 if (dump_enabled_p ())
316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
317 "versioning for alias required: "
318 "can't determine dependence between ");
319 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
320 DR_REF (dra));
321 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
322 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
323 DR_REF (drb));
324 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
327 /* Add to list of ddrs that need to be tested at run-time. */
328 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
331 /* Known data dependence. */
332 if (DDR_NUM_DIST_VECTS (ddr) == 0)
334 /* If user asserted safelen consecutive iterations can be
335 executed concurrently, assume independence. */
336 if (loop->safelen >= 2)
338 if (loop->safelen < *max_vf)
339 *max_vf = loop->safelen;
340 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
341 return false;
344 if (STMT_VINFO_GATHER_P (stmtinfo_a)
345 || STMT_VINFO_GATHER_P (stmtinfo_b))
347 if (dump_enabled_p ())
349 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
350 "versioning for alias not supported for: "
351 "bad dist vector for ");
352 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
353 DR_REF (dra));
354 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
355 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
356 DR_REF (drb));
357 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
359 return true;
362 if (dump_enabled_p ())
364 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
365 "versioning for alias required: "
366 "bad dist vector for ");
367 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
368 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
369 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
370 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
372 /* Add to list of ddrs that need to be tested at run-time. */
373 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
376 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
377 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
379 int dist = dist_v[loop_depth];
381 if (dump_enabled_p ())
382 dump_printf_loc (MSG_NOTE, vect_location,
383 "dependence distance = %d.\n", dist);
385 if (dist == 0)
387 if (dump_enabled_p ())
389 dump_printf_loc (MSG_NOTE, vect_location,
390 "dependence distance == 0 between ");
391 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
392 dump_printf (MSG_NOTE, " and ");
393 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
394 dump_printf (MSG_NOTE, "\n");
397 /* When we perform grouped accesses and perform implicit CSE
398 by detecting equal accesses and doing disambiguation with
399 runtime alias tests like for
400 .. = a[i];
401 .. = a[i+1];
402 a[i] = ..;
403 a[i+1] = ..;
404 *p = ..;
405 .. = a[i];
406 .. = a[i+1];
407 where we will end up loading { a[i], a[i+1] } once, make
408 sure that inserting group loads before the first load and
409 stores after the last store will do the right thing.
410 Similar for groups like
411 a[i] = ...;
412 ... = a[i];
413 a[i+1] = ...;
414 where loads from the group interleave with the store. */
415 if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
416 || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
418 gimple earlier_stmt;
419 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
420 if (DR_IS_WRITE
421 (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
423 if (dump_enabled_p ())
424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
425 "READ_WRITE dependence in interleaving."
426 "\n");
427 return true;
431 continue;
434 if (dist > 0 && DDR_REVERSED_P (ddr))
436 /* If DDR_REVERSED_P the order of the data-refs in DDR was
437 reversed (to make distance vector positive), and the actual
438 distance is negative. */
439 if (dump_enabled_p ())
440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
441 "dependence distance negative.\n");
442 /* Record a negative dependence distance to later limit the
443 amount of stmt copying / unrolling we can perform.
444 Only need to handle read-after-write dependence. */
445 if (DR_IS_READ (drb)
446 && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
447 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
448 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
449 continue;
452 if (abs (dist) >= 2
453 && abs (dist) < *max_vf)
455 /* The dependence distance requires reduction of the maximal
456 vectorization factor. */
457 *max_vf = abs (dist);
458 if (dump_enabled_p ())
459 dump_printf_loc (MSG_NOTE, vect_location,
460 "adjusting maximal vectorization factor to %i\n",
461 *max_vf);
464 if (abs (dist) >= *max_vf)
466 /* Dependence distance does not create dependence, as far as
467 vectorization is concerned, in this case. */
468 if (dump_enabled_p ())
469 dump_printf_loc (MSG_NOTE, vect_location,
470 "dependence distance >= VF.\n");
471 continue;
474 if (dump_enabled_p ())
476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
477 "not vectorized, possible dependence "
478 "between data-refs ");
479 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
480 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
481 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
482 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
485 return true;
488 return false;
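/* A worked example of the distance handling above, for a hypothetical loop

     for (i = 0; i < n; i++)
       a[i + 3] = a[i] + 1;

   The dependence distance is 3. With an initial *MAX_VF of, say, 8 the
   distance is >= 2 and < *MAX_VF, so *MAX_VF is reduced to 3; the following
   abs (dist) >= *MAX_VF test then succeeds and the dependence no longer
   constrains vectorization, because a vector iteration of at most 3 elements
   reads a[i..i+2] and writes a[i+3..i+5] without overlap.  */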
491 /* Function vect_analyze_data_ref_dependences.
493 Examine all the data references in the loop, and make sure there do not
494 exist any data dependences between them. Set *MAX_VF according to
495 the maximum vectorization factor the data dependences allow. */
497 bool
498 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
500 unsigned int i;
501 struct data_dependence_relation *ddr;
503 if (dump_enabled_p ())
504 dump_printf_loc (MSG_NOTE, vect_location,
505 "=== vect_analyze_data_ref_dependences ===\n");
507 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
508 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
509 &LOOP_VINFO_DDRS (loop_vinfo),
510 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
511 return false;
513 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
514 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
515 return false;
517 return true;
521 /* Function vect_slp_analyze_data_ref_dependence.
523 Return TRUE if there (might) exist a dependence between a memory-reference
524 DRA and a memory-reference DRB that prohibits vectorization of the
525 basic-block. Unlike the loop case, no run-time alias versioning and no
526 vectorization-factor adjustment are performed here. */
528 static bool
529 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
531 struct data_reference *dra = DDR_A (ddr);
532 struct data_reference *drb = DDR_B (ddr);
534 /* We need to check dependences of statements marked as unvectorizable
535 as well, they still can prohibit vectorization. */
537 /* Independent data accesses. */
538 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
539 return false;
541 if (dra == drb)
542 return false;
544 /* Read-read is OK. */
545 if (DR_IS_READ (dra) && DR_IS_READ (drb))
546 return false;
548 /* If dra and drb are part of the same interleaving chain consider
549 them independent. */
550 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
551 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
552 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
553 return false;
555 /* Unknown data dependence. */
556 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
558 if (dump_enabled_p ())
560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
561 "can't determine dependence between ");
562 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
563 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
564 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
565 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
568 else if (dump_enabled_p ())
570 dump_printf_loc (MSG_NOTE, vect_location,
571 "determined dependence between ");
572 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
573 dump_printf (MSG_NOTE, " and ");
574 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
575 dump_printf (MSG_NOTE, "\n");
578 /* We do not vectorize basic blocks with write-write dependencies. */
579 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
580 return true;
582 /* If we have a read-write dependence check that the load is before the store.
583 When we vectorize basic blocks, vector load can be only before
584 corresponding scalar load, and vector store can be only after its
585 corresponding scalar store. So the order of the accesses is preserved in
586 case the load is before the store. */
587 gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
588 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
590 /* That only holds for load-store pairs taking part in vectorization. */
591 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
592 && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
593 return false;
596 return true;
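/* For example, in a basic block containing the hypothetical statements

     x = a[i];
     a[i] = y;

   the earlier statement is the read: the vector load is emitted at the
   position of the first scalar load and the vector store after the last
   scalar store, so the read-before-write order is preserved and the pair is
   accepted. Had the store come first, the function above would return true
   and the block would not be vectorized.  */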
600 /* Function vect_slp_analyze_data_ref_dependences.
602 Examine all the data references in the basic-block, and make sure there
603 do not exist any data dependences between them that would prohibit
604 vectorization of the basic-block. */
606 bool
607 vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
609 struct data_dependence_relation *ddr;
610 unsigned int i;
612 if (dump_enabled_p ())
613 dump_printf_loc (MSG_NOTE, vect_location,
614 "=== vect_slp_analyze_data_ref_dependences ===\n");
616 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
617 &BB_VINFO_DDRS (bb_vinfo),
618 vNULL, true))
619 return false;
621 FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
622 if (vect_slp_analyze_data_ref_dependence (ddr))
623 return false;
625 return true;
629 /* Function vect_compute_data_ref_alignment
631 Compute the misalignment of the data reference DR.
633 Output:
634 1. If during the misalignment computation it is found that the data reference
635 cannot be vectorized then false is returned.
636 2. DR_MISALIGNMENT (DR) is defined.
638 FOR NOW: No analysis is actually performed. Misalignment is calculated
639 only for trivial cases. TODO. */
641 static bool
642 vect_compute_data_ref_alignment (struct data_reference *dr)
644 gimple stmt = DR_STMT (dr);
645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
647 struct loop *loop = NULL;
648 tree ref = DR_REF (dr);
649 tree vectype;
650 tree base, base_addr;
651 bool base_aligned;
652 tree misalign;
653 tree aligned_to;
654 unsigned HOST_WIDE_INT alignment;
656 if (dump_enabled_p ())
657 dump_printf_loc (MSG_NOTE, vect_location,
658 "vect_compute_data_ref_alignment:\n");
660 if (loop_vinfo)
661 loop = LOOP_VINFO_LOOP (loop_vinfo);
663 /* Initialize misalignment to unknown. */
664 SET_DR_MISALIGNMENT (dr, -1);
666 /* Strided loads perform only component accesses, misalignment information
667 is irrelevant for them. */
668 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
669 return true;
671 misalign = DR_INIT (dr);
672 aligned_to = DR_ALIGNED_TO (dr);
673 base_addr = DR_BASE_ADDRESS (dr);
674 vectype = STMT_VINFO_VECTYPE (stmt_info);
676 /* In case the dataref is in an inner-loop of the loop that is being
677 vectorized (LOOP), we use the base and misalignment information
678 relative to the outer-loop (LOOP). This is ok only if the misalignment
679 stays the same throughout the execution of the inner-loop, which is why
680 we have to check that the stride of the dataref in the inner-loop is a
681 multiple of the vector size. */
682 if (loop && nested_in_vect_loop_p (loop, stmt))
684 tree step = DR_STEP (dr);
685 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
687 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
689 if (dump_enabled_p ())
690 dump_printf_loc (MSG_NOTE, vect_location,
691 "inner step divides the vector-size.\n");
692 misalign = STMT_VINFO_DR_INIT (stmt_info);
693 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
694 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
696 else
698 if (dump_enabled_p ())
699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
700 "inner step doesn't divide the vector-size.\n");
701 misalign = NULL_TREE;
705 /* Similarly, if we're doing basic-block vectorization, we can only use
706 base and misalignment information relative to an innermost loop if the
707 misalignment stays the same throughout the execution of the loop.
708 As above, this is the case if the stride of the dataref is a multiple
709 of the vector size. */
710 if (!loop)
712 tree step = DR_STEP (dr);
713 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
715 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
717 if (dump_enabled_p ())
718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
719 "SLP: step doesn't divide the vector-size.\n");
720 misalign = NULL_TREE;
724 alignment = TYPE_ALIGN_UNIT (vectype);
726 if ((compare_tree_int (aligned_to, alignment) < 0)
727 || !misalign)
729 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "Unknown alignment for access: ");
733 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
734 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
736 return true;
739 /* To look at alignment of the base we have to preserve an inner MEM_REF
740 as that carries alignment information of the actual access. */
741 base = ref;
742 while (handled_component_p (base))
743 base = TREE_OPERAND (base, 0);
744 if (TREE_CODE (base) == MEM_REF)
745 base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
746 build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
748 if (get_object_alignment (base) >= TYPE_ALIGN (vectype))
749 base_aligned = true;
750 else
751 base_aligned = false;
753 if (!base_aligned)
755 /* Strip an inner MEM_REF to a bare decl if possible. */
756 if (TREE_CODE (base) == MEM_REF
757 && integer_zerop (TREE_OPERAND (base, 1))
758 && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
759 base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
761 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
763 if (dump_enabled_p ())
765 dump_printf_loc (MSG_NOTE, vect_location,
766 "can't force alignment of ref: ");
767 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
768 dump_printf (MSG_NOTE, "\n");
770 return true;
773 /* Force the alignment of the decl.
774 NOTE: This is the only change to the code we make during
775 the analysis phase, before deciding to vectorize the loop. */
776 if (dump_enabled_p ())
778 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
779 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
780 dump_printf (MSG_NOTE, "\n");
783 ((dataref_aux *)dr->aux)->base_decl = base;
784 ((dataref_aux *)dr->aux)->base_misaligned = true;
787 /* If this is a backward running DR then the first access in the larger
788 vectype actually is N-1 elements before the address in the DR.
789 Adjust misalign accordingly. */
790 if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
792 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
793 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
794 otherwise we wouldn't be here. */
795 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
796 /* PLUS because DR_STEP was negative. */
797 misalign = size_binop (PLUS_EXPR, misalign, offset);
800 SET_DR_MISALIGNMENT (dr,
801 wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
803 if (dump_enabled_p ())
805 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
806 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
807 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
808 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
811 return true;
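/* A worked example of the backward-step adjustment above, assuming a
   16-byte V4SI vectype (4 elements of 4 bytes): for a data-ref with
   DR_STEP == -4 and a misalignment of 12 bytes before the adjustment,
   OFFSET becomes (4 - 1) * -4 = -12, so the recorded misalignment is
   (12 + -12) mod 16 = 0 -- which matches the lowest address actually
   touched by the vector access, 12 bytes below the address in the DR.  */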
815 /* Function vect_compute_data_refs_alignment
817 Compute the misalignment of data references in the loop.
818 Return FALSE if a data reference is found that cannot be vectorized. */
820 static bool
821 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
822 bb_vec_info bb_vinfo)
824 vec<data_reference_p> datarefs;
825 struct data_reference *dr;
826 unsigned int i;
828 if (loop_vinfo)
829 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
830 else
831 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
833 FOR_EACH_VEC_ELT (datarefs, i, dr)
834 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
835 && !vect_compute_data_ref_alignment (dr))
837 if (bb_vinfo)
839 /* Mark unsupported statement as unvectorizable. */
840 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
841 continue;
843 else
844 return false;
847 return true;
851 /* Function vect_update_misalignment_for_peel
853 DR - the data reference whose misalignment is to be adjusted.
854 DR_PEEL - the data reference whose misalignment is being made
855 zero in the vector loop by the peel.
856 NPEEL - the number of iterations in the peel loop if the misalignment
857 of DR_PEEL is known at compile time. */
859 static void
860 vect_update_misalignment_for_peel (struct data_reference *dr,
861 struct data_reference *dr_peel, int npeel)
863 unsigned int i;
864 vec<dr_p> same_align_drs;
865 struct data_reference *current_dr;
866 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
867 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
868 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
869 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
871 /* For interleaved data accesses the step in the loop must be multiplied by
872 the size of the interleaving group. */
873 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
874 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
875 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
876 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
878 /* It can be assumed that the data refs with the same alignment as dr_peel
879 are aligned in the vector loop. */
880 same_align_drs
881 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
882 FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
884 if (current_dr != dr)
885 continue;
886 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
887 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
888 SET_DR_MISALIGNMENT (dr, 0);
889 return;
892 if (known_alignment_for_access_p (dr)
893 && known_alignment_for_access_p (dr_peel))
895 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
896 int misal = DR_MISALIGNMENT (dr);
897 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
898 misal += negative ? -npeel * dr_size : npeel * dr_size;
899 misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
900 SET_DR_MISALIGNMENT (dr, misal);
901 return;
904 if (dump_enabled_p ())
905 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
906 SET_DR_MISALIGNMENT (dr, -1);
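/* A worked example of the compile-time update above, assuming a 16-byte
   vector alignment: if DR has element size 4, a known misalignment of 4,
   and NPEEL is 3, then misal becomes 4 + 3 * 4 = 16 and 16 & 15 = 0,
   i.e. the peel that aligns DR_PEEL happens to align DR as well; with an
   initial misalignment of 8 the result would be 20 & 15 = 4.  */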
910 /* Function vect_verify_datarefs_alignment
912 Return TRUE if all data references in the loop can be
913 handled with respect to alignment. */
915 bool
916 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
918 vec<data_reference_p> datarefs;
919 struct data_reference *dr;
920 enum dr_alignment_support supportable_dr_alignment;
921 unsigned int i;
923 if (loop_vinfo)
924 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
925 else
926 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
928 FOR_EACH_VEC_ELT (datarefs, i, dr)
930 gimple stmt = DR_STMT (dr);
931 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
933 if (!STMT_VINFO_RELEVANT_P (stmt_info))
934 continue;
936 /* For interleaving, only the alignment of the first access matters.
937 Skip statements marked as not vectorizable. */
938 if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
939 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
940 || !STMT_VINFO_VECTORIZABLE (stmt_info))
941 continue;
943 /* Strided loads perform only component accesses, alignment is
944 irrelevant for them. */
945 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
946 continue;
948 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
949 if (!supportable_dr_alignment)
951 if (dump_enabled_p ())
953 if (DR_IS_READ (dr))
954 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
955 "not vectorized: unsupported unaligned load.");
956 else
957 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
958 "not vectorized: unsupported unaligned "
959 "store.");
961 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
962 DR_REF (dr));
963 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
965 return false;
967 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
968 dump_printf_loc (MSG_NOTE, vect_location,
969 "Vectorizing an unaligned access.\n");
971 return true;
974 /* Given a memory reference EXP return whether its alignment is less
975 than its size. */
977 static bool
978 not_size_aligned (tree exp)
980 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
981 return true;
983 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
984 > get_object_alignment (exp));
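/* For example, a 4-byte int field of a packed structure typically has an
   object alignment of only 1 byte; its size in bits (32) is then greater
   than the alignment returned by get_object_alignment (8), so the access
   counts as not size-aligned.  */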
987 /* Function vector_alignment_reachable_p
989 Return true if vector alignment for DR is reachable by peeling
990 a few loop iterations. Return false otherwise. */
992 static bool
993 vector_alignment_reachable_p (struct data_reference *dr)
995 gimple stmt = DR_STMT (dr);
996 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
997 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
999 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1001 /* For interleaved access we peel only if the number of iterations in
1002 the prolog loop ({VF - misalignment}) is a multiple of the
1003 number of the interleaved accesses. */
1004 int elem_size, mis_in_elements;
1005 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1007 /* FORNOW: handle only known alignment. */
1008 if (!known_alignment_for_access_p (dr))
1009 return false;
1011 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1012 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1014 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
1015 return false;
1018 /* If misalignment is known at the compile time then allow peeling
1019 only if natural alignment is reachable through peeling. */
1020 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1022 HOST_WIDE_INT elmsize =
1023 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1024 if (dump_enabled_p ())
1026 dump_printf_loc (MSG_NOTE, vect_location,
1027 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1028 dump_printf (MSG_NOTE,
1029 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
1031 if (DR_MISALIGNMENT (dr) % elmsize)
1033 if (dump_enabled_p ())
1034 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1035 "data size does not divide the misalignment.\n");
1036 return false;
1040 if (!known_alignment_for_access_p (dr))
1042 tree type = TREE_TYPE (DR_REF (dr));
1043 bool is_packed = not_size_aligned (DR_REF (dr));
1044 if (dump_enabled_p ())
1045 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1046 "Unknown misalignment, is_packed = %d\n",is_packed);
1047 if ((TYPE_USER_ALIGN (type) && !is_packed)
1048 || targetm.vectorize.vector_alignment_reachable (type, is_packed))
1049 return true;
1050 else
1051 return false;
1054 return true;
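/* A worked example of the interleaving check above: with a 4-element
   vector (NELEMENTS == 4) and a known misalignment of 1 element, peeling
   would need 4 - 1 = 3 prologue iterations; for a group of size 3 that is
   a multiple of the group size and alignment is reachable, while for a
   group of size 2 it is not, so the function returns false.  */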
1058 /* Calculate the cost of the memory access represented by DR. */
1060 static void
1061 vect_get_data_access_cost (struct data_reference *dr,
1062 unsigned int *inside_cost,
1063 unsigned int *outside_cost,
1064 stmt_vector_for_cost *body_cost_vec)
1066 gimple stmt = DR_STMT (dr);
1067 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1068 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1069 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1070 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1071 int ncopies = vf / nunits;
1073 if (DR_IS_READ (dr))
1074 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1075 NULL, body_cost_vec, false);
1076 else
1077 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1079 if (dump_enabled_p ())
1080 dump_printf_loc (MSG_NOTE, vect_location,
1081 "vect_get_data_access_cost: inside_cost = %d, "
1082 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1086 /* Insert DR into peeling hash table with NPEEL as key. */
1088 static void
1089 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1090 int npeel)
1092 struct _vect_peel_info elem, *slot;
1093 _vect_peel_info **new_slot;
1094 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1096 elem.npeel = npeel;
1097 slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
1098 if (slot)
1099 slot->count++;
1100 else
1102 slot = XNEW (struct _vect_peel_info);
1103 slot->npeel = npeel;
1104 slot->dr = dr;
1105 slot->count = 1;
1106 new_slot
1107 = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
1108 *new_slot = slot;
1111 if (!supportable_dr_alignment
1112 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1113 slot->count += VECT_MAX_COST;
1117 /* Traverse peeling hash table to find peeling option that aligns maximum
1118 number of data accesses. */
1121 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1122 _vect_peel_extended_info *max)
1124 vect_peel_info elem = *slot;
1126 if (elem->count > max->peel_info.count
1127 || (elem->count == max->peel_info.count
1128 && max->peel_info.npeel > elem->npeel))
1130 max->peel_info.npeel = elem->npeel;
1131 max->peel_info.count = elem->count;
1132 max->peel_info.dr = elem->dr;
1135 return 1;
1139 /* Traverse peeling hash table and calculate cost for each peeling option.
1140 Find the one with the lowest cost. */
1143 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1144 _vect_peel_extended_info *min)
1146 vect_peel_info elem = *slot;
1147 int save_misalignment, dummy;
1148 unsigned int inside_cost = 0, outside_cost = 0, i;
1149 gimple stmt = DR_STMT (elem->dr);
1150 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1151 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1152 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1153 struct data_reference *dr;
1154 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
1156 prologue_cost_vec.create (2);
1157 body_cost_vec.create (2);
1158 epilogue_cost_vec.create (2);
1160 FOR_EACH_VEC_ELT (datarefs, i, dr)
1162 stmt = DR_STMT (dr);
1163 stmt_info = vinfo_for_stmt (stmt);
1164 /* For interleaving, only the alignment of the first access
1165 matters. */
1166 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1167 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1168 continue;
1170 save_misalignment = DR_MISALIGNMENT (dr);
1171 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1172 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1173 &body_cost_vec);
1174 SET_DR_MISALIGNMENT (dr, save_misalignment);
1177 auto_vec<stmt_info_for_cost> scalar_cost_vec;
1178 vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec);
1179 outside_cost += vect_get_known_peeling_cost
1180 (loop_vinfo, elem->npeel, &dummy,
1181 &scalar_cost_vec, &prologue_cost_vec, &epilogue_cost_vec);
1183 /* Prologue and epilogue costs are added to the target model later.
1184 These costs depend only on the scalar iteration cost, the
1185 number of peeling iterations finally chosen, and the number of
1186 misaligned statements. So discard the information found here. */
1187 prologue_cost_vec.release ();
1188 epilogue_cost_vec.release ();
1190 if (inside_cost < min->inside_cost
1191 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1193 min->inside_cost = inside_cost;
1194 min->outside_cost = outside_cost;
1195 min->body_cost_vec.release ();
1196 min->body_cost_vec = body_cost_vec;
1197 min->peel_info.dr = elem->dr;
1198 min->peel_info.npeel = elem->npeel;
1200 else
1201 body_cost_vec.release ();
1203 return 1;
1207 /* Choose best peeling option by traversing peeling hash table and either
1208 choosing an option with the lowest cost (if cost model is enabled) or the
1209 option that aligns as many accesses as possible. */
1211 static struct data_reference *
1212 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1213 unsigned int *npeel,
1214 stmt_vector_for_cost *body_cost_vec)
1216 struct _vect_peel_extended_info res;
1218 res.peel_info.dr = NULL;
1219 res.body_cost_vec = stmt_vector_for_cost ();
1221 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1223 res.inside_cost = INT_MAX;
1224 res.outside_cost = INT_MAX;
1225 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1226 ->traverse <_vect_peel_extended_info *,
1227 vect_peeling_hash_get_lowest_cost> (&res);
1229 else
1231 res.peel_info.count = 0;
1232 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1233 ->traverse <_vect_peel_extended_info *,
1234 vect_peeling_hash_get_most_frequent> (&res);
1237 *npeel = res.peel_info.npeel;
1238 *body_cost_vec = res.body_cost_vec;
1239 return res.peel_info.dr;
1243 /* Function vect_enhance_data_refs_alignment
1245 This pass will use loop versioning and loop peeling in order to enhance
1246 the alignment of data references in the loop.
1248 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1249 original loop is to be vectorized. Any other loops that are created by
1250 the transformations performed in this pass - are not supposed to be
1251 vectorized. This restriction will be relaxed.
1253 This pass will require a cost model to guide it whether to apply peeling
1254 or versioning or a combination of the two. For example, the scheme that
1255 Intel uses when given a loop with several memory accesses is as follows:
1256 choose one memory access ('p') whose alignment you want to force by doing
1257 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1258 other accesses are not necessarily aligned, or (2) use loop versioning to
1259 generate one loop in which all accesses are aligned, and another loop in
1260 which only 'p' is necessarily aligned.
1262 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1263 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1264 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1266 Devising a cost model is the most critical aspect of this work. It will
1267 guide us on which access to peel for, whether to use loop versioning, how
1268 many versions to create, etc. The cost model will probably consist of
1269 generic considerations as well as target specific considerations (on
1270 powerpc for example, misaligned stores are more painful than misaligned
1271 loads).
1273 Here are the general steps involved in alignment enhancements:
1275 -- original loop, before alignment analysis:
1276 for (i=0; i<N; i++){
1277 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1278 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1281 -- After vect_compute_data_refs_alignment:
1282 for (i=0; i<N; i++){
1283 x = q[i]; # DR_MISALIGNMENT(q) = 3
1284 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1287 -- Possibility 1: we do loop versioning:
1288 if (p is aligned) {
1289 for (i=0; i<N; i++){ # loop 1A
1290 x = q[i]; # DR_MISALIGNMENT(q) = 3
1291 p[i] = y; # DR_MISALIGNMENT(p) = 0
1294 else {
1295 for (i=0; i<N; i++){ # loop 1B
1296 x = q[i]; # DR_MISALIGNMENT(q) = 3
1297 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1301 -- Possibility 2: we do loop peeling:
1302 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1303 x = q[i];
1304 p[i] = y;
1306 for (i = 3; i < N; i++){ # loop 2A
1307 x = q[i]; # DR_MISALIGNMENT(q) = 0
1308 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1311 -- Possibility 3: combination of loop peeling and versioning:
1312 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1313 x = q[i];
1314 p[i] = y;
1316 if (p is aligned) {
1317 for (i = 3; i<N; i++){ # loop 3A
1318 x = q[i]; # DR_MISALIGNMENT(q) = 0
1319 p[i] = y; # DR_MISALIGNMENT(p) = 0
1322 else {
1323 for (i = 3; i<N; i++){ # loop 3B
1324 x = q[i]; # DR_MISALIGNMENT(q) = 0
1325 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1329 These loops are later passed to loop_transform to be vectorized. The
1330 vectorizer will use the alignment information to guide the transformation
1331 (whether to generate regular loads/stores, or with special handling for
1332 misalignment). */
1334 bool
1335 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1337 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1338 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1339 enum dr_alignment_support supportable_dr_alignment;
1340 struct data_reference *dr0 = NULL, *first_store = NULL;
1341 struct data_reference *dr;
1342 unsigned int i, j;
1343 bool do_peeling = false;
1344 bool do_versioning = false;
1345 bool stat;
1346 gimple stmt;
1347 stmt_vec_info stmt_info;
1348 unsigned int npeel = 0;
1349 bool all_misalignments_unknown = true;
1350 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1351 unsigned possible_npeel_number = 1;
1352 tree vectype;
1353 unsigned int nelements, mis, same_align_drs_max = 0;
1354 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
1356 if (dump_enabled_p ())
1357 dump_printf_loc (MSG_NOTE, vect_location,
1358 "=== vect_enhance_data_refs_alignment ===\n");
1360 /* While cost model enhancements are expected in the future, the high level
1361 view of the code at this time is as follows:
1363 A) If there is a misaligned access then see if peeling to align
1364 this access can make all data references satisfy
1365 vect_supportable_dr_alignment. If so, update data structures
1366 as needed and return true.
1368 B) If peeling wasn't possible and there is a data reference with an
1369 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1370 then see if loop versioning checks can be used to make all data
1371 references satisfy vect_supportable_dr_alignment. If so, update
1372 data structures as needed and return true.
1374 C) If neither peeling nor versioning were successful then return false if
1375 any data reference does not satisfy vect_supportable_dr_alignment.
1377 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1379 Note, Possibility 3 above (which is peeling and versioning together) is not
1380 being done at this time. */
1382 /* (1) Peeling to force alignment. */
1384 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1385 Considerations:
1386 + How many accesses will become aligned due to the peeling
1387 - How many accesses will become unaligned due to the peeling,
1388 and the cost of misaligned accesses.
1389 - The cost of peeling (the extra runtime checks, the increase
1390 in code size). */
1392 FOR_EACH_VEC_ELT (datarefs, i, dr)
1394 stmt = DR_STMT (dr);
1395 stmt_info = vinfo_for_stmt (stmt);
1397 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1398 continue;
1400 /* For interleaving, only the alignment of the first access
1401 matters. */
1402 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1403 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1404 continue;
1406 /* For invariant accesses there is nothing to enhance. */
1407 if (integer_zerop (DR_STEP (dr)))
1408 continue;
1410 /* Strided loads perform only component accesses, alignment is
1411 irrelevant for them. */
1412 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1413 continue;
1415 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1416 do_peeling = vector_alignment_reachable_p (dr);
1417 if (do_peeling)
1419 if (known_alignment_for_access_p (dr))
1421 unsigned int npeel_tmp;
1422 bool negative = tree_int_cst_compare (DR_STEP (dr),
1423 size_zero_node) < 0;
1425 /* Save info about DR in the hash table. */
1426 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1427 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1428 = new hash_table<peel_info_hasher> (1);
1430 vectype = STMT_VINFO_VECTYPE (stmt_info);
1431 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1432 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1433 TREE_TYPE (DR_REF (dr))));
1434 npeel_tmp = (negative
1435 ? (mis - nelements) : (nelements - mis))
1436 & (nelements - 1);
1438 /* For multiple types, it is possible that the bigger type access
1439 will have more than one peeling option. E.g., a loop with two
1440 types: one of size (vector size / 4), and the other one of
1441 size (vector size / 8). The vectorization factor will be 8. If both
1442 accesses are misaligned by 3, the first one needs one scalar
1443 iteration to be aligned, and the second one needs 5. But the
1444 first one will also be aligned by peeling 5 scalar
1445 iterations, and in that case both accesses will be aligned.
1446 Hence, except for the immediate peeling amount, we also want
1447 to try adding a full vector size, as long as we don't exceed the
1448 vectorization factor.
1449 We do this automatically for the cost model, since we calculate the cost
1450 for every peeling option. */
1451 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1452 possible_npeel_number = vf /nelements;
1454 /* Handle the aligned case. We may decide to align some other
1455 access, making DR unaligned. */
1456 if (DR_MISALIGNMENT (dr) == 0)
1458 npeel_tmp = 0;
1459 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1460 possible_npeel_number++;
1463 for (j = 0; j < possible_npeel_number; j++)
1465 gcc_assert (npeel_tmp <= vf);
1466 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1467 npeel_tmp += nelements;
1470 all_misalignments_unknown = false;
1471 /* Data-ref that was chosen for the case that all the
1472 misalignments are unknown is not relevant anymore, since we
1473 have a data-ref with known alignment. */
1474 dr0 = NULL;
1476 else
1478 /* If we don't know any misalignment values, we prefer
1479 peeling for the data-ref that has the maximum number of data-refs
1480 with the same alignment, unless the target prefers to align
1481 stores over loads.
1482 if (all_misalignments_unknown)
1484 unsigned same_align_drs
1485 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1486 if (!dr0
1487 || same_align_drs_max < same_align_drs)
1489 same_align_drs_max = same_align_drs;
1490 dr0 = dr;
1492 /* For data-refs with the same number of related
1493 accesses prefer the one where the misalign
1494 computation will be invariant in the outermost loop. */
1495 else if (same_align_drs_max == same_align_drs)
1497 struct loop *ivloop0, *ivloop;
1498 ivloop0 = outermost_invariant_loop_for_expr
1499 (loop, DR_BASE_ADDRESS (dr0));
1500 ivloop = outermost_invariant_loop_for_expr
1501 (loop, DR_BASE_ADDRESS (dr));
1502 if ((ivloop && !ivloop0)
1503 || (ivloop && ivloop0
1504 && flow_loop_nested_p (ivloop, ivloop0)))
1505 dr0 = dr;
1508 if (!first_store && DR_IS_WRITE (dr))
1509 first_store = dr;
1512 /* If there are both known and unknown misaligned accesses in the
1513 loop, we choose peeling amount according to the known
1514 accesses. */
1515 if (!supportable_dr_alignment)
1517 dr0 = dr;
1518 if (!first_store && DR_IS_WRITE (dr))
1519 first_store = dr;
1523 else
1525 if (!aligned_access_p (dr))
1527 if (dump_enabled_p ())
1528 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1529 "vector alignment may not be reachable\n");
1530 break;
1535 /* Check if we can possibly peel the loop. */
1536 if (!vect_can_advance_ivs_p (loop_vinfo)
1537 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1538 do_peeling = false;
1540 /* If we don't know how many times the peeling loop will run
1541 assume it will run VF-1 times and disable peeling if the remaining
1542 iters are less than the vectorization factor. */
1543 if (do_peeling
1544 && all_misalignments_unknown
1545 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1546 && (LOOP_VINFO_INT_NITERS (loop_vinfo)
1547 < 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
1548 do_peeling = false;
1550 if (do_peeling
1551 && all_misalignments_unknown
1552 && vect_supportable_dr_alignment (dr0, false))
1554 /* Check if the target requires preferring stores over loads, i.e., if
1555 misaligned stores are more expensive than misaligned loads (taking
1556 drs with same alignment into account). */
1557 if (first_store && DR_IS_READ (dr0))
1559 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1560 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1561 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1562 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1563 stmt_vector_for_cost dummy;
1564 dummy.create (2);
1566 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1567 &dummy);
1568 vect_get_data_access_cost (first_store, &store_inside_cost,
1569 &store_outside_cost, &dummy);
1571 dummy.release ();
1573 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1574 aligning the load DR0). */
1575 load_inside_penalty = store_inside_cost;
1576 load_outside_penalty = store_outside_cost;
1577 for (i = 0;
1578 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1579 DR_STMT (first_store))).iterate (i, &dr);
1580 i++)
1581 if (DR_IS_READ (dr))
1583 load_inside_penalty += load_inside_cost;
1584 load_outside_penalty += load_outside_cost;
1586 else
1588 load_inside_penalty += store_inside_cost;
1589 load_outside_penalty += store_outside_cost;
1592 /* Calculate the penalty for leaving DR0 unaligned (by
1593 aligning the FIRST_STORE). */
1594 store_inside_penalty = load_inside_cost;
1595 store_outside_penalty = load_outside_cost;
1596 for (i = 0;
1597 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1598 DR_STMT (dr0))).iterate (i, &dr);
1599 i++)
1600 if (DR_IS_READ (dr))
1602 store_inside_penalty += load_inside_cost;
1603 store_outside_penalty += load_outside_cost;
1605 else
1607 store_inside_penalty += store_inside_cost;
1608 store_outside_penalty += store_outside_cost;
1611 if (load_inside_penalty > store_inside_penalty
1612 || (load_inside_penalty == store_inside_penalty
1613 && load_outside_penalty > store_outside_penalty))
1614 dr0 = first_store;
1617 /* In case there are only loads with different unknown misalignments, use
1618 peeling only if it may help to align other accesses in the loop. */
1619 if (!first_store
1620 && !STMT_VINFO_SAME_ALIGN_REFS (
1621 vinfo_for_stmt (DR_STMT (dr0))).length ()
1622 && vect_supportable_dr_alignment (dr0, false)
1623 != dr_unaligned_supported)
1624 do_peeling = false;
1627 if (do_peeling && !dr0)
1629 /* Peeling is possible, but there is no data access that is not supported
1630 unless aligned. So we try to choose the best possible peeling. */
1632 /* We should get here only if there are drs with known misalignment. */
1633 gcc_assert (!all_misalignments_unknown);
1635 /* Choose the best peeling from the hash table. */
1636 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
1637 &body_cost_vec);
1638 if (!dr0 || !npeel)
1639 do_peeling = false;
1641 /* If peeling by npeel will result in a remaining loop not iterating
1642 enough to be vectorized then do not peel. */
1643 if (do_peeling
1644 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1645 && (LOOP_VINFO_INT_NITERS (loop_vinfo)
1646 < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
1647 do_peeling = false;
1650 if (do_peeling)
1652 stmt = DR_STMT (dr0);
1653 stmt_info = vinfo_for_stmt (stmt);
1654 vectype = STMT_VINFO_VECTYPE (stmt_info);
1655 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1657 if (known_alignment_for_access_p (dr0))
1659 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1660 size_zero_node) < 0;
1661 if (!npeel)
1663 /* Since it's known at compile time, compute the number of
1664 iterations in the peeled loop (the peeling factor) for use in
1665 updating DR_MISALIGNMENT values. The peeling factor is the
1666 vectorization factor minus the misalignment as an element
1667 count. */
1668 mis = DR_MISALIGNMENT (dr0);
1669 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1670 npeel = ((negative ? mis - nelements : nelements - mis)
1671 & (nelements - 1));
1674 /* For interleaved data access every iteration accesses all the
1675 members of the group, therefore we divide the number of iterations
1676 by the group size. */
1677 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1678 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1679 npeel /= GROUP_SIZE (stmt_info);
1681 if (dump_enabled_p ())
1682 dump_printf_loc (MSG_NOTE, vect_location,
1683 "Try peeling by %d\n", npeel);
1686 /* Ensure that all data refs can be vectorized after the peel. */
1687 FOR_EACH_VEC_ELT (datarefs, i, dr)
1689 int save_misalignment;
1691 if (dr == dr0)
1692 continue;
1694 stmt = DR_STMT (dr);
1695 stmt_info = vinfo_for_stmt (stmt);
1696 /* For interleaving, only the alignment of the first access
1697 matters. */
1698 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1699 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1700 continue;
1702 /* Strided loads perform only component accesses, alignment is
1703 irrelevant for them. */
1704 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1705 continue;
1707 save_misalignment = DR_MISALIGNMENT (dr);
1708 vect_update_misalignment_for_peel (dr, dr0, npeel);
1709 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1710 SET_DR_MISALIGNMENT (dr, save_misalignment);
1712 if (!supportable_dr_alignment)
1714 do_peeling = false;
1715 break;
1719 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1721 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1722 if (!stat)
1723 do_peeling = false;
1724 else
1726 body_cost_vec.release ();
1727 return stat;
1731 if (do_peeling)
1733 unsigned max_allowed_peel
1734 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1735 if (max_allowed_peel != (unsigned)-1)
1737 unsigned max_peel = npeel;
1738 if (max_peel == 0)
1740 gimple dr_stmt = DR_STMT (dr0);
1741 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1742 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1743 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1745 if (max_peel > max_allowed_peel)
1747 do_peeling = false;
1748 if (dump_enabled_p ())
1749 dump_printf_loc (MSG_NOTE, vect_location,
1750 "Disable peeling, max peels reached: %d\n", max_peel);
1755 if (do_peeling)
1757 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1758 If the misalignment of DR_i is identical to that of dr0 then set
1759 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1760 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1761 by the peeling factor times the element size of DR_i (MOD the
1762 vectorization factor times the size). Otherwise, the
1763 misalignment of DR_i must be set to unknown. */
1764 FOR_EACH_VEC_ELT (datarefs, i, dr)
1765 if (dr != dr0)
1766 vect_update_misalignment_for_peel (dr, dr0, npeel);
1768 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1769 if (npeel)
1770 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1771 else
1772 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1773 = DR_MISALIGNMENT (dr0);
1774 SET_DR_MISALIGNMENT (dr0, 0);
1775 if (dump_enabled_p ())
1777 dump_printf_loc (MSG_NOTE, vect_location,
1778 "Alignment of access forced using peeling.\n");
1779 dump_printf_loc (MSG_NOTE, vect_location,
1780 "Peeling for alignment will be applied.\n");
1782 /* The inside-loop cost will be accounted for in vectorizable_load
1783 and vectorizable_store correctly with adjusted alignments.
1784	     Drop the body_cost_vec on the floor here.  */
1785 body_cost_vec.release ();
1787 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1788 gcc_assert (stat);
1789 return stat;
1793 body_cost_vec.release ();
1795 /* (2) Versioning to force alignment. */
1797 /* Try versioning if:
1798     1) the loop is optimized for speed, and
1799 2) there is at least one unsupported misaligned data ref with an unknown
1800 misalignment, and
1801 3) all misaligned data refs with a known misalignment are supported, and
1802 4) the number of runtime alignment checks is within reason. */
1804 do_versioning =
1805 optimize_loop_nest_for_speed_p (loop)
1806 && (!loop->inner); /* FORNOW */
1808 if (do_versioning)
1810 FOR_EACH_VEC_ELT (datarefs, i, dr)
1812 stmt = DR_STMT (dr);
1813 stmt_info = vinfo_for_stmt (stmt);
1815 /* For interleaving, only the alignment of the first access
1816 matters. */
1817 if (aligned_access_p (dr)
1818 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1819 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1820 continue;
1822	  /* Strided loads perform only component accesses; alignment is
1823	     irrelevant for them.  */
1824 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1825 continue;
1827 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1829 if (!supportable_dr_alignment)
1831 gimple stmt;
1832 int mask;
1833 tree vectype;
1835 if (known_alignment_for_access_p (dr)
1836 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1837 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1839 do_versioning = false;
1840 break;
1843 stmt = DR_STMT (dr);
1844 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1845 gcc_assert (vectype);
1847 /* The rightmost bits of an aligned address must be zeros.
1848 Construct the mask needed for this test. For example,
1849 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1850 mask must be 15 = 0xf. */
1851 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
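	  /* Roughly, the run-time condition built later from this mask is
	     ((addr_1 | ... | addr_n) & mask) == 0 over the addresses of the
	     may-misalign refs, e.g. (addr & 0xf) == 0 for V4SI; when it
	     holds, the vectorized loop version is executed.  */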
1853 /* FORNOW: use the same mask to test all potentially unaligned
1854 references in the loop. The vectorizer currently supports
1855	     a single vector size; see the reference to
1856 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1857 vectorization factor is computed. */
1858 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1859 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1860 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1861 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1862 DR_STMT (dr));
1866 /* Versioning requires at least one misaligned data reference. */
1867 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1868 do_versioning = false;
1869 else if (!do_versioning)
1870 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1873 if (do_versioning)
1875 vec<gimple> may_misalign_stmts
1876 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1877 gimple stmt;
1879 /* It can now be assumed that the data references in the statements
1880 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1881 of the loop being vectorized. */
1882 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1884 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1885 dr = STMT_VINFO_DATA_REF (stmt_info);
1886 SET_DR_MISALIGNMENT (dr, 0);
1887 if (dump_enabled_p ())
1888 dump_printf_loc (MSG_NOTE, vect_location,
1889 "Alignment of access forced using versioning.\n");
1892 if (dump_enabled_p ())
1893 dump_printf_loc (MSG_NOTE, vect_location,
1894 "Versioning for alignment will be applied.\n");
1896 /* Peeling and versioning can't be done together at this time. */
1897 gcc_assert (! (do_peeling && do_versioning));
1899 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1900 gcc_assert (stat);
1901 return stat;
1904 /* This point is reached if neither peeling nor versioning is being done. */
1905 gcc_assert (! (do_peeling || do_versioning));
1907 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1908 return stat;
1912 /* Function vect_find_same_alignment_drs.
1914 Update group and alignment relations according to the chosen
1915 vectorization factor. */
1917 static void
1918 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1919 loop_vec_info loop_vinfo)
1921 unsigned int i;
1922 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1923 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1924 struct data_reference *dra = DDR_A (ddr);
1925 struct data_reference *drb = DDR_B (ddr);
1926 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1927 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1928 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1929 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1930 lambda_vector dist_v;
1931 unsigned int loop_depth;
1933 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1934 return;
1936 if (dra == drb)
1937 return;
1939 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1940 return;
1942 /* Loop-based vectorization and known data dependence. */
1943 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1944 return;
1946 /* Data-dependence analysis reports a distance vector of zero
1947 for data-references that overlap only in the first iteration
1948     but whose steps have different signs (see PR45764).
1949 So as a sanity check require equal DR_STEP. */
1950 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1951 return;
1953 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1954 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1956 int dist = dist_v[loop_depth];
1958 if (dump_enabled_p ())
1959 dump_printf_loc (MSG_NOTE, vect_location,
1960 "dependence distance = %d.\n", dist);
1962 /* Same loop iteration. */
1963 if (dist == 0
1964 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1966 /* Two references with distance zero have the same alignment. */
1967 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1968 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
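	  /* For example (hypothetical values): with a vectorization factor
	     of 4 and equal element sizes, a dependence distance of 8 gives
	     8 % 4 == 0, so both references see the same misalignment in
	     every vector iteration and can share alignment information.  */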
1969 if (dump_enabled_p ())
1971 dump_printf_loc (MSG_NOTE, vect_location,
1972 "accesses have the same alignment.\n");
1973 dump_printf (MSG_NOTE,
1974 "dependence distance modulo vf == 0 between ");
1975 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1976 dump_printf (MSG_NOTE, " and ");
1977 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1978 dump_printf (MSG_NOTE, "\n");
1985 /* Function vect_analyze_data_refs_alignment
1987 Analyze the alignment of the data-references in the loop.
1988 Return FALSE if a data reference is found that cannot be vectorized. */
1990 bool
1991 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
1992 bb_vec_info bb_vinfo)
1994 if (dump_enabled_p ())
1995 dump_printf_loc (MSG_NOTE, vect_location,
1996 "=== vect_analyze_data_refs_alignment ===\n");
1998 /* Mark groups of data references with same alignment using
1999 data dependence information. */
2000 if (loop_vinfo)
2002 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2003 struct data_dependence_relation *ddr;
2004 unsigned int i;
2006 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2007 vect_find_same_alignment_drs (ddr, loop_vinfo);
2010 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2012 if (dump_enabled_p ())
2013 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2014 "not vectorized: can't calculate alignment "
2015 "for data ref.\n");
2016 return false;
2019 return true;
2023 /* Analyze groups of accesses: check that DR belongs to a group of
2024 accesses of legal size, step, etc. Detect gaps, single element
2025 interleaving, and other special cases. Set grouped access info.
2026 Collect groups of strided stores for further use in SLP analysis. */
2028 static bool
2029 vect_analyze_group_access (struct data_reference *dr)
2031 tree step = DR_STEP (dr);
2032 tree scalar_type = TREE_TYPE (DR_REF (dr));
2033 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2034 gimple stmt = DR_STMT (dr);
2035 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2036 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2037 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2038 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2039 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2040 bool slp_impossible = false;
2041 struct loop *loop = NULL;
2043 if (loop_vinfo)
2044 loop = LOOP_VINFO_LOOP (loop_vinfo);
2046 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2047 size of the interleaving group (including gaps). */
2048 groupsize = absu_hwi (dr_step) / type_size;
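  /* For example (hypothetical values): a 4-byte int accessed with a step of
     32 bytes gives GROUPSIZE = 8, i.e. an interleaving group of 8 elements
     including any gaps.  */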
2050  /* A non-consecutive access is possible only if it is part of an interleaving group.  */
2051 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2053      /* Check if this DR is part of an interleaving group, and is a single
2054	 element of the group that is accessed in the loop.  */
2056 /* Gaps are supported only for loads. STEP must be a multiple of the type
2057 size. The size of the group must be a power of 2. */
2058 if (DR_IS_READ (dr)
2059 && (dr_step % type_size) == 0
2060 && groupsize > 0
2061 && exact_log2 (groupsize) != -1)
2063 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2064 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2065 if (dump_enabled_p ())
2067 dump_printf_loc (MSG_NOTE, vect_location,
2068 "Detected single element interleaving ");
2069 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2070 dump_printf (MSG_NOTE, " step ");
2071 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2072 dump_printf (MSG_NOTE, "\n");
2075 if (loop_vinfo)
2077 if (dump_enabled_p ())
2078 dump_printf_loc (MSG_NOTE, vect_location,
2079 "Data access with gaps requires scalar "
2080 "epilogue loop\n");
2081 if (loop->inner)
2083 if (dump_enabled_p ())
2084 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2085 "Peeling for outer loop is not"
2086 " supported\n");
2087 return false;
2090 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2093 return true;
2096 if (dump_enabled_p ())
2098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2099 "not consecutive access ");
2100 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2101 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2104 if (bb_vinfo)
2106 /* Mark the statement as unvectorizable. */
2107 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2108 return true;
2111 return false;
2114 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2116 /* First stmt in the interleaving chain. Check the chain. */
2117 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2118 struct data_reference *data_ref = dr;
2119 unsigned int count = 1;
2120 tree prev_init = DR_INIT (data_ref);
2121 gimple prev = stmt;
2122 HOST_WIDE_INT diff, gaps = 0;
2123 unsigned HOST_WIDE_INT count_in_bytes;
2125 while (next)
2127	  /* Skip same data-refs.  In case two or more stmts share a
2128	     data-ref (supported only for loads), we vectorize only the first
2129 stmt, and the rest get their vectorized loads from the first
2130 one. */
2131 if (!tree_int_cst_compare (DR_INIT (data_ref),
2132 DR_INIT (STMT_VINFO_DATA_REF (
2133 vinfo_for_stmt (next)))))
2135 if (DR_IS_WRITE (data_ref))
2137 if (dump_enabled_p ())
2138 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2139 "Two store stmts share the same dr.\n");
2140 return false;
2143 /* For load use the same data-ref load. */
2144 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2146 prev = next;
2147 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2148 continue;
2151 prev = next;
2152 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2154 /* All group members have the same STEP by construction. */
2155 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2157 /* Check that the distance between two accesses is equal to the type
2158 size. Otherwise, we have gaps. */
2159 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2160 - TREE_INT_CST_LOW (prev_init)) / type_size;
2161 if (diff != 1)
2163 /* FORNOW: SLP of accesses with gaps is not supported. */
2164 slp_impossible = true;
2165 if (DR_IS_WRITE (data_ref))
2167 if (dump_enabled_p ())
2168 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2169 "interleaved store with gaps\n");
2170 return false;
2173 gaps += diff - 1;
2176 last_accessed_element += diff;
2178 /* Store the gap from the previous member of the group. If there is no
2179 gap in the access, GROUP_GAP is always 1. */
2180 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2182 prev_init = DR_INIT (data_ref);
2183 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2184 /* Count the number of data-refs in the chain. */
2185 count++;
2188      /* COUNT is the number of accesses found; we multiply it by the size of
2189 the type to get COUNT_IN_BYTES. */
2190 count_in_bytes = type_size * count;
2192 /* Check that the size of the interleaving (including gaps) is not
2193 greater than STEP. */
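      /* For example (hypothetical values): three 4-byte accesses with one
	 one-element gap give COUNT_IN_BYTES = 12; a step of 16 bytes passes
	 the check below (16 >= 12 + 4), whereas a step of 8 would not.  */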
2194 if (dr_step != 0
2195 && absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
2197 if (dump_enabled_p ())
2199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2200 "interleaving size is greater than step for ");
2201 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2202 DR_REF (dr));
2203 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2205 return false;
2208 /* Check that the size of the interleaving is equal to STEP for stores,
2209 i.e., that there are no gaps. */
2210 if (dr_step != 0
2211 && absu_hwi (dr_step) != count_in_bytes)
2213 if (DR_IS_READ (dr))
2215 slp_impossible = true;
2216	      /* There is a gap after the last load in the group.  This gap is the
2217		 difference between the groupsize and the number of elements.
2218 When there is no gap, this difference should be 0. */
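	      /* E.g. (hypothetical values) a load group with GROUPSIZE 4 but
		 only 3 members gets GROUP_GAP 1: one unused element at the
		 end of each group instance.  */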
2219 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
2221 else
2223 if (dump_enabled_p ())
2224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2225 "interleaved store with gaps\n");
2226 return false;
2230 /* Check that STEP is a multiple of type size. */
2231 if (dr_step != 0
2232 && (dr_step % type_size) != 0)
2234 if (dump_enabled_p ())
2236 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2237 "step is not a multiple of type size: step ");
2238 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
2239 dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
2240 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2241 TYPE_SIZE_UNIT (scalar_type));
2242 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2244 return false;
2247 if (groupsize == 0)
2248 groupsize = count + gaps;
2250 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2251 if (dump_enabled_p ())
2252 dump_printf_loc (MSG_NOTE, vect_location,
2253 "Detected interleaving of size %d\n", (int)groupsize);
2255 /* SLP: create an SLP data structure for every interleaving group of
2256     stores for further analysis in vect_analyze_slp.  */
2257 if (DR_IS_WRITE (dr) && !slp_impossible)
2259 if (loop_vinfo)
2260 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2261 if (bb_vinfo)
2262 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2265  /* There is a gap at the end of the group.  */
2266 if (groupsize - last_accessed_element > 0 && loop_vinfo)
2268 if (dump_enabled_p ())
2269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2270 "Data access with gaps requires scalar "
2271 "epilogue loop\n");
2272 if (loop->inner)
2274 if (dump_enabled_p ())
2275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2276 "Peeling for outer loop is not supported\n");
2277 return false;
2280 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2284 return true;
2288 /* Analyze the access pattern of the data-reference DR.
2289 In case of non-consecutive accesses call vect_analyze_group_access() to
2290 analyze groups of accesses. */
2292 static bool
2293 vect_analyze_data_ref_access (struct data_reference *dr)
2295 tree step = DR_STEP (dr);
2296 tree scalar_type = TREE_TYPE (DR_REF (dr));
2297 gimple stmt = DR_STMT (dr);
2298 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2299 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2300 struct loop *loop = NULL;
2302 if (loop_vinfo)
2303 loop = LOOP_VINFO_LOOP (loop_vinfo);
2305 if (loop_vinfo && !step)
2307 if (dump_enabled_p ())
2308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2309 "bad data-ref access in loop\n");
2310 return false;
2313  /* Allow invariant loads in non-nested loops.  */
2314 if (loop_vinfo && integer_zerop (step))
2316 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2317 if (nested_in_vect_loop_p (loop, stmt))
2319 if (dump_enabled_p ())
2320 dump_printf_loc (MSG_NOTE, vect_location,
2321 "zero step in inner loop of nest\n");
2322 return false;
2324 return DR_IS_READ (dr);
2327 if (loop && nested_in_vect_loop_p (loop, stmt))
2329 /* Interleaved accesses are not yet supported within outer-loop
2330 vectorization for references in the inner-loop. */
2331 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2333 /* For the rest of the analysis we use the outer-loop step. */
2334 step = STMT_VINFO_DR_STEP (stmt_info);
2335 if (integer_zerop (step))
2337 if (dump_enabled_p ())
2338 dump_printf_loc (MSG_NOTE, vect_location,
2339 "zero step in outer loop.\n");
2340 if (DR_IS_READ (dr))
2341 return true;
2342 else
2343 return false;
2347 /* Consecutive? */
2348 if (TREE_CODE (step) == INTEGER_CST)
2350 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2351 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2352 || (dr_step < 0
2353 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2355 /* Mark that it is not interleaving. */
2356 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2357 return true;
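      /* For example, a 4-byte int access with step 4 (or -4 for a reversed
	 access) is consecutive and needs no interleaving analysis.  */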
2361 if (loop && nested_in_vect_loop_p (loop, stmt))
2363 if (dump_enabled_p ())
2364 dump_printf_loc (MSG_NOTE, vect_location,
2365 "grouped access in outer loop.\n");
2366 return false;
2369  /* Assume this is a DR handled by the non-constant strided-load case.  */
2370 if (TREE_CODE (step) != INTEGER_CST)
2371 return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
2373  /* Non-consecutive access - check if it is part of an interleaving group.  */
2374 return vect_analyze_group_access (dr);
2379 /* A helper function used in the comparator function to sort data
2380    references.  T1 and T2 are two trees to be compared.
2381 The function returns -1, 0, or 1. */
2383 static int
2384 compare_tree (tree t1, tree t2)
2386 int i, cmp;
2387 enum tree_code code;
2388 char tclass;
2390 if (t1 == t2)
2391 return 0;
2392 if (t1 == NULL)
2393 return -1;
2394 if (t2 == NULL)
2395 return 1;
2398 if (TREE_CODE (t1) != TREE_CODE (t2))
2399 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2401 code = TREE_CODE (t1);
2402 switch (code)
2404 /* For const values, we can just use hash values for comparisons. */
2405 case INTEGER_CST:
2406 case REAL_CST:
2407 case FIXED_CST:
2408 case STRING_CST:
2409 case COMPLEX_CST:
2410 case VECTOR_CST:
2412 hashval_t h1 = iterative_hash_expr (t1, 0);
2413 hashval_t h2 = iterative_hash_expr (t2, 0);
2414 if (h1 != h2)
2415 return h1 < h2 ? -1 : 1;
2416 break;
2419 case SSA_NAME:
2420 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2421 if (cmp != 0)
2422 return cmp;
2424 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2425 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2426 break;
2428 default:
2429 tclass = TREE_CODE_CLASS (code);
2431 /* For var-decl, we could compare their UIDs. */
2432 if (tclass == tcc_declaration)
2434 if (DECL_UID (t1) != DECL_UID (t2))
2435 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2436 break;
2439 /* For expressions with operands, compare their operands recursively. */
2440 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2442 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2443 if (cmp != 0)
2444 return cmp;
2448 return 0;
2452 /* Compare two data-references DRA and DRB so that they are ordered into
2453    chunks suitable for grouping.  */
2455 static int
2456 dr_group_sort_cmp (const void *dra_, const void *drb_)
2458 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2459 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2460 int cmp;
2462 /* Stabilize sort. */
2463 if (dra == drb)
2464 return 0;
2466 /* Ordering of DRs according to base. */
2467 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2469 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2470 if (cmp != 0)
2471 return cmp;
2474 /* And according to DR_OFFSET. */
2475 if (!dr_equal_offsets_p (dra, drb))
2477 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2478 if (cmp != 0)
2479 return cmp;
2482 /* Put reads before writes. */
2483 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2484 return DR_IS_READ (dra) ? -1 : 1;
2486  /* Then sort by access size.  */
2487 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2488 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2490 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2491 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2492 if (cmp != 0)
2493 return cmp;
2496  /* And by step.  */
2497 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2499 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2500 if (cmp != 0)
2501 return cmp;
2504  /* Then sort by DR_INIT.  In case of identical DRs, sort by stmt UID.  */
2505 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2506 if (cmp == 0)
2507 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2508 return cmp;
2511 /* Function vect_analyze_data_ref_accesses.
2513 Analyze the access pattern of all the data references in the loop.
2515 FORNOW: the only access pattern that is considered vectorizable is a
2516 simple step 1 (consecutive) access.
2518 FORNOW: handle only arrays and pointer accesses. */
2520 bool
2521 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2523 unsigned int i;
2524 vec<data_reference_p> datarefs;
2525 struct data_reference *dr;
2527 if (dump_enabled_p ())
2528 dump_printf_loc (MSG_NOTE, vect_location,
2529 "=== vect_analyze_data_ref_accesses ===\n");
2531 if (loop_vinfo)
2532 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2533 else
2534 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2536 if (datarefs.is_empty ())
2537 return true;
2539 /* Sort the array of datarefs to make building the interleaving chains
2540     linear.  Don't modify the original vector's order; it is needed for
2541 determining what dependencies are reversed. */
2542 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2543 datarefs_copy.qsort (dr_group_sort_cmp);
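  /* For example, the sort places accesses such as a[i], a[i+1], a[i+2] next
     to each other (same base, offset and step, increasing DR_INIT), so the
     chain building below only has to look at neighbouring elements.  */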
2545 /* Build the interleaving chains. */
2546 for (i = 0; i < datarefs_copy.length () - 1;)
2548 data_reference_p dra = datarefs_copy[i];
2549 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2550 stmt_vec_info lastinfo = NULL;
2551 for (i = i + 1; i < datarefs_copy.length (); ++i)
2553 data_reference_p drb = datarefs_copy[i];
2554 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2556 /* ??? Imperfect sorting (non-compatible types, non-modulo
2557	     accesses, same accesses) can lead to a group being artificially
2558	     split here as we don't just skip over those.  If it really
2559	     matters, we can push those to a worklist and re-iterate
2560	     over them.  Then we can just skip ahead to the next DR here.  */
2562	  /* Check that the data-refs have the same first location (except init)
2563	     and that they are both either stores or loads (not a load and a
2564	     store, and not masked loads or stores).  */
2565 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2566 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2567 DR_BASE_ADDRESS (drb), 0)
2568 || !dr_equal_offsets_p (dra, drb)
2569 || !gimple_assign_single_p (DR_STMT (dra))
2570 || !gimple_assign_single_p (DR_STMT (drb)))
2571 break;
2573 /* Check that the data-refs have the same constant size and step. */
2574 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2575 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2576 if (!tree_fits_uhwi_p (sza)
2577 || !tree_fits_uhwi_p (szb)
2578 || !tree_int_cst_equal (sza, szb)
2579 || !tree_fits_shwi_p (DR_STEP (dra))
2580 || !tree_fits_shwi_p (DR_STEP (drb))
2581 || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
2582 break;
2584 /* Do not place the same access in the interleaving chain twice. */
2585 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2586 break;
2588 /* Check the types are compatible.
2589 ??? We don't distinguish this during sorting. */
2590 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2591 TREE_TYPE (DR_REF (drb))))
2592 break;
2594 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2595 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2596 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2597 gcc_assert (init_a < init_b);
2599 /* If init_b == init_a + the size of the type * k, we have an
2600 interleaving, and DRA is accessed before DRB. */
2601 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2602 if ((init_b - init_a) % type_size_a != 0)
2603 break;
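	  /* For example (hypothetical values): 4-byte accesses with DR_INIT
	     0 and 4 satisfy (4 - 0) % 4 == 0 and form an interleaving in
	     which DRA is accessed before DRB; inits of 0 and 6 would split
	     the group here.  */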
2605	  /* If we have a store, the accesses must be adjacent.  This splits
2606 groups into chunks we support (we don't support vectorization
2607 of stores with gaps). */
2608 if (!DR_IS_READ (dra)
2609 && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
2610 (DR_INIT (datarefs_copy[i-1]))
2611 != type_size_a))
2612 break;
2614	  /* The step (if not zero) must be greater than the difference between
2615 data-refs' inits. This splits groups into suitable sizes. */
2616 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2617 if (step != 0 && step <= (init_b - init_a))
2618 break;
2620 if (dump_enabled_p ())
2622 dump_printf_loc (MSG_NOTE, vect_location,
2623 "Detected interleaving ");
2624 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2625 dump_printf (MSG_NOTE, " and ");
2626 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2627 dump_printf (MSG_NOTE, "\n");
2630 /* Link the found element into the group list. */
2631 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2633 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2634 lastinfo = stmtinfo_a;
2636 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2637 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2638 lastinfo = stmtinfo_b;
2642 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2643 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2644 && !vect_analyze_data_ref_access (dr))
2646 if (dump_enabled_p ())
2647 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2648 "not vectorized: complicated access pattern.\n");
2650 if (bb_vinfo)
2652 /* Mark the statement as not vectorizable. */
2653 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2654 continue;
2656 else
2658 datarefs_copy.release ();
2659 return false;
2663 datarefs_copy.release ();
2664 return true;
2668 /* Operator == between two dr_with_seg_len objects.
2670    This equality operator is used to check whether two data refs
2671    are the same, so that we will consider combining the
2672    aliasing checks of those two pairs of data-dependent data
2673    refs.  */
2675 static bool
2676 operator == (const dr_with_seg_len& d1,
2677 const dr_with_seg_len& d2)
2679 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2680 DR_BASE_ADDRESS (d2.dr), 0)
2681 && compare_tree (d1.offset, d2.offset) == 0
2682 && compare_tree (d1.seg_len, d2.seg_len) == 0;
2685 /* Function comp_dr_with_seg_len_pair.
2687 Comparison function for sorting objects of dr_with_seg_len_pair_t
2688 so that we can combine aliasing checks in one scan. */
2690 static int
2691 comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
2693 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2694 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2696 const dr_with_seg_len &p11 = p1->first,
2697 &p12 = p1->second,
2698 &p21 = p2->first,
2699 &p22 = p2->second;
2701  /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
2702     if a and c have the same base address and step, and b and d have the same
2703     base address and step.  Therefore, if either a&c or b&d don't have the same
2704     address and step, we don't care about the order of those two pairs after sorting.  */
2705 int comp_res;
2707 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2708 DR_BASE_ADDRESS (p21.dr))) != 0)
2709 return comp_res;
2710 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2711 DR_BASE_ADDRESS (p22.dr))) != 0)
2712 return comp_res;
2713 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2714 return comp_res;
2715 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2716 return comp_res;
2717 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2718 return comp_res;
2719 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
2720 return comp_res;
2722 return 0;
2725 /* Function vect_vfa_segment_size.
2727    Create an expression that computes the size of the segment
2728    that will be accessed for a data reference.  The function takes into
2729    account that realignment loads may access one more vector.
2731 Input:
2732 DR: The data reference.
2733 LENGTH_FACTOR: segment length to consider.
2735 Return an expression whose value is the size of segment which will be
2736 accessed by DR. */
2738 static tree
2739 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2741 tree segment_length;
2743 if (integer_zerop (DR_STEP (dr)))
2744 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2745 else
2746 segment_length = size_binop (MULT_EXPR,
2747 fold_convert (sizetype, DR_STEP (dr)),
2748 fold_convert (sizetype, length_factor));
2750 if (vect_supportable_dr_alignment (dr, false)
2751 == dr_explicit_realign_optimized)
2753 tree vector_size = TYPE_SIZE_UNIT
2754 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2756 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2758 return segment_length;
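/* For example (hypothetical values): a data ref with a 4-byte step and a
   length factor equal to a vectorization factor of 4 gets a 16-byte segment;
   if it uses the optimized explicit-realignment scheme, one extra vector
   (e.g. 16 more bytes) is added.  */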
2761 /* Function vect_prune_runtime_alias_test_list.
2763 Prune a list of ddrs to be tested at run-time by versioning for alias.
2764 Merge several alias checks into one if possible.
2765    Return FALSE if the resulting list of ddrs is longer than allowed by
2766    PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS; otherwise return TRUE.  */
2768 bool
2769 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2771 vec<ddr_p> may_alias_ddrs =
2772 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2773 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
2774 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2775 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2776 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2778 ddr_p ddr;
2779 unsigned int i;
2780 tree length_factor;
2782 if (dump_enabled_p ())
2783 dump_printf_loc (MSG_NOTE, vect_location,
2784 "=== vect_prune_runtime_alias_test_list ===\n");
2786 if (may_alias_ddrs.is_empty ())
2787 return true;
2789 /* Basically, for each pair of dependent data refs store_ptr_0
2790 and load_ptr_0, we create an expression:
2792 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2793     || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0)
2795 for aliasing checks. However, in some cases we can decrease
2796 the number of checks by combining two checks into one. For
2797 example, suppose we have another pair of data refs store_ptr_0
2798 and load_ptr_1, and if the following condition is satisfied:
2800 load_ptr_0 < load_ptr_1 &&
2801 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2803     (this condition means that, in each iteration of the vectorized loop,
2804 the accessed memory of store_ptr_0 cannot be between the memory
2805 of load_ptr_0 and load_ptr_1.)
2807 we then can use only the following expression to finish the
2808     aliasing checks between store_ptr_0 & load_ptr_0 and
2809 store_ptr_0 & load_ptr_1:
2811 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2812     || ((load_ptr_1 + load_segment_length_1) <= store_ptr_0)
2814 Note that we only consider that load_ptr_0 and load_ptr_1 have the
2815     same base address.  */
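  /* For instance (hypothetical refs A and B): for a store to A[0..VF) and a
     load from B[0..VF) the generated check is
       (&A + seg_A <= &B) || (&B + seg_B <= &A),
     and the merging below tries to cover several such loads from the same
     base with a single pair of comparisons.  */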
2817 comp_alias_ddrs.create (may_alias_ddrs.length ());
2819 /* First, we collect all data ref pairs for aliasing checks. */
2820 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2822 struct data_reference *dr_a, *dr_b;
2823 gimple dr_group_first_a, dr_group_first_b;
2824 tree segment_length_a, segment_length_b;
2825 gimple stmt_a, stmt_b;
2827 dr_a = DDR_A (ddr);
2828 stmt_a = DR_STMT (DDR_A (ddr));
2829 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2830 if (dr_group_first_a)
2832 stmt_a = dr_group_first_a;
2833 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2836 dr_b = DDR_B (ddr);
2837 stmt_b = DR_STMT (DDR_B (ddr));
2838 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2839 if (dr_group_first_b)
2841 stmt_b = dr_group_first_b;
2842 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2845 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2846 length_factor = scalar_loop_iters;
2847 else
2848 length_factor = size_int (vect_factor);
2849 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2850 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2852 dr_with_seg_len_pair_t dr_with_seg_len_pair
2853 (dr_with_seg_len (dr_a, segment_length_a),
2854 dr_with_seg_len (dr_b, segment_length_b));
2856 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
2857 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2859 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2862 /* Second, we sort the collected data ref pairs so that we can scan
2863 them once to combine all possible aliasing checks. */
2864 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
2866 /* Third, we scan the sorted dr pairs and check if we can combine
2867 alias checks of two neighbouring dr pairs. */
2868 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2870 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2871 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2872 *dr_b1 = &comp_alias_ddrs[i-1].second,
2873 *dr_a2 = &comp_alias_ddrs[i].first,
2874 *dr_b2 = &comp_alias_ddrs[i].second;
2876 /* Remove duplicate data ref pairs. */
2877 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2879 if (dump_enabled_p ())
2881 dump_printf_loc (MSG_NOTE, vect_location,
2882 "found equal ranges ");
2883 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2884 DR_REF (dr_a1->dr));
2885 dump_printf (MSG_NOTE, ", ");
2886 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2887 DR_REF (dr_b1->dr));
2888 dump_printf (MSG_NOTE, " and ");
2889 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2890 DR_REF (dr_a2->dr));
2891 dump_printf (MSG_NOTE, ", ");
2892 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2893 DR_REF (dr_b2->dr));
2894 dump_printf (MSG_NOTE, "\n");
2897 comp_alias_ddrs.ordered_remove (i--);
2898 continue;
2901 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2903	  /* We consider the case that DR_B1 and DR_B2 are the same memref,
2904 and DR_A1 and DR_A2 are two consecutive memrefs. */
2905 if (*dr_a1 == *dr_a2)
2907 std::swap (dr_a1, dr_b1);
2908 std::swap (dr_a2, dr_b2);
2911 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2912 DR_BASE_ADDRESS (dr_a2->dr),
2914 || !tree_fits_shwi_p (dr_a1->offset)
2915 || !tree_fits_shwi_p (dr_a2->offset))
2916 continue;
2918 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2919 - tree_to_shwi (dr_a1->offset));
2922 /* Now we check if the following condition is satisfied:
2924 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2926 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2927 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2928 have to make a best estimation. We can get the minimum value
2929 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2930 then either of the following two conditions can guarantee the
2931 one above:
2933 1: DIFF <= MIN_SEG_LEN_B
2934 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
2938 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2939 ? tree_to_shwi (dr_b1->seg_len)
2940 : vect_factor);
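	  /* For example (hypothetical values): if DR_A1 and DR_A2 start at
	     offsets 0 and 8 from the same base and MIN_SEG_LEN_B is 16,
	     DIFF = 8 <= 16, so the two checks are merged and DR_A1's
	     segment length becomes DR_A2's length plus 8.  */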
2942 if (diff <= min_seg_len_b
2943 || (tree_fits_shwi_p (dr_a1->seg_len)
2944 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
2946 if (dump_enabled_p ())
2948 dump_printf_loc (MSG_NOTE, vect_location,
2949 "merging ranges for ");
2950 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2951 DR_REF (dr_a1->dr));
2952 dump_printf (MSG_NOTE, ", ");
2953 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2954 DR_REF (dr_b1->dr));
2955 dump_printf (MSG_NOTE, " and ");
2956 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2957 DR_REF (dr_a2->dr));
2958 dump_printf (MSG_NOTE, ", ");
2959 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2960 DR_REF (dr_b2->dr));
2961 dump_printf (MSG_NOTE, "\n");
2964 dr_a1->seg_len = size_binop (PLUS_EXPR,
2965 dr_a2->seg_len, size_int (diff));
2966 comp_alias_ddrs.ordered_remove (i--);
2971 dump_printf_loc (MSG_NOTE, vect_location,
2972 "improved number of alias checks from %d to %d\n",
2973 may_alias_ddrs.length (), comp_alias_ddrs.length ());
2974 if ((int) comp_alias_ddrs.length () >
2975 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2976 return false;
2978 return true;
2981 /* Check whether a non-affine read in stmt is suitable for gather load
2982 and if so, return a builtin decl for that operation. */
2984 tree
2985 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2986 tree *offp, int *scalep)
2988 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2989 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2990 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2991 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2992 tree offtype = NULL_TREE;
2993 tree decl, base, off;
2994 machine_mode pmode;
2995 int punsignedp, pvolatilep;
2997 base = DR_REF (dr);
2998  /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF;
2999 see if we can use the def stmt of the address. */
3000 if (is_gimple_call (stmt)
3001 && gimple_call_internal_p (stmt)
3002 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
3003 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
3004 && TREE_CODE (base) == MEM_REF
3005 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
3006 && integer_zerop (TREE_OPERAND (base, 1))
3007 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
3009 gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3010 if (is_gimple_assign (def_stmt)
3011 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3012 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3015  /* The gather builtins need an address of the form
3016       loop_invariant + vector * {1, 2, 4, 8}
3017     or
3018       loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3019 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3020 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3021 multiplications and additions in it. To get a vector, we need
3022 a single SSA_NAME that will be defined in the loop and will
3023 contain everything that is not loop invariant and that can be
3024     vectorized.  The following code attempts to find such a preexisting
3025 SSA_NAME OFF and put the loop invariants into a tree BASE
3026 that can be gimplified before the loop. */
3027 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
3028 &pmode, &punsignedp, &pvolatilep, false);
3029 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
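  /* Illustrative outcome (hypothetical access): for a read like a[k[i]]
     with a loop-invariant array A of 4-byte ints and a loop-varying index
     k[i], the code below ends up with BASE = &a (plus any constant offset),
     OFF = the SSA_NAME holding k[i] (possibly after stripping a widening
     cast) and SCALE = 4, matching the loop_invariant + vector * {1,2,4,8}
     form above.  */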
3031 if (TREE_CODE (base) == MEM_REF)
3033 if (!integer_zerop (TREE_OPERAND (base, 1)))
3035 if (off == NULL_TREE)
3037 offset_int moff = mem_ref_offset (base);
3038 off = wide_int_to_tree (sizetype, moff);
3040 else
3041 off = size_binop (PLUS_EXPR, off,
3042 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3044 base = TREE_OPERAND (base, 0);
3046 else
3047 base = build_fold_addr_expr (base);
3049 if (off == NULL_TREE)
3050 off = size_zero_node;
3052  /* If base is not loop invariant, then if off is 0 we start with just
3053     the constant offset in the loop invariant BASE and continue with base
3054     as OFF; otherwise give up.
3055     We could handle that case by gimplifying the addition of base + off
3056     into some SSA_NAME and using that as off, but for now punt.  */
3057 if (!expr_invariant_in_loop_p (loop, base))
3059 if (!integer_zerop (off))
3060 return NULL_TREE;
3061 off = base;
3062 base = size_int (pbitpos / BITS_PER_UNIT);
3064 /* Otherwise put base + constant offset into the loop invariant BASE
3065 and continue with OFF. */
3066 else
3068 base = fold_convert (sizetype, base);
3069 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3072 /* OFF at this point may be either a SSA_NAME or some tree expression
3073 from get_inner_reference. Try to peel off loop invariants from it
3074 into BASE as long as possible. */
3075 STRIP_NOPS (off);
3076 while (offtype == NULL_TREE)
3078 enum tree_code code;
3079 tree op0, op1, add = NULL_TREE;
3081 if (TREE_CODE (off) == SSA_NAME)
3083 gimple def_stmt = SSA_NAME_DEF_STMT (off);
3085 if (expr_invariant_in_loop_p (loop, off))
3086 return NULL_TREE;
3088 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3089 break;
3091 op0 = gimple_assign_rhs1 (def_stmt);
3092 code = gimple_assign_rhs_code (def_stmt);
3093 op1 = gimple_assign_rhs2 (def_stmt);
3095 else
3097 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3098 return NULL_TREE;
3099 code = TREE_CODE (off);
3100 extract_ops_from_tree (off, &code, &op0, &op1);
3102 switch (code)
3104 case POINTER_PLUS_EXPR:
3105 case PLUS_EXPR:
3106 if (expr_invariant_in_loop_p (loop, op0))
3108 add = op0;
3109 off = op1;
3110 do_add:
3111 add = fold_convert (sizetype, add);
3112 if (scale != 1)
3113 add = size_binop (MULT_EXPR, add, size_int (scale));
3114 base = size_binop (PLUS_EXPR, base, add);
3115 continue;
3117 if (expr_invariant_in_loop_p (loop, op1))
3119 add = op1;
3120 off = op0;
3121 goto do_add;
3123 break;
3124 case MINUS_EXPR:
3125 if (expr_invariant_in_loop_p (loop, op1))
3127 add = fold_convert (sizetype, op1);
3128 add = size_binop (MINUS_EXPR, size_zero_node, add);
3129 off = op0;
3130 goto do_add;
3132 break;
3133 case MULT_EXPR:
3134 if (scale == 1 && tree_fits_shwi_p (op1))
3136 scale = tree_to_shwi (op1);
3137 off = op0;
3138 continue;
3140 break;
3141 case SSA_NAME:
3142 off = op0;
3143 continue;
3144 CASE_CONVERT:
3145 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3146 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3147 break;
3148 if (TYPE_PRECISION (TREE_TYPE (op0))
3149 == TYPE_PRECISION (TREE_TYPE (off)))
3151 off = op0;
3152 continue;
3154 if (TYPE_PRECISION (TREE_TYPE (op0))
3155 < TYPE_PRECISION (TREE_TYPE (off)))
3157 off = op0;
3158 offtype = TREE_TYPE (off);
3159 STRIP_NOPS (off);
3160 continue;
3162 break;
3163 default:
3164 break;
3166 break;
3169 /* If at the end OFF still isn't a SSA_NAME or isn't
3170 defined in the loop, punt. */
3171 if (TREE_CODE (off) != SSA_NAME
3172 || expr_invariant_in_loop_p (loop, off))
3173 return NULL_TREE;
3175 if (offtype == NULL_TREE)
3176 offtype = TREE_TYPE (off);
3178 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3179 offtype, scale);
3180 if (decl == NULL_TREE)
3181 return NULL_TREE;
3183 if (basep)
3184 *basep = base;
3185 if (offp)
3186 *offp = off;
3187 if (scalep)
3188 *scalep = scale;
3189 return decl;
3192 /* Function vect_analyze_data_refs.
3194 Find all the data references in the loop or basic block.
3196 The general structure of the analysis of data refs in the vectorizer is as
3197 follows:
3198 1- vect_analyze_data_refs(loop/bb): call
3199 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3200 in the loop/bb and their dependences.
3201 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3202 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3203 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3207 bool
3208 vect_analyze_data_refs (loop_vec_info loop_vinfo,
3209 bb_vec_info bb_vinfo,
3210 int *min_vf, unsigned *n_stmts)
3212 struct loop *loop = NULL;
3213 basic_block bb = NULL;
3214 unsigned int i;
3215 vec<data_reference_p> datarefs;
3216 struct data_reference *dr;
3217 tree scalar_type;
3219 if (dump_enabled_p ())
3220 dump_printf_loc (MSG_NOTE, vect_location,
3221 "=== vect_analyze_data_refs ===\n");
3223 if (loop_vinfo)
3225 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3227 loop = LOOP_VINFO_LOOP (loop_vinfo);
3228 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3229 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
3231 if (dump_enabled_p ())
3232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3233 "not vectorized: loop contains function calls"
3234 " or data references that cannot be analyzed\n");
3235 return false;
3238 for (i = 0; i < loop->num_nodes; i++)
3240 gimple_stmt_iterator gsi;
3242 for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3244 gimple stmt = gsi_stmt (gsi);
3245 if (is_gimple_debug (stmt))
3246 continue;
3247 ++*n_stmts;
3248 if (!find_data_references_in_stmt (loop, stmt, &datarefs))
3250 if (is_gimple_call (stmt) && loop->safelen)
3252 tree fndecl = gimple_call_fndecl (stmt), op;
3253 if (fndecl != NULL_TREE)
3255 struct cgraph_node *node = cgraph_node::get (fndecl);
3256 if (node != NULL && node->simd_clones != NULL)
3258 unsigned int j, n = gimple_call_num_args (stmt);
3259 for (j = 0; j < n; j++)
3261 op = gimple_call_arg (stmt, j);
3262 if (DECL_P (op)
3263 || (REFERENCE_CLASS_P (op)
3264 && get_base_address (op)))
3265 break;
3267 op = gimple_call_lhs (stmt);
3268 /* Ignore #pragma omp declare simd functions
3269 if they don't have data references in the
3270 call stmt itself. */
3271 if (j == n
3272 && !(op
3273 && (DECL_P (op)
3274 || (REFERENCE_CLASS_P (op)
3275 && get_base_address (op)))))
3276 continue;
3280 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3281 if (dump_enabled_p ())
3282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3283 "not vectorized: loop contains function "
3284 "calls or data references that cannot "
3285 "be analyzed\n");
3286 return false;
3291 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3293 else
3295 gimple_stmt_iterator gsi;
3297 bb = BB_VINFO_BB (bb_vinfo);
3298 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3300 gimple stmt = gsi_stmt (gsi);
3301 if (is_gimple_debug (stmt))
3302 continue;
3303 ++*n_stmts;
3304 if (!find_data_references_in_stmt (NULL, stmt,
3305 &BB_VINFO_DATAREFS (bb_vinfo)))
3307 /* Mark the rest of the basic-block as unvectorizable. */
3308 for (; !gsi_end_p (gsi); gsi_next (&gsi))
3310 stmt = gsi_stmt (gsi);
3311 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
3313 break;
3317 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
3320  /* Go through the data-refs and check that the analysis succeeded.  Update
3321     the pointer from the stmt_vec_info struct to the DR and vectype.  */
3323 FOR_EACH_VEC_ELT (datarefs, i, dr)
3325 gimple stmt;
3326 stmt_vec_info stmt_info;
3327 tree base, offset, init;
3328 bool gather = false;
3329 bool simd_lane_access = false;
3330 int vf;
3332 again:
3333 if (!dr || !DR_REF (dr))
3335 if (dump_enabled_p ())
3336 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3337 "not vectorized: unhandled data-ref\n");
3338 return false;
3341 stmt = DR_STMT (dr);
3342 stmt_info = vinfo_for_stmt (stmt);
3344 /* Discard clobbers from the dataref vector. We will remove
3345 clobber stmts during vectorization. */
3346 if (gimple_clobber_p (stmt))
3348 free_data_ref (dr);
3349 if (i == datarefs.length () - 1)
3351 datarefs.pop ();
3352 break;
3354 datarefs.ordered_remove (i);
3355 dr = datarefs[i];
3356 goto again;
3359 /* Check that analysis of the data-ref succeeded. */
3360 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3361 || !DR_STEP (dr))
3363 bool maybe_gather
3364 = DR_IS_READ (dr)
3365 && !TREE_THIS_VOLATILE (DR_REF (dr))
3366 && targetm.vectorize.builtin_gather != NULL;
3367 bool maybe_simd_lane_access
3368 = loop_vinfo && loop->simduid;
3370	  /* If the target supports vector gather loads, or if this might be
3371	     a SIMD lane access, see if they can be used.  */
3372 if (loop_vinfo
3373 && (maybe_gather || maybe_simd_lane_access)
3374 && !nested_in_vect_loop_p (loop, stmt))
3376 struct data_reference *newdr
3377 = create_data_ref (NULL, loop_containing_stmt (stmt),
3378 DR_REF (dr), stmt, true);
3379 gcc_assert (newdr != NULL && DR_REF (newdr));
3380 if (DR_BASE_ADDRESS (newdr)
3381 && DR_OFFSET (newdr)
3382 && DR_INIT (newdr)
3383 && DR_STEP (newdr)
3384 && integer_zerop (DR_STEP (newdr)))
3386 if (maybe_simd_lane_access)
3388 tree off = DR_OFFSET (newdr);
3389 STRIP_NOPS (off);
3390 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3391 && TREE_CODE (off) == MULT_EXPR
3392 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3394 tree step = TREE_OPERAND (off, 1);
3395 off = TREE_OPERAND (off, 0);
3396 STRIP_NOPS (off);
3397 if (CONVERT_EXPR_P (off)
3398 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3399 0)))
3400 < TYPE_PRECISION (TREE_TYPE (off)))
3401 off = TREE_OPERAND (off, 0);
3402 if (TREE_CODE (off) == SSA_NAME)
3404 gimple def = SSA_NAME_DEF_STMT (off);
3405 tree reft = TREE_TYPE (DR_REF (newdr));
3406 if (is_gimple_call (def)
3407 && gimple_call_internal_p (def)
3408 && (gimple_call_internal_fn (def)
3409 == IFN_GOMP_SIMD_LANE))
3411 tree arg = gimple_call_arg (def, 0);
3412 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3413 arg = SSA_NAME_VAR (arg);
3414 if (arg == loop->simduid
3415 /* For now. */
3416 && tree_int_cst_equal
3417 (TYPE_SIZE_UNIT (reft),
3418 step))
3420 DR_OFFSET (newdr) = ssize_int (0);
3421 DR_STEP (newdr) = step;
3422 DR_ALIGNED_TO (newdr)
3423 = size_int (BIGGEST_ALIGNMENT);
3424 dr = newdr;
3425 simd_lane_access = true;
3431 if (!simd_lane_access && maybe_gather)
3433 dr = newdr;
3434 gather = true;
3437 if (!gather && !simd_lane_access)
3438 free_data_ref (newdr);
3441 if (!gather && !simd_lane_access)
3443 if (dump_enabled_p ())
3445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3446 "not vectorized: data ref analysis "
3447 "failed ");
3448 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3449 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3452 if (bb_vinfo)
3453 break;
3455 return false;
3459 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3461 if (dump_enabled_p ())
3462 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3463 "not vectorized: base addr of dr is a "
3464 "constant\n");
3466 if (bb_vinfo)
3467 break;
3469 if (gather || simd_lane_access)
3470 free_data_ref (dr);
3471 return false;
3474 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3476 if (dump_enabled_p ())
3478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3479 "not vectorized: volatile type ");
3480 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3481 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3484 if (bb_vinfo)
3485 break;
3487 return false;
3490 if (stmt_can_throw_internal (stmt))
3492 if (dump_enabled_p ())
3494 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3495 "not vectorized: statement can throw an "
3496 "exception ");
3497 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3498 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3501 if (bb_vinfo)
3502 break;
3504 if (gather || simd_lane_access)
3505 free_data_ref (dr);
3506 return false;
3509 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3510 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3512 if (dump_enabled_p ())
3514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3515 "not vectorized: statement is bitfield "
3516 "access ");
3517 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3518 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3521 if (bb_vinfo)
3522 break;
3524 if (gather || simd_lane_access)
3525 free_data_ref (dr);
3526 return false;
3529 base = unshare_expr (DR_BASE_ADDRESS (dr));
3530 offset = unshare_expr (DR_OFFSET (dr));
3531 init = unshare_expr (DR_INIT (dr));
3533 if (is_gimple_call (stmt)
3534 && (!gimple_call_internal_p (stmt)
3535 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3536 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3538 if (dump_enabled_p ())
3540 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3541 "not vectorized: dr in a call ");
3542 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3543 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3546 if (bb_vinfo)
3547 break;
3549 if (gather || simd_lane_access)
3550 free_data_ref (dr);
3551 return false;
3554 /* Update DR field in stmt_vec_info struct. */
3556      /* If the dataref is in an inner-loop of the loop that is considered
3557	 for vectorization, we also want to analyze the access relative to
3558	 the outer-loop (DR contains information only relative to the
3559	 inner-most enclosing loop).  We do that by building a reference to the
3560	 first location accessed by the inner-loop, and analyzing it relative to
3561	 the outer-loop.  */
3562 if (loop && nested_in_vect_loop_p (loop, stmt))
3564 tree outer_step, outer_base, outer_init;
3565 HOST_WIDE_INT pbitsize, pbitpos;
3566 tree poffset;
3567 machine_mode pmode;
3568 int punsignedp, pvolatilep;
3569 affine_iv base_iv, offset_iv;
3570 tree dinit;
3572 /* Build a reference to the first location accessed by the
3573 inner-loop: *(BASE+INIT). (The first location is actually
3574 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3575 tree inner_base = build_fold_indirect_ref
3576 (fold_build_pointer_plus (base, init));
3578 if (dump_enabled_p ())
3580 dump_printf_loc (MSG_NOTE, vect_location,
3581 "analyze in outer-loop: ");
3582 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3583 dump_printf (MSG_NOTE, "\n");
3586 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3587 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3588 gcc_assert (outer_base != NULL_TREE);
3590 if (pbitpos % BITS_PER_UNIT != 0)
3592 if (dump_enabled_p ())
3593 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3594 "failed: bit offset alignment.\n");
3595 return false;
3598 outer_base = build_fold_addr_expr (outer_base);
3599 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3600 &base_iv, false))
3602 if (dump_enabled_p ())
3603 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3604 "failed: evolution of base is not affine.\n");
3605 return false;
3608 if (offset)
3610 if (poffset)
3611 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3612 poffset);
3613 else
3614 poffset = offset;
3617 if (!poffset)
3619 offset_iv.base = ssize_int (0);
3620 offset_iv.step = ssize_int (0);
3622 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3623 &offset_iv, false))
3625 if (dump_enabled_p ())
3626 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3627 "evolution of offset is not affine.\n");
3628 return false;
3631 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3632 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3633 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3634 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3635 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3637 outer_step = size_binop (PLUS_EXPR,
3638 fold_convert (ssizetype, base_iv.step),
3639 fold_convert (ssizetype, offset_iv.step));
3641 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3642 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3643 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3644 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3645 STMT_VINFO_DR_OFFSET (stmt_info) =
3646 fold_convert (ssizetype, offset_iv.base);
3647 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3648 size_int (highest_pow2_factor (offset_iv.base));
3650 if (dump_enabled_p ())
3652 dump_printf_loc (MSG_NOTE, vect_location,
3653 "\touter base_address: ");
3654 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3655 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3656 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3657 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3658 STMT_VINFO_DR_OFFSET (stmt_info));
3659 dump_printf (MSG_NOTE,
3660 "\n\touter constant offset from base address: ");
3661 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3662 STMT_VINFO_DR_INIT (stmt_info));
3663 dump_printf (MSG_NOTE, "\n\touter step: ");
3664 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3665 STMT_VINFO_DR_STEP (stmt_info));
3666 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3667 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3668 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3669 dump_printf (MSG_NOTE, "\n");
3673 if (STMT_VINFO_DATA_REF (stmt_info))
3675 if (dump_enabled_p ())
3677 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3678 "not vectorized: more than one data ref "
3679 "in stmt: ");
3680 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3681 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3684 if (bb_vinfo)
3685 break;
3687 if (gather || simd_lane_access)
3688 free_data_ref (dr);
3689 return false;
3692 STMT_VINFO_DATA_REF (stmt_info) = dr;
3693 if (simd_lane_access)
3695 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3696 free_data_ref (datarefs[i]);
3697 datarefs[i] = dr;
3700 /* Set vectype for STMT. */
3701 scalar_type = TREE_TYPE (DR_REF (dr));
3702 STMT_VINFO_VECTYPE (stmt_info)
3703 = get_vectype_for_scalar_type (scalar_type);
3704 if (!STMT_VINFO_VECTYPE (stmt_info))
3706 if (dump_enabled_p ())
3708 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3709 "not vectorized: no vectype for stmt: ");
3710 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3711 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3712 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3713 scalar_type);
3714 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3717 if (bb_vinfo)
3718 break;
3720 if (gather || simd_lane_access)
3722 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3723 if (gather)
3724 free_data_ref (dr);
3726 return false;
3728 else
3730 if (dump_enabled_p ())
3732 dump_printf_loc (MSG_NOTE, vect_location,
3733 "got vectype for stmt: ");
3734 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3735 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3736 STMT_VINFO_VECTYPE (stmt_info));
3737 dump_printf (MSG_NOTE, "\n");
3741 /* Adjust the minimal vectorization factor according to the
3742 vector type. */
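  /* For example (illustrative only): a V8HI vector type has
     TYPE_VECTOR_SUBPARTS == 8, so *MIN_VF is raised to at least 8 here.  */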
3743 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3744 if (vf > *min_vf)
3745 *min_vf = vf;
3747 if (gather)
3749 tree off;
3751 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3752 if (gather
3753 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3754 gather = false;
3755 if (!gather)
3757 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3758 free_data_ref (dr);
3759 if (dump_enabled_p ())
3761 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3762 "not vectorized: not suitable for gather "
3763 "load ");
3764 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3765 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3767 return false;
3770 datarefs[i] = dr;
3771 STMT_VINFO_GATHER_P (stmt_info) = true;
3773 else if (loop_vinfo
3774 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3776 if (nested_in_vect_loop_p (loop, stmt)
3777 || !DR_IS_READ (dr))
3779 if (dump_enabled_p ())
3781 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3782 "not vectorized: not suitable for strided "
3783 "load ");
3784 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3785 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3787 return false;
3789 STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
 3793 /* If we stopped analysis at the first dataref we could not analyze
 3794 when trying to vectorize a basic-block, mark the rest of the datarefs
 3795 as not vectorizable and truncate the vector of datarefs. That
 3796 avoids spending useless time analyzing their dependence. */
3797 if (i != datarefs.length ())
3799 gcc_assert (bb_vinfo != NULL);
3800 for (unsigned j = i; j < datarefs.length (); ++j)
3802 data_reference_p dr = datarefs[j];
3803 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3804 free_data_ref (dr);
3806 datarefs.truncate (i);
3809 return true;
3813 /* Function vect_get_new_vect_var.
 3815 Returns a name for a new variable. The current naming scheme prepends the
 3816 prefix "vect", "stmp" or "vectp" (depending on the value of VAR_KIND),
 3817 followed by an underscore, to NAME if provided; otherwise the prefix
 3818 alone is used as the name of the vectorizer-generated variable. */
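/* For example (illustrative only):
     vect_get_new_vect_var (vectype, vect_simple_var, "x")
   returns a new temporary whose name is based on "vect_x".  */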
3820 tree
3821 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3823 const char *prefix;
3824 tree new_vect_var;
3826 switch (var_kind)
3828 case vect_simple_var:
3829 prefix = "vect";
3830 break;
3831 case vect_scalar_var:
3832 prefix = "stmp";
3833 break;
3834 case vect_pointer_var:
3835 prefix = "vectp";
3836 break;
3837 default:
3838 gcc_unreachable ();
3841 if (name)
3843 char* tmp = concat (prefix, "_", name, NULL);
3844 new_vect_var = create_tmp_reg (type, tmp);
3845 free (tmp);
3847 else
3848 new_vect_var = create_tmp_reg (type, prefix);
3850 return new_vect_var;
 3853 /* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
3855 static void
3856 vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
3857 stmt_vec_info stmt_info)
3859 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
3860 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3861 int misalign = DR_MISALIGNMENT (dr);
3862 if (misalign == -1)
3863 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
3864 else
3865 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
3868 /* Function vect_create_addr_base_for_vector_ref.
3870 Create an expression that computes the address of the first memory location
3871 that will be accessed for a data reference.
3873 Input:
3874 STMT: The statement containing the data reference.
3875 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
 3876 OFFSET: Optional. If supplied, it is added to the initial address.
 3877 LOOP: Specify relative to which loop-nest the address should be computed.
3878 For example, when the dataref is in an inner-loop nested in an
3879 outer-loop that is now being vectorized, LOOP can be either the
3880 outer-loop, or the inner-loop. The first memory location accessed
3881 by the following dataref ('in' points to short):
3883 for (i=0; i<N; i++)
3884 for (j=0; j<M; j++)
3885 s += in[i+j]
3887 is as follows:
3888 if LOOP=i_loop: &in (relative to i_loop)
3889 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3890 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
 3891 initial address. Unlike OFFSET, which is the number of elements to
3892 be added, BYTE_OFFSET is measured in bytes.
3894 Output:
3895 1. Return an SSA_NAME whose value is the address of the memory location of
3896 the first vector of the data reference.
3897 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3898 these statement(s) which define the returned SSA_NAME.
3900 FORNOW: We are only handling array accesses with step 1. */
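/* Illustrative sketch (not part of the original comment): assuming the
   element type of the reference is short (TYPE_SIZE_UNIT == 2), the offsets
   are combined roughly as

     base_offset = DR_OFFSET + DR_INIT
                   + OFFSET * 2          (OFFSET is scaled by the element size)
                   + BYTE_OFFSET         (BYTE_OFFSET is used as-is)
     addr_base   = (vectype *) (DR_BASE_ADDRESS + base_offset)

   so OFFSET == 4 contributes 8 bytes while BYTE_OFFSET == 4 contributes
   4 bytes.  */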
3902 tree
3903 vect_create_addr_base_for_vector_ref (gimple stmt,
3904 gimple_seq *new_stmt_list,
3905 tree offset,
3906 struct loop *loop,
3907 tree byte_offset)
3909 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3910 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3911 tree data_ref_base;
3912 const char *base_name;
3913 tree addr_base;
3914 tree dest;
3915 gimple_seq seq = NULL;
3916 tree base_offset;
3917 tree init;
3918 tree vect_ptr_type;
3919 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3920 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3922 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3924 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3926 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3928 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3929 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3930 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3932 else
3934 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3935 base_offset = unshare_expr (DR_OFFSET (dr));
3936 init = unshare_expr (DR_INIT (dr));
3939 if (loop_vinfo)
3940 base_name = get_name (data_ref_base);
3941 else
3943 base_offset = ssize_int (0);
3944 init = ssize_int (0);
3945 base_name = get_name (DR_REF (dr));
3948 /* Create base_offset */
3949 base_offset = size_binop (PLUS_EXPR,
3950 fold_convert (sizetype, base_offset),
3951 fold_convert (sizetype, init));
3953 if (offset)
3955 offset = fold_build2 (MULT_EXPR, sizetype,
3956 fold_convert (sizetype, offset), step);
3957 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3958 base_offset, offset);
3960 if (byte_offset)
3962 byte_offset = fold_convert (sizetype, byte_offset);
3963 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3964 base_offset, byte_offset);
3967 /* base + base_offset */
3968 if (loop_vinfo)
3969 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3970 else
3972 addr_base = build1 (ADDR_EXPR,
3973 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3974 unshare_expr (DR_REF (dr)));
3977 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3978 addr_base = fold_convert (vect_ptr_type, addr_base);
3979 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3980 addr_base = force_gimple_operand (addr_base, &seq, false, dest);
3981 gimple_seq_add_seq (new_stmt_list, seq);
3983 if (DR_PTR_INFO (dr)
3984 && TREE_CODE (addr_base) == SSA_NAME)
3986 vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
3987 if (offset || byte_offset)
3988 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3991 if (dump_enabled_p ())
3993 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3994 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3995 dump_printf (MSG_NOTE, "\n");
3998 return addr_base;
4002 /* Function vect_create_data_ref_ptr.
4004 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
4005 location accessed in the loop by STMT, along with the def-use update
4006 chain to appropriately advance the pointer through the loop iterations.
4007 Also set aliasing information for the pointer. This pointer is used by
4008 the callers to this function to create a memory reference expression for
4009 vector load/store access.
4011 Input:
4012 1. STMT: a stmt that references memory. Expected to be of the form
4013 GIMPLE_ASSIGN <name, data-ref> or
4014 GIMPLE_ASSIGN <data-ref, name>.
4015 2. AGGR_TYPE: the type of the reference, which should be either a vector
4016 or an array.
4017 3. AT_LOOP: the loop where the vector memref is to be created.
4018 4. OFFSET (optional): an offset to be added to the initial address accessed
4019 by the data-ref in STMT.
4020 5. BSI: location where the new stmts are to be placed if there is no loop
4021 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4022 pointing to the initial address.
4023 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4024 to the initial address accessed by the data-ref in STMT. This is
4025 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4026 in bytes.
4028 Output:
4029 1. Declare a new ptr to vector_type, and have it point to the base of the
 4030 data reference (initial address accessed by the data reference).
4031 For example, for vector of type V8HI, the following code is generated:
4033 v8hi *ap;
4034 ap = (v8hi *)initial_address;
4036 if OFFSET is not supplied:
4037 initial_address = &a[init];
4038 if OFFSET is supplied:
4039 initial_address = &a[init + OFFSET];
4040 if BYTE_OFFSET is supplied:
4041 initial_address = &a[init] + BYTE_OFFSET;
4043 Return the initial_address in INITIAL_ADDRESS.
4045 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4046 update the pointer in each iteration of the loop.
4048 Return the increment stmt that updates the pointer in PTR_INCR.
4050 3. Set INV_P to true if the access pattern of the data reference in the
4051 vectorized loop is invariant. Set it to false otherwise.
4053 4. Return the pointer. */
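/* Illustrative note (not part of the original comment): the cross-iteration
   increment created in step (3) below advances the pointer by
   TYPE_SIZE_UNIT (AGGR_TYPE) bytes per iteration, e.g.

     ap_2 = ap_1 + 16;    for a 16-byte vector type such as V8HI

   except that the step is zero when the scalar access is invariant in the
   loop (*INV_P set), and negated when DR_STEP is negative.  */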
4055 tree
4056 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
4057 tree offset, tree *initial_address,
4058 gimple_stmt_iterator *gsi, gimple *ptr_incr,
4059 bool only_init, bool *inv_p, tree byte_offset)
4061 const char *base_name;
4062 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4063 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4064 struct loop *loop = NULL;
4065 bool nested_in_vect_loop = false;
4066 struct loop *containing_loop = NULL;
4067 tree aggr_ptr_type;
4068 tree aggr_ptr;
4069 tree new_temp;
4070 gimple vec_stmt;
4071 gimple_seq new_stmt_list = NULL;
4072 edge pe = NULL;
4073 basic_block new_bb;
4074 tree aggr_ptr_init;
4075 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4076 tree aptr;
4077 gimple_stmt_iterator incr_gsi;
4078 bool insert_after;
4079 tree indx_before_incr, indx_after_incr;
4080 gimple incr;
4081 tree step;
4082 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4084 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4085 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4087 if (loop_vinfo)
4089 loop = LOOP_VINFO_LOOP (loop_vinfo);
4090 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4091 containing_loop = (gimple_bb (stmt))->loop_father;
4092 pe = loop_preheader_edge (loop);
4094 else
4096 gcc_assert (bb_vinfo);
4097 only_init = true;
4098 *ptr_incr = NULL;
4101 /* Check the step (evolution) of the load in LOOP, and record
4102 whether it's invariant. */
4103 if (nested_in_vect_loop)
4104 step = STMT_VINFO_DR_STEP (stmt_info);
4105 else
4106 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
4108 if (integer_zerop (step))
4109 *inv_p = true;
4110 else
4111 *inv_p = false;
4113 /* Create an expression for the first address accessed by this load
4114 in LOOP. */
4115 base_name = get_name (DR_BASE_ADDRESS (dr));
4117 if (dump_enabled_p ())
4119 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4120 dump_printf_loc (MSG_NOTE, vect_location,
4121 "create %s-pointer variable to type: ",
4122 get_tree_code_name (TREE_CODE (aggr_type)));
4123 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4124 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4125 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4126 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4127 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4128 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4129 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4130 else
4131 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4132 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4133 dump_printf (MSG_NOTE, "\n");
4136 /* (1) Create the new aggregate-pointer variable.
4137 Vector and array types inherit the alias set of their component
4138 type by default so we need to use a ref-all pointer if the data
4139 reference does not conflict with the created aggregated data
4140 reference because it is not addressable. */
4141 bool need_ref_all = false;
4142 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4143 get_alias_set (DR_REF (dr))))
4144 need_ref_all = true;
4145 /* Likewise for any of the data references in the stmt group. */
4146 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4148 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4151 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4152 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4153 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4154 get_alias_set (DR_REF (sdr))))
4156 need_ref_all = true;
4157 break;
4159 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4161 while (orig_stmt);
4163 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4164 need_ref_all);
4165 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4168 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4169 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4170 def-use update cycles for the pointer: one relative to the outer-loop
4171 (LOOP), which is what steps (3) and (4) below do. The other is relative
4172 to the inner-loop (which is the inner-most loop containing the dataref),
 4173 and this is done by step (5) below.
4175 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4176 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4177 redundant. Steps (3),(4) create the following:
4179 vp0 = &base_addr;
4180 LOOP: vp1 = phi(vp0,vp2)
4183 vp2 = vp1 + step
4184 goto LOOP
4186 If there is an inner-loop nested in loop, then step (5) will also be
4187 applied, and an additional update in the inner-loop will be created:
4189 vp0 = &base_addr;
4190 LOOP: vp1 = phi(vp0,vp2)
4192 inner: vp3 = phi(vp1,vp4)
4193 vp4 = vp3 + inner_step
4194 if () goto inner
4196 vp2 = vp1 + step
4197 if () goto LOOP */
4199 /* (2) Calculate the initial address of the aggregate-pointer, and set
4200 the aggregate-pointer to point to it before the loop. */
 4202 /* Create: &(base[init_val + offset]) + byte_offset in the loop preheader. */
4204 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4205 offset, loop, byte_offset);
4206 if (new_stmt_list)
4208 if (pe)
4210 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4211 gcc_assert (!new_bb);
4213 else
4214 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4217 *initial_address = new_temp;
4219 /* Create: p = (aggr_type *) initial_base */
4220 if (TREE_CODE (new_temp) != SSA_NAME
4221 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
4223 vec_stmt = gimple_build_assign (aggr_ptr,
4224 fold_convert (aggr_ptr_type, new_temp));
4225 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
4226 /* Copy the points-to information if it exists. */
4227 if (DR_PTR_INFO (dr))
4228 vect_duplicate_ssa_name_ptr_info (aggr_ptr_init, dr, stmt_info);
4229 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
4230 if (pe)
4232 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
4233 gcc_assert (!new_bb);
4235 else
4236 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
4238 else
4239 aggr_ptr_init = new_temp;
4241 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4242 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4243 inner-loop nested in LOOP (during outer-loop vectorization). */
4245 /* No update in loop is required. */
4246 if (only_init && (!loop_vinfo || at_loop == loop))
4247 aptr = aggr_ptr_init;
4248 else
4250 /* The step of the aggregate pointer is the type size. */
4251 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4252 /* One exception to the above is when the scalar step of the load in
4253 LOOP is zero. In this case the step here is also zero. */
4254 if (*inv_p)
4255 iv_step = size_zero_node;
4256 else if (tree_int_cst_sgn (step) == -1)
4257 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4259 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4261 create_iv (aggr_ptr_init,
4262 fold_convert (aggr_ptr_type, iv_step),
4263 aggr_ptr, loop, &incr_gsi, insert_after,
4264 &indx_before_incr, &indx_after_incr);
4265 incr = gsi_stmt (incr_gsi);
4266 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4268 /* Copy the points-to information if it exists. */
4269 if (DR_PTR_INFO (dr))
4271 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4272 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4274 if (ptr_incr)
4275 *ptr_incr = incr;
4277 aptr = indx_before_incr;
4280 if (!nested_in_vect_loop || only_init)
4281 return aptr;
4284 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
 4285 nested in LOOP, if it exists. */
4287 gcc_assert (nested_in_vect_loop);
4288 if (!only_init)
4290 standard_iv_increment_position (containing_loop, &incr_gsi,
4291 &insert_after);
4292 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4293 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4294 &indx_after_incr);
4295 incr = gsi_stmt (incr_gsi);
4296 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4298 /* Copy the points-to information if it exists. */
4299 if (DR_PTR_INFO (dr))
4301 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4302 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4304 if (ptr_incr)
4305 *ptr_incr = incr;
4307 return indx_before_incr;
4309 else
4310 gcc_unreachable ();
4314 /* Function bump_vector_ptr
4316 Increment a pointer (to a vector type) by vector-size. If requested,
4317 i.e. if PTR-INCR is given, then also connect the new increment stmt
4318 to the existing def-use update-chain of the pointer, by modifying
4319 the PTR_INCR as illustrated below:
4321 The pointer def-use update-chain before this function:
4322 DATAREF_PTR = phi (p_0, p_2)
4323 ....
4324 PTR_INCR: p_2 = DATAREF_PTR + step
4326 The pointer def-use update-chain after this function:
4327 DATAREF_PTR = phi (p_0, p_2)
4328 ....
4329 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4330 ....
4331 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4333 Input:
4334 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4335 in the loop.
4336 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4337 the loop. The increment amount across iterations is expected
4338 to be vector_size.
4339 BSI - location where the new update stmt is to be placed.
4340 STMT - the original scalar memory-access stmt that is being vectorized.
4341 BUMP - optional. The offset by which to bump the pointer. If not given,
4342 the offset is assumed to be vector_size.
4344 Output: Return NEW_DATAREF_PTR as illustrated above.
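/* For example (illustrative only): for a V8HI vector type TYPE_SIZE_UNIT is
   16, so when BUMP is not given the statement created below is
   NEW_DATAREF_PTR = DATAREF_PTR + 16.  */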
4348 tree
4349 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
4350 gimple stmt, tree bump)
4352 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4353 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4354 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4355 tree update = TYPE_SIZE_UNIT (vectype);
4356 gassign *incr_stmt;
4357 ssa_op_iter iter;
4358 use_operand_p use_p;
4359 tree new_dataref_ptr;
4361 if (bump)
4362 update = bump;
4364 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4365 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4366 dataref_ptr, update);
4367 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4369 /* Copy the points-to information if it exists. */
4370 if (DR_PTR_INFO (dr))
4372 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4373 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4376 if (!ptr_incr)
4377 return new_dataref_ptr;
4379 /* Update the vector-pointer's cross-iteration increment. */
4380 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4382 tree use = USE_FROM_PTR (use_p);
4384 if (use == dataref_ptr)
4385 SET_USE (use_p, new_dataref_ptr);
4386 else
4387 gcc_assert (tree_int_cst_compare (use, update) == 0);
4390 return new_dataref_ptr;
4394 /* Function vect_create_destination_var.
4396 Create a new temporary of type VECTYPE. */
4398 tree
4399 vect_create_destination_var (tree scalar_dest, tree vectype)
4401 tree vec_dest;
4402 const char *name;
4403 char *new_name;
4404 tree type;
4405 enum vect_var_kind kind;
4407 kind = vectype ? vect_simple_var : vect_scalar_var;
4408 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4410 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4412 name = get_name (scalar_dest);
4413 if (name)
4414 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4415 else
4416 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4417 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4418 free (new_name);
4420 return vec_dest;
4423 /* Function vect_grouped_store_supported.
4425 Returns TRUE if interleave high and interleave low permutations
4426 are supported, and FALSE otherwise. */
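/* Illustrative example (not part of the original comment): for a V4SI vector
   (nelt == 4) and a power-of-two group size, the two selectors checked below
   are

     interleave high: {0, 4, 1, 5}
     interleave low:  {2, 6, 3, 7}

   i.e. the mask built by the nelt/2 loop and the same mask shifted by
   nelt/2.  */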
4428 bool
4429 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4431 machine_mode mode = TYPE_MODE (vectype);
4433 /* vect_permute_store_chain requires the group size to be equal to 3 or
4434 be a power of two. */
4435 if (count != 3 && exact_log2 (count) == -1)
4437 if (dump_enabled_p ())
4438 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4439 "the size of the group of accesses"
4440 " is not a power of 2 or not eqaul to 3\n");
4441 return false;
4444 /* Check that the permutation is supported. */
4445 if (VECTOR_MODE_P (mode))
4447 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4448 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4450 if (count == 3)
4452 unsigned int j0 = 0, j1 = 0, j2 = 0;
4453 unsigned int i, j;
4455 for (j = 0; j < 3; j++)
4457 int nelt0 = ((3 - j) * nelt) % 3;
4458 int nelt1 = ((3 - j) * nelt + 1) % 3;
4459 int nelt2 = ((3 - j) * nelt + 2) % 3;
4460 for (i = 0; i < nelt; i++)
4462 if (3 * i + nelt0 < nelt)
4463 sel[3 * i + nelt0] = j0++;
4464 if (3 * i + nelt1 < nelt)
4465 sel[3 * i + nelt1] = nelt + j1++;
4466 if (3 * i + nelt2 < nelt)
4467 sel[3 * i + nelt2] = 0;
4469 if (!can_vec_perm_p (mode, false, sel))
4471 if (dump_enabled_p ())
4472 dump_printf (MSG_MISSED_OPTIMIZATION,
4473 "permutaion op not supported by target.\n");
4474 return false;
4477 for (i = 0; i < nelt; i++)
4479 if (3 * i + nelt0 < nelt)
4480 sel[3 * i + nelt0] = 3 * i + nelt0;
4481 if (3 * i + nelt1 < nelt)
4482 sel[3 * i + nelt1] = 3 * i + nelt1;
4483 if (3 * i + nelt2 < nelt)
4484 sel[3 * i + nelt2] = nelt + j2++;
4486 if (!can_vec_perm_p (mode, false, sel))
4488 if (dump_enabled_p ())
4489 dump_printf (MSG_MISSED_OPTIMIZATION,
4490 "permutaion op not supported by target.\n");
4491 return false;
4494 return true;
4496 else
4498 /* If length is not equal to 3 then only power of 2 is supported. */
4499 gcc_assert (exact_log2 (count) != -1);
4501 for (i = 0; i < nelt / 2; i++)
4503 sel[i * 2] = i;
4504 sel[i * 2 + 1] = i + nelt;
4506 if (can_vec_perm_p (mode, false, sel))
4508 for (i = 0; i < nelt; i++)
4509 sel[i] += nelt / 2;
4510 if (can_vec_perm_p (mode, false, sel))
4511 return true;
4516 if (dump_enabled_p ())
4517 dump_printf (MSG_MISSED_OPTIMIZATION,
4518 "permutaion op not supported by target.\n");
4519 return false;
4523 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4524 type VECTYPE. */
4526 bool
4527 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4529 return vect_lanes_optab_supported_p ("vec_store_lanes",
4530 vec_store_lanes_optab,
4531 vectype, count);
4535 /* Function vect_permute_store_chain.
4537 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4538 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4539 the data correctly for the stores. Return the final references for stores
4540 in RESULT_CHAIN.
4542 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4543 The input is 4 vectors each containing 8 elements. We assign a number to
4544 each element, the input sequence is:
4546 1st vec: 0 1 2 3 4 5 6 7
4547 2nd vec: 8 9 10 11 12 13 14 15
4548 3rd vec: 16 17 18 19 20 21 22 23
4549 4th vec: 24 25 26 27 28 29 30 31
4551 The output sequence should be:
4553 1st vec: 0 8 16 24 1 9 17 25
4554 2nd vec: 2 10 18 26 3 11 19 27
 4555 3rd vec: 4 12 20 28 5 13 21 29
4556 4th vec: 6 14 22 30 7 15 23 31
4558 i.e., we interleave the contents of the four vectors in their order.
4560 We use interleave_high/low instructions to create such output. The input of
4561 each interleave_high/low operation is two vectors:
4562 1st vec 2nd vec
4563 0 1 2 3 4 5 6 7
4564 the even elements of the result vector are obtained left-to-right from the
4565 high/low elements of the first vector. The odd elements of the result are
4566 obtained left-to-right from the high/low elements of the second vector.
4567 The output of interleave_high will be: 0 4 1 5
4568 and of interleave_low: 2 6 3 7
4571 The permutation is done in log LENGTH stages. In each stage interleave_high
4572 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4573 where the first argument is taken from the first half of DR_CHAIN and the
 4574 second argument from its second half.
4575 In our example,
4577 I1: interleave_high (1st vec, 3rd vec)
4578 I2: interleave_low (1st vec, 3rd vec)
4579 I3: interleave_high (2nd vec, 4th vec)
4580 I4: interleave_low (2nd vec, 4th vec)
4582 The output for the first stage is:
4584 I1: 0 16 1 17 2 18 3 19
4585 I2: 4 20 5 21 6 22 7 23
4586 I3: 8 24 9 25 10 26 11 27
4587 I4: 12 28 13 29 14 30 15 31
4589 The output of the second stage, i.e. the final result is:
4591 I1: 0 8 16 24 1 9 17 25
4592 I2: 2 10 18 26 3 11 19 27
 4593 I3: 4 12 20 28 5 13 21 29
4594 I4: 6 14 22 30 7 15 23 31. */
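/* Illustrative example (not part of the original comment): with 8 elements
   per vector the two VEC_PERM_EXPR masks used for the power-of-two case are

     perm_mask_high = {0, 8, 1, 9, 2, 10, 3, 11}
     perm_mask_low  = {4, 12, 5, 13, 6, 14, 7, 15}

   so I1 above is VEC_PERM_EXPR <1st vec, 3rd vec, perm_mask_high>
   = {0, 16, 1, 17, 2, 18, 3, 19}.  */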
4596 void
4597 vect_permute_store_chain (vec<tree> dr_chain,
4598 unsigned int length,
4599 gimple stmt,
4600 gimple_stmt_iterator *gsi,
4601 vec<tree> *result_chain)
4603 tree vect1, vect2, high, low;
4604 gimple perm_stmt;
4605 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4606 tree perm_mask_low, perm_mask_high;
4607 tree data_ref;
4608 tree perm3_mask_low, perm3_mask_high;
4609 unsigned int i, n, log_length = exact_log2 (length);
4610 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4611 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4613 result_chain->quick_grow (length);
4614 memcpy (result_chain->address (), dr_chain.address (),
4615 length * sizeof (tree));
4617 if (length == 3)
4619 unsigned int j0 = 0, j1 = 0, j2 = 0;
4621 for (j = 0; j < 3; j++)
4623 int nelt0 = ((3 - j) * nelt) % 3;
4624 int nelt1 = ((3 - j) * nelt + 1) % 3;
4625 int nelt2 = ((3 - j) * nelt + 2) % 3;
4627 for (i = 0; i < nelt; i++)
4629 if (3 * i + nelt0 < nelt)
4630 sel[3 * i + nelt0] = j0++;
4631 if (3 * i + nelt1 < nelt)
4632 sel[3 * i + nelt1] = nelt + j1++;
4633 if (3 * i + nelt2 < nelt)
4634 sel[3 * i + nelt2] = 0;
4636 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4638 for (i = 0; i < nelt; i++)
4640 if (3 * i + nelt0 < nelt)
4641 sel[3 * i + nelt0] = 3 * i + nelt0;
4642 if (3 * i + nelt1 < nelt)
4643 sel[3 * i + nelt1] = 3 * i + nelt1;
4644 if (3 * i + nelt2 < nelt)
4645 sel[3 * i + nelt2] = nelt + j2++;
4647 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4649 vect1 = dr_chain[0];
4650 vect2 = dr_chain[1];
4652 /* Create interleaving stmt:
4653 low = VEC_PERM_EXPR <vect1, vect2,
4654 {j, nelt, *, j + 1, nelt + j + 1, *,
4655 j + 2, nelt + j + 2, *, ...}> */
4656 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4657 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4658 vect2, perm3_mask_low);
4659 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4661 vect1 = data_ref;
4662 vect2 = dr_chain[2];
4663 /* Create interleaving stmt:
4664 low = VEC_PERM_EXPR <vect1, vect2,
4665 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4666 6, 7, nelt + j + 2, ...}> */
4667 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4668 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4669 vect2, perm3_mask_high);
4670 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4671 (*result_chain)[j] = data_ref;
4674 else
4676 /* If length is not equal to 3 then only power of 2 is supported. */
4677 gcc_assert (exact_log2 (length) != -1);
4679 for (i = 0, n = nelt / 2; i < n; i++)
4681 sel[i * 2] = i;
4682 sel[i * 2 + 1] = i + nelt;
4684 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4686 for (i = 0; i < nelt; i++)
4687 sel[i] += nelt / 2;
4688 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4690 for (i = 0, n = log_length; i < n; i++)
4692 for (j = 0; j < length/2; j++)
4694 vect1 = dr_chain[j];
4695 vect2 = dr_chain[j+length/2];
4697 /* Create interleaving stmt:
4698 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4699 ...}> */
4700 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4701 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4702 vect2, perm_mask_high);
4703 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4704 (*result_chain)[2*j] = high;
4706 /* Create interleaving stmt:
4707 low = VEC_PERM_EXPR <vect1, vect2,
4708 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4709 ...}> */
4710 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4711 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4712 vect2, perm_mask_low);
4713 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4714 (*result_chain)[2*j+1] = low;
4716 memcpy (dr_chain.address (), result_chain->address (),
4717 length * sizeof (tree));
4722 /* Function vect_setup_realignment
4724 This function is called when vectorizing an unaligned load using
4725 the dr_explicit_realign[_optimized] scheme.
4726 This function generates the following code at the loop prolog:
4728 p = initial_addr;
4729 x msq_init = *(floor(p)); # prolog load
4730 realignment_token = call target_builtin;
4731 loop:
4732 x msq = phi (msq_init, ---)
4734 The stmts marked with x are generated only for the case of
4735 dr_explicit_realign_optimized.
4737 The code above sets up a new (vector) pointer, pointing to the first
4738 location accessed by STMT, and a "floor-aligned" load using that pointer.
4739 It also generates code to compute the "realignment-token" (if the relevant
4740 target hook was defined), and creates a phi-node at the loop-header bb
4741 whose arguments are the result of the prolog-load (created by this
4742 function) and the result of a load that takes place in the loop (to be
4743 created by the caller to this function).
4745 For the case of dr_explicit_realign_optimized:
4746 The caller to this function uses the phi-result (msq) to create the
4747 realignment code inside the loop, and sets up the missing phi argument,
4748 as follows:
4749 loop:
4750 msq = phi (msq_init, lsq)
4751 lsq = *(floor(p')); # load in loop
4752 result = realign_load (msq, lsq, realignment_token);
4754 For the case of dr_explicit_realign:
4755 loop:
4756 msq = *(floor(p)); # load in loop
4757 p' = p + (VS-1);
4758 lsq = *(floor(p')); # load in loop
4759 result = realign_load (msq, lsq, realignment_token);
4761 Input:
4762 STMT - (scalar) load stmt to be vectorized. This load accesses
4763 a memory location that may be unaligned.
4764 BSI - place where new code is to be inserted.
4765 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4766 is used.
4768 Output:
4769 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4770 target hook, if defined.
4771 Return value - the result of the loop-header phi node. */
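/* Illustrative note (not part of the original comment): the "floor(p)" loads
   above are formed by masking off the low bits of the pointer, e.g. for a
   vector type with 16-byte alignment

     p_floor = p & -16;

   built below with BIT_AND_EXPR and -(HOST_WIDE_INT) TYPE_ALIGN_UNIT
   (vectype), which rounds the address down to the preceding 16-byte
   boundary.  */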
4773 tree
4774 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4775 tree *realignment_token,
4776 enum dr_alignment_support alignment_support_scheme,
4777 tree init_addr,
4778 struct loop **at_loop)
4780 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4781 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4782 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4783 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4784 struct loop *loop = NULL;
4785 edge pe = NULL;
4786 tree scalar_dest = gimple_assign_lhs (stmt);
4787 tree vec_dest;
4788 gimple inc;
4789 tree ptr;
4790 tree data_ref;
4791 basic_block new_bb;
4792 tree msq_init = NULL_TREE;
4793 tree new_temp;
4794 gphi *phi_stmt;
4795 tree msq = NULL_TREE;
4796 gimple_seq stmts = NULL;
4797 bool inv_p;
4798 bool compute_in_loop = false;
4799 bool nested_in_vect_loop = false;
4800 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4801 struct loop *loop_for_initial_load = NULL;
4803 if (loop_vinfo)
4805 loop = LOOP_VINFO_LOOP (loop_vinfo);
4806 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4809 gcc_assert (alignment_support_scheme == dr_explicit_realign
4810 || alignment_support_scheme == dr_explicit_realign_optimized);
4812 /* We need to generate three things:
4813 1. the misalignment computation
4814 2. the extra vector load (for the optimized realignment scheme).
4815 3. the phi node for the two vectors from which the realignment is
4816 done (for the optimized realignment scheme). */
4818 /* 1. Determine where to generate the misalignment computation.
4820 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4821 calculation will be generated by this function, outside the loop (in the
4822 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4823 caller, inside the loop.
4825 Background: If the misalignment remains fixed throughout the iterations of
4826 the loop, then both realignment schemes are applicable, and also the
4827 misalignment computation can be done outside LOOP. This is because we are
4828 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4829 are a multiple of VS (the Vector Size), and therefore the misalignment in
4830 different vectorized LOOP iterations is always the same.
4831 The problem arises only if the memory access is in an inner-loop nested
4832 inside LOOP, which is now being vectorized using outer-loop vectorization.
4833 This is the only case when the misalignment of the memory access may not
4834 remain fixed throughout the iterations of the inner-loop (as explained in
4835 detail in vect_supportable_dr_alignment). In this case, not only is the
4836 optimized realignment scheme not applicable, but also the misalignment
4837 computation (and generation of the realignment token that is passed to
4838 REALIGN_LOAD) have to be done inside the loop.
4840 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4841 or not, which in turn determines if the misalignment is computed inside
4842 the inner-loop, or outside LOOP. */
4844 if (init_addr != NULL_TREE || !loop_vinfo)
4846 compute_in_loop = true;
4847 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4851 /* 2. Determine where to generate the extra vector load.
4853 For the optimized realignment scheme, instead of generating two vector
4854 loads in each iteration, we generate a single extra vector load in the
4855 preheader of the loop, and in each iteration reuse the result of the
4856 vector load from the previous iteration. In case the memory access is in
4857 an inner-loop nested inside LOOP, which is now being vectorized using
4858 outer-loop vectorization, we need to determine whether this initial vector
4859 load should be generated at the preheader of the inner-loop, or can be
4860 generated at the preheader of LOOP. If the memory access has no evolution
4861 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4862 to be generated inside LOOP (in the preheader of the inner-loop). */
4864 if (nested_in_vect_loop)
4866 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4867 bool invariant_in_outerloop =
4868 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4869 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4871 else
4872 loop_for_initial_load = loop;
4873 if (at_loop)
4874 *at_loop = loop_for_initial_load;
4876 if (loop_for_initial_load)
4877 pe = loop_preheader_edge (loop_for_initial_load);
4879 /* 3. For the case of the optimized realignment, create the first vector
4880 load at the loop preheader. */
4882 if (alignment_support_scheme == dr_explicit_realign_optimized)
4884 /* Create msq_init = *(floor(p1)) in the loop preheader */
4885 gassign *new_stmt;
4887 gcc_assert (!compute_in_loop);
4888 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4889 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4890 NULL_TREE, &init_addr, NULL, &inc,
4891 true, &inv_p);
4892 new_temp = copy_ssa_name (ptr);
4893 new_stmt = gimple_build_assign
4894 (new_temp, BIT_AND_EXPR, ptr,
4895 build_int_cst (TREE_TYPE (ptr),
4896 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4897 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4898 gcc_assert (!new_bb);
4899 data_ref
4900 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4901 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4902 new_stmt = gimple_build_assign (vec_dest, data_ref);
4903 new_temp = make_ssa_name (vec_dest, new_stmt);
4904 gimple_assign_set_lhs (new_stmt, new_temp);
4905 if (pe)
4907 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4908 gcc_assert (!new_bb);
4910 else
4911 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4913 msq_init = gimple_assign_lhs (new_stmt);
4916 /* 4. Create realignment token using a target builtin, if available.
4917 It is done either inside the containing loop, or before LOOP (as
4918 determined above). */
4920 if (targetm.vectorize.builtin_mask_for_load)
4922 gcall *new_stmt;
4923 tree builtin_decl;
 4925 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4926 if (!init_addr)
4928 /* Generate the INIT_ADDR computation outside LOOP. */
4929 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4930 NULL_TREE, loop);
4931 if (loop)
4933 pe = loop_preheader_edge (loop);
4934 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4935 gcc_assert (!new_bb);
4937 else
4938 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4941 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4942 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4943 vec_dest =
4944 vect_create_destination_var (scalar_dest,
4945 gimple_call_return_type (new_stmt));
4946 new_temp = make_ssa_name (vec_dest, new_stmt);
4947 gimple_call_set_lhs (new_stmt, new_temp);
4949 if (compute_in_loop)
4950 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4951 else
4953 /* Generate the misalignment computation outside LOOP. */
4954 pe = loop_preheader_edge (loop);
4955 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4956 gcc_assert (!new_bb);
4959 *realignment_token = gimple_call_lhs (new_stmt);
4961 /* The result of the CALL_EXPR to this builtin is determined from
4962 the value of the parameter and no global variables are touched
4963 which makes the builtin a "const" function. Requiring the
4964 builtin to have the "const" attribute makes it unnecessary
4965 to call mark_call_clobbered. */
4966 gcc_assert (TREE_READONLY (builtin_decl));
4969 if (alignment_support_scheme == dr_explicit_realign)
4970 return msq;
4972 gcc_assert (!compute_in_loop);
4973 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4976 /* 5. Create msq = phi <msq_init, lsq> in loop */
4978 pe = loop_preheader_edge (containing_loop);
4979 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4980 msq = make_ssa_name (vec_dest);
4981 phi_stmt = create_phi_node (msq, containing_loop->header);
4982 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4984 return msq;
4988 /* Function vect_grouped_load_supported.
4990 Returns TRUE if even and odd permutations are supported,
4991 and FALSE otherwise. */
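/* Illustrative example (not part of the original comment): for a V4SI vector
   (nelt == 4) and a power-of-two COUNT, the two selectors checked below are

     extract even: {0, 2, 4, 6}
     extract odd:  {1, 3, 5, 7}

   taken across the concatenation of two input vectors.  */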
4993 bool
4994 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4996 machine_mode mode = TYPE_MODE (vectype);
4998 /* vect_permute_load_chain requires the group size to be equal to 3 or
4999 be a power of two. */
5000 if (count != 3 && exact_log2 (count) == -1)
5002 if (dump_enabled_p ())
5003 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5004 "the size of the group of accesses"
5005 " is not a power of 2 or not equal to 3\n");
5006 return false;
5009 /* Check that the permutation is supported. */
5010 if (VECTOR_MODE_P (mode))
5012 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
5013 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5015 if (count == 3)
5017 unsigned int k;
5018 for (k = 0; k < 3; k++)
5020 for (i = 0; i < nelt; i++)
5021 if (3 * i + k < 2 * nelt)
5022 sel[i] = 3 * i + k;
5023 else
5024 sel[i] = 0;
5025 if (!can_vec_perm_p (mode, false, sel))
5027 if (dump_enabled_p ())
5028 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5029 "shuffle of 3 loads is not supported by"
5030 " target\n");
5031 return false;
5033 for (i = 0, j = 0; i < nelt; i++)
5034 if (3 * i + k < 2 * nelt)
5035 sel[i] = i;
5036 else
5037 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5038 if (!can_vec_perm_p (mode, false, sel))
5040 if (dump_enabled_p ())
5041 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5042 "shuffle of 3 loads is not supported by"
5043 " target\n");
5044 return false;
5047 return true;
5049 else
5051 /* If length is not equal to 3 then only power of 2 is supported. */
5052 gcc_assert (exact_log2 (count) != -1);
5053 for (i = 0; i < nelt; i++)
5054 sel[i] = i * 2;
5055 if (can_vec_perm_p (mode, false, sel))
5057 for (i = 0; i < nelt; i++)
5058 sel[i] = i * 2 + 1;
5059 if (can_vec_perm_p (mode, false, sel))
5060 return true;
5065 if (dump_enabled_p ())
5066 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5067 "extract even/odd not supported by target\n");
5068 return false;
5071 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5072 type VECTYPE. */
5074 bool
5075 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5077 return vect_lanes_optab_supported_p ("vec_load_lanes",
5078 vec_load_lanes_optab,
5079 vectype, count);
5082 /* Function vect_permute_load_chain.
5084 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5085 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5086 the input data correctly. Return the final references for loads in
5087 RESULT_CHAIN.
5089 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5090 The input is 4 vectors each containing 8 elements. We assign a number to each
5091 element, the input sequence is:
5093 1st vec: 0 1 2 3 4 5 6 7
5094 2nd vec: 8 9 10 11 12 13 14 15
5095 3rd vec: 16 17 18 19 20 21 22 23
5096 4th vec: 24 25 26 27 28 29 30 31
5098 The output sequence should be:
5100 1st vec: 0 4 8 12 16 20 24 28
5101 2nd vec: 1 5 9 13 17 21 25 29
5102 3rd vec: 2 6 10 14 18 22 26 30
5103 4th vec: 3 7 11 15 19 23 27 31
5105 i.e., the first output vector should contain the first elements of each
5106 interleaving group, etc.
5108 We use extract_even/odd instructions to create such output. The input of
5109 each extract_even/odd operation is two vectors
5110 1st vec 2nd vec
5111 0 1 2 3 4 5 6 7
5113 and the output is the vector of extracted even/odd elements. The output of
5114 extract_even will be: 0 2 4 6
5115 and of extract_odd: 1 3 5 7
5118 The permutation is done in log LENGTH stages. In each stage extract_even
5119 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5120 their order. In our example,
5122 E1: extract_even (1st vec, 2nd vec)
5123 E2: extract_odd (1st vec, 2nd vec)
5124 E3: extract_even (3rd vec, 4th vec)
5125 E4: extract_odd (3rd vec, 4th vec)
5127 The output for the first stage will be:
5129 E1: 0 2 4 6 8 10 12 14
5130 E2: 1 3 5 7 9 11 13 15
5131 E3: 16 18 20 22 24 26 28 30
5132 E4: 17 19 21 23 25 27 29 31
5134 In order to proceed and create the correct sequence for the next stage (or
5135 for the correct output, if the second stage is the last one, as in our
5136 example), we first put the output of extract_even operation and then the
5137 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5138 The input for the second stage is:
5140 1st vec (E1): 0 2 4 6 8 10 12 14
5141 2nd vec (E3): 16 18 20 22 24 26 28 30
5142 3rd vec (E2): 1 3 5 7 9 11 13 15
5143 4th vec (E4): 17 19 21 23 25 27 29 31
5145 The output of the second stage:
5147 E1: 0 4 8 12 16 20 24 28
5148 E2: 2 6 10 14 18 22 26 30
5149 E3: 1 5 9 13 17 21 25 29
5150 E4: 3 7 11 15 19 23 27 31
5152 And RESULT_CHAIN after reordering:
5154 1st vec (E1): 0 4 8 12 16 20 24 28
5155 2nd vec (E3): 1 5 9 13 17 21 25 29
5156 3rd vec (E2): 2 6 10 14 18 22 26 30
5157 4th vec (E4): 3 7 11 15 19 23 27 31. */
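/* Illustrative example (not part of the original comment): with 8 elements
   per vector the masks used for the power-of-two case are

     perm_mask_even = {0, 2, 4, 6, 8, 10, 12, 14}
     perm_mask_odd  = {1, 3, 5, 7, 9, 11, 13, 15}

   so E1 above is VEC_PERM_EXPR <1st vec, 2nd vec, perm_mask_even>
   = {0, 2, 4, 6, 8, 10, 12, 14}.  */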
5159 static void
5160 vect_permute_load_chain (vec<tree> dr_chain,
5161 unsigned int length,
5162 gimple stmt,
5163 gimple_stmt_iterator *gsi,
5164 vec<tree> *result_chain)
5166 tree data_ref, first_vect, second_vect;
5167 tree perm_mask_even, perm_mask_odd;
5168 tree perm3_mask_low, perm3_mask_high;
5169 gimple perm_stmt;
5170 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5171 unsigned int i, j, log_length = exact_log2 (length);
5172 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5173 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5175 result_chain->quick_grow (length);
5176 memcpy (result_chain->address (), dr_chain.address (),
5177 length * sizeof (tree));
5179 if (length == 3)
5181 unsigned int k;
5183 for (k = 0; k < 3; k++)
5185 for (i = 0; i < nelt; i++)
5186 if (3 * i + k < 2 * nelt)
5187 sel[i] = 3 * i + k;
5188 else
5189 sel[i] = 0;
5190 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
5192 for (i = 0, j = 0; i < nelt; i++)
5193 if (3 * i + k < 2 * nelt)
5194 sel[i] = i;
5195 else
5196 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5198 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
5200 first_vect = dr_chain[0];
5201 second_vect = dr_chain[1];
5203 /* Create interleaving stmt (low part of):
5204 low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5205 ...}> */
5206 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5207 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5208 second_vect, perm3_mask_low);
5209 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5211 /* Create interleaving stmt (high part of):
5212 high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5213 ...}> */
5214 first_vect = data_ref;
5215 second_vect = dr_chain[2];
5216 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5217 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5218 second_vect, perm3_mask_high);
5219 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5220 (*result_chain)[k] = data_ref;
5223 else
5225 /* If length is not equal to 3 then only power of 2 is supported. */
5226 gcc_assert (exact_log2 (length) != -1);
5228 for (i = 0; i < nelt; ++i)
5229 sel[i] = i * 2;
5230 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
5232 for (i = 0; i < nelt; ++i)
5233 sel[i] = i * 2 + 1;
5234 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
5236 for (i = 0; i < log_length; i++)
5238 for (j = 0; j < length; j += 2)
5240 first_vect = dr_chain[j];
5241 second_vect = dr_chain[j+1];
5243 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5244 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5245 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5246 first_vect, second_vect,
5247 perm_mask_even);
5248 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5249 (*result_chain)[j/2] = data_ref;
5251 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5252 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5253 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5254 first_vect, second_vect,
5255 perm_mask_odd);
5256 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5257 (*result_chain)[j/2+length/2] = data_ref;
5259 memcpy (dr_chain.address (), result_chain->address (),
5260 length * sizeof (tree));
5265 /* Function vect_shift_permute_load_chain.
 5267 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate a
 5268 sequence of stmts to reorder the input data accordingly.
 5269 Return the final references for loads in RESULT_CHAIN.
 5270 Return true if successful, false otherwise.
5272 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5273 The input is 3 vectors each containing 8 elements. We assign a
5274 number to each element, the input sequence is:
5276 1st vec: 0 1 2 3 4 5 6 7
5277 2nd vec: 8 9 10 11 12 13 14 15
5278 3rd vec: 16 17 18 19 20 21 22 23
5280 The output sequence should be:
5282 1st vec: 0 3 6 9 12 15 18 21
5283 2nd vec: 1 4 7 10 13 16 19 22
5284 3rd vec: 2 5 8 11 14 17 20 23
5286 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5288 First we shuffle all 3 vectors to get correct elements order:
5290 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5291 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5292 3rd vec: (16 19 22) (17 20 23) (18 21)
5294 Next we unite and shift vector 3 times:
5296 1st step:
5297 shift right by 6 the concatenation of:
5298 "1st vec" and "2nd vec"
5299 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5300 "2nd vec" and "3rd vec"
5301 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5302 "3rd vec" and "1st vec"
5303 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5304 | New vectors |
5306 So that now new vectors are:
5308 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5309 2nd vec: (10 13) (16 19 22) (17 20 23)
5310 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5312 2nd step:
5313 shift right by 5 the concatenation of:
5314 "1st vec" and "3rd vec"
5315 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5316 "2nd vec" and "1st vec"
5317 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5318 "3rd vec" and "2nd vec"
5319 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5320 | New vectors |
5322 So that now new vectors are:
5324 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5325 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5326 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5328 3rd step:
5329 shift right by 5 the concatenation of:
5330 "1st vec" and "1st vec"
5331 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5332 shift right by 3 the concatenation of:
5333 "2nd vec" and "2nd vec"
5334 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5335 | New vectors |
5337 So that now all vectors are READY:
5338 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5339 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5340 3rd vec: ( 1 4 7) (10 13) (16 19 22)
 5342 This algorithm is faster than the one in vect_permute_load_chain if:
 5343 1. "shift of a concatenation" is faster than general permutation.
 5344 This is usually so.
 5345 2. The TARGET machine can't execute vector instructions in parallel.
 5346 This is because each step of the algorithm depends on the previous one.
 5347 The algorithm in vect_permute_load_chain is much more parallel.
5349 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
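/* Illustrative example for the LENGTH == 2 case (not part of the original
   comment): with 8 elements per vector the masks built below are

     perm2_mask1 = {0, 2, 4, 6, 1, 3, 5, 7}
     perm2_mask2 = {1, 3, 5, 7, 0, 2, 4, 6}
     shift1_mask = {4, 5, 6, 7, 8, 9, 10, 11}
     select_mask = {0, 1, 2, 3, 12, 13, 14, 15}

   Shuffling each input vector with itself and then applying the shift and
   select masks to the pair leaves the even elements 0 2 4 6 8 10 12 14 in
   the first output vector and the odd elements 1 3 5 7 9 11 13 15 in the
   second.  */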
5352 static bool
5353 vect_shift_permute_load_chain (vec<tree> dr_chain,
5354 unsigned int length,
5355 gimple stmt,
5356 gimple_stmt_iterator *gsi,
5357 vec<tree> *result_chain)
5359 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5360 tree perm2_mask1, perm2_mask2, perm3_mask;
5361 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5362 gimple perm_stmt;
5364 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5365 unsigned int i;
5366 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5367 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5368 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5369 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5371 result_chain->quick_grow (length);
5372 memcpy (result_chain->address (), dr_chain.address (),
5373 length * sizeof (tree));
5375 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
5377 unsigned int j, log_length = exact_log2 (length);
5378 for (i = 0; i < nelt / 2; ++i)
5379 sel[i] = i * 2;
5380 for (i = 0; i < nelt / 2; ++i)
5381 sel[nelt / 2 + i] = i * 2 + 1;
5382 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5384 if (dump_enabled_p ())
5385 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5386 "shuffle of 2 fields structure is not \
5387 supported by target\n");
5388 return false;
5390 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
5392 for (i = 0; i < nelt / 2; ++i)
5393 sel[i] = i * 2 + 1;
5394 for (i = 0; i < nelt / 2; ++i)
5395 sel[nelt / 2 + i] = i * 2;
5396 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5398 if (dump_enabled_p ())
5399 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5400 "shuffle of 2 fields structure is not \
5401 supported by target\n");
5402 return false;
5404 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
5406 /* Generating permutation constant to shift all elements.
5407 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5408 for (i = 0; i < nelt; i++)
5409 sel[i] = nelt / 2 + i;
5410 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5412 if (dump_enabled_p ())
5413 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5414 "shift permutation is not supported by target\n");
5415 return false;
5417 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5419 /* Generating permutation constant to select vector from 2.
5420 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5421 for (i = 0; i < nelt / 2; i++)
5422 sel[i] = i;
5423 for (i = nelt / 2; i < nelt; i++)
5424 sel[i] = nelt + i;
5425 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5429 "select is not supported by target\n");
5430 return false;
5432 select_mask = vect_gen_perm_mask_checked (vectype, sel);
5434 for (i = 0; i < log_length; i++)
5436 for (j = 0; j < length; j += 2)
5438 first_vect = dr_chain[j];
5439 second_vect = dr_chain[j + 1];
5441 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5442 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5443 first_vect, first_vect,
5444 perm2_mask1);
5445 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5446 vect[0] = data_ref;
5448 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5449 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5450 second_vect, second_vect,
5451 perm2_mask2);
5452 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5453 vect[1] = data_ref;
5455 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5456 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5457 vect[0], vect[1], shift1_mask);
5458 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5459 (*result_chain)[j/2 + length/2] = data_ref;
5461 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5462 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5463 vect[0], vect[1], select_mask);
5464 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5465 (*result_chain)[j/2] = data_ref;
5467 memcpy (dr_chain.address (), result_chain->address (),
5468 length * sizeof (tree));
5470 return true;
5472 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5474 unsigned int k = 0, l = 0;
5476 /* Generating permutation constant to get all elements in right order.
5477 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5478 for (i = 0; i < nelt; i++)
5480 if (3 * k + (l % 3) >= nelt)
5482 k = 0;
5483 l += (3 - (nelt % 3));
5485 sel[i] = 3 * k + (l % 3);
5486 k++;
5488 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5490 if (dump_enabled_p ())
5491 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5492 "shuffle of 3 fields structure is not \
5493 supported by target\n");
5494 return false;
5496 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
5498 /* Generating permutation constant to shift all elements.
5499 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5500 for (i = 0; i < nelt; i++)
5501 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5502 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5504 if (dump_enabled_p ())
5505 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5506 "shift permutation is not supported by target\n");
5507 return false;
5509 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5511 /* Generating permutation constant to shift all elements.
5512 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5513 for (i = 0; i < nelt; i++)
5514 sel[i] = 2 * (nelt / 3) + 1 + i;
5515 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5517 if (dump_enabled_p ())
5518 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5519 "shift permutation is not supported by target\n");
5520 return false;
5522 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
5524 /* Generating permutation constant to shift all elements.
5525 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5526 for (i = 0; i < nelt; i++)
5527 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5528 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5530 if (dump_enabled_p ())
5531 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5532 "shift permutation is not supported by target\n");
5533 return false;
5535 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
5537 /* Generating permutation constant to shift all elements.
5538 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5539 for (i = 0; i < nelt; i++)
5540 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5541 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5543 if (dump_enabled_p ())
5544 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5545 "shift permutation is not supported by target\n");
5546 return false;
5548 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
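      /* For illustration, assume NELT == 8 and three interleaved fields
         a, b and c:
           dr_chain[0] = { a0 b0 c0 a1 b1 c1 a2 b2 }
           dr_chain[1] = { c2 a3 b3 c3 a4 b4 c4 a5 }
           dr_chain[2] = { b5 c5 a6 b6 c6 a7 b7 c7 }.
         The shuffles and shifts below then leave
           (*result_chain)[0] = { a0 ... a7 }
           (*result_chain)[1] = { b0 ... b7 }
           (*result_chain)[2] = { c0 ... c7 }.  */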
5550 for (k = 0; k < 3; k++)
5552 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5553 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5554 dr_chain[k], dr_chain[k],
5555 perm3_mask);
5556 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5557 vect[k] = data_ref;
5560 for (k = 0; k < 3; k++)
5562 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5563 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5564 vect[k % 3], vect[(k + 1) % 3],
5565 shift1_mask);
5566 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5567 vect_shift[k] = data_ref;
5570 for (k = 0; k < 3; k++)
5572 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5573 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5574 vect_shift[(4 - k) % 3],
5575 vect_shift[(3 - k) % 3],
5576 shift2_mask);
5577 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5578 vect[k] = data_ref;
5581 (*result_chain)[3 - (nelt % 3)] = vect[2];
5583 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5584 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5585 vect[0], shift3_mask);
5586 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5587 (*result_chain)[nelt % 3] = data_ref;
5589 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5590 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5591 vect[1], shift4_mask);
5592 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5593 (*result_chain)[0] = data_ref;
5594 return true;
5596 return false;
5599 /* Function vect_transform_grouped_load.
5601 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5602 to perform their permutation and assign the resulting vectorized statements to
5603 the scalar statements.
5606 void
5607 vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
5608 gimple_stmt_iterator *gsi)
5610 machine_mode mode;
5611 vec<tree> result_chain = vNULL;
5613 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5614 RESULT_CHAIN is the output of vect_permute_load_chain; it contains permuted
5615 vectors that are ready for vector computation. */
5616 result_chain.create (size);
5618 /* If the reassociation width for the vector type is 2 or greater, the target
5619 machine can execute 2 or more vector instructions in parallel.  Otherwise
5620 try to get the chain for the loads group using vect_shift_permute_load_chain. */
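   /* Note that, because of the short-circuit evaluation below, the shift-based
      variant is only attempted when the reassociation width is not greater
      than 1 and SIZE is not a power of two; in every other case, or when it
      fails, vect_permute_load_chain is used.  */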
5621 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5622 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5623 || exact_log2 (size) != -1
5624 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5625 gsi, &result_chain))
5626 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5627 vect_record_grouped_load_vectors (stmt, result_chain);
5628 result_chain.release ();
5631 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5632 generated as part of the vectorization of STMT. Assign the statement
5633 for each vector to the associated scalar statement. */
5635 void
5636 vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
5638 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5639 gimple next_stmt, new_stmt;
5640 unsigned int i, gap_count;
5641 tree tmp_data_ref;
5643 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5644 Since we scan the chain starting from its first node, their order
5645 corresponds to the order of data-refs in RESULT_CHAIN. */
5646 next_stmt = first_stmt;
5647 gap_count = 1;
5648 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5650 if (!next_stmt)
5651 break;
5653 /* Skip the gaps.  Loads created for the gaps will be removed later by the
5654 dead code elimination pass.  No need to check for the first stmt in
5655 the group, since it always exists.
5656 GROUP_GAP is the number of steps in elements from the previous
5657 access (if there is no gap GROUP_GAP is 1). We skip loads that
5658 correspond to the gaps. */
5659 if (next_stmt != first_stmt
5660 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5662 gap_count++;
5663 continue;
5666 while (next_stmt)
5668 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5669 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5670 copies, and we put the new vector statement in the first available
5671 RELATED_STMT. */
5672 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5673 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5674 else
5676 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5678 gimple prev_stmt =
5679 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5680 gimple rel_stmt =
5681 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5682 while (rel_stmt)
5684 prev_stmt = rel_stmt;
5685 rel_stmt =
5686 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5689 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5690 new_stmt;
5694 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5695 gap_count = 1;
5696 /* If NEXT_STMT accesses the same DR as the previous statement,
5697 put the same TMP_DATA_REF as its vectorized statement; otherwise
5698 get the next data-ref from RESULT_CHAIN. */
5699 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5700 break;
5705 /* Function vect_can_force_dr_alignment_p.
5707 Returns whether DECL can be forced to be aligned on an
5708 ALIGNMENT-bit boundary. */
5710 bool
5711 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5713 if (TREE_CODE (decl) != VAR_DECL)
5714 return false;
5716 if (decl_in_symtab_p (decl)
5717 && !symtab_node::get (decl)->can_increase_alignment_p ())
5718 return false;
5720 if (TREE_STATIC (decl))
5721 return (alignment <= MAX_OFILE_ALIGNMENT);
5722 else
5723 return (alignment <= MAX_STACK_ALIGNMENT);
5727 /* Return whether the data reference DR is supported with respect to its
5728 alignment.
5729 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5730 if it is aligned, i.e., check if it is possible to vectorize it with different
5731 alignment. */
5733 enum dr_alignment_support
5734 vect_supportable_dr_alignment (struct data_reference *dr,
5735 bool check_aligned_accesses)
5737 gimple stmt = DR_STMT (dr);
5738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5740 machine_mode mode = TYPE_MODE (vectype);
5741 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5742 struct loop *vect_loop = NULL;
5743 bool nested_in_vect_loop = false;
5745 if (aligned_access_p (dr) && !check_aligned_accesses)
5746 return dr_aligned;
5748 /* For now assume all conditional loads/stores support unaligned
5749 access without any special code. */
5750 if (is_gimple_call (stmt)
5751 && gimple_call_internal_p (stmt)
5752 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5753 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5754 return dr_unaligned_supported;
5756 if (loop_vinfo)
5758 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5759 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5762 /* Possibly unaligned access. */
5764 /* We can choose between using the implicit realignment scheme (generating
5765 a misaligned_move stmt) and the explicit realignment scheme (generating
5766 aligned loads with a REALIGN_LOAD). There are two variants to the
5767 explicit realignment scheme: optimized, and unoptimized.
5768 We can optimize the realignment only if the step between consecutive
5769 vector loads is equal to the vector size. Since the vector memory
5770 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5771 is guaranteed that the misalignment amount remains the same throughout the
5772 execution of the vectorized loop. Therefore, we can create the
5773 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5774 at the loop preheader.
5776 However, in the case of outer-loop vectorization, when vectorizing a
5777 memory access in the inner-loop nested within the LOOP that is now being
5778 vectorized, while it is guaranteed that the misalignment of the
5779 vectorized memory access will remain the same in different outer-loop
5780 iterations, it is *not* guaranteed that it will remain the same throughout
5781 the execution of the inner-loop. This is because the inner-loop advances
5782 with the original scalar step (and not in steps of VS). If the inner-loop
5783 step happens to be a multiple of VS, then the misalignment remains fixed
5784 and we can use the optimized realignment scheme. For example:
5786 for (i=0; i<N; i++)
5787 for (j=0; j<M; j++)
5788 s += a[i+j];
5790 When vectorizing the i-loop in the above example, the step between
5791 consecutive vector loads is 1, and so the misalignment does not remain
5792 fixed across the execution of the inner-loop, and the realignment cannot
5793 be optimized (as illustrated in the following pseudo vectorized loop):
5795 for (i=0; i<N; i+=4)
5796 for (j=0; j<M; j++){
5797 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5798 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5799 // (assuming that we start from an aligned address).
5802 We therefore have to use the unoptimized realignment scheme:
5804 for (i=0; i<N; i+=4)
5805 for (j=k; j<M; j+=4)
5806 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5807 // that the misalignment of the initial address is
5808 // 0).
5810 The loop can then be vectorized as follows:
5812 for (k=0; k<4; k++){
5813 rt = get_realignment_token (&vp[k]);
5814 for (i=0; i<N; i+=4){
5815 v1 = vp[i+k];
5816 for (j=k; j<M; j+=4){
5817 v2 = vp[i+j+VS-1];
5818 va = REALIGN_LOAD <v1,v2,rt>;
5819 vs += va;
5820 v1 = v2;
5823 } */
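   /* In short: for a load, if the target provides vec_realign_load (and, when
      it defines builtin_mask_for_load, that hook returns a builtin), the
      explicit realignment scheme is used -- the optimized variant when the
      misalignment is invariant (loop vectorization and, for an access nested
      in an inner loop, DR_STEP equal to the vector size), the unoptimized one
      otherwise.  Loads without that support, and all stores, are instead
      checked against the target's support for misaligned vector accesses.  */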
5825 if (DR_IS_READ (dr))
5827 bool is_packed = false;
5828 tree type = (TREE_TYPE (DR_REF (dr)));
5830 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
5831 && (!targetm.vectorize.builtin_mask_for_load
5832 || targetm.vectorize.builtin_mask_for_load ()))
5834 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5835 if ((nested_in_vect_loop
5836 && (TREE_INT_CST_LOW (DR_STEP (dr))
5837 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5838 || !loop_vinfo)
5839 return dr_explicit_realign;
5840 else
5841 return dr_explicit_realign_optimized;
5843 if (!known_alignment_for_access_p (dr))
5844 is_packed = not_size_aligned (DR_REF (dr));
5846 if ((TYPE_USER_ALIGN (type) && !is_packed)
5847 || targetm.vectorize.
5848 support_vector_misalignment (mode, type,
5849 DR_MISALIGNMENT (dr), is_packed))
5850 /* Can't software pipeline the loads, but can at least do them. */
5851 return dr_unaligned_supported;
5853 else
5855 bool is_packed = false;
5856 tree type = (TREE_TYPE (DR_REF (dr)));
5858 if (!known_alignment_for_access_p (dr))
5859 is_packed = not_size_aligned (DR_REF (dr));
5861 if ((TYPE_USER_ALIGN (type) && !is_packed)
5862 || targetm.vectorize.
5863 support_vector_misalignment (mode, type,
5864 DR_MISALIGNMENT (dr), is_packed))
5865 return dr_unaligned_supported;
5868 /* Unsupported. */
5869 return dr_unaligned_unsupported;