gcc/tree-vect-data-refs.c
/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "builtins.h"
/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */

static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
                              tree vectype, unsigned HOST_WIDE_INT count)
{
  machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);

  if (array_mode == BLKmode)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
                         GET_MODE_NAME (mode), count);
      return false;
    }

  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot use %s<%s><%s>\n", name,
                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
                     GET_MODE_NAME (mode));

  return true;
}
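/* Illustrative example (editorial addition, not part of the original
   sources): with COUNT == 2 and VECTYPE of mode V4SI, the code above
   asks for an integer mode of 2 * 128 bits and then checks whether the
   target provides a vec_load_lanes/vec_store_lanes handler converting
   between that array mode and V4SI.  On targets with LD2/ST2-style
   instructions both checks typically succeed, so interleaved groups of
   two can use the lanes optabs.  */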
/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different than the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines the
   VF.  If the smallest datatype in the loop is present only as the rhs of a
   promotion operation - we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */

tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}
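/* Illustrative example (editorial addition, not part of the original
   sources): for a loop body like

       short *s; int *d;
       d[i] = (int) s[i];

   the cast statement has an 'int' result but a 'short' rhs, so this
   function returns 'short' with *RHS_SIZE_UNIT == 2 and
   *LHS_SIZE_UNIT == 4; the smaller type is what bounds the
   vectorization factor.  */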
/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.  */

static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark for run-time aliasing test between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
      dump_printf (MSG_NOTE, "\n");
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not supported when optimizing"
                         " for size.\n");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for outer-loops.\n");
      return false;
    }

  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
  if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
      || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for non-constant "
                         "step\n");
      return false;
    }

  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
  return true;
}
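/* Illustrative example (editorial addition, not part of the original
   sources): for

       for (i = 0; i < n; i++)
         a[i] = b[i] + 1;

   with 'a' and 'b' incoming pointers of unknown overlap, the DDR is
   recorded here and the loop is later versioned, guarding the vector
   code with a runtime check roughly of the form
   'a + segment_len_a <= b || b + segment_len_b <= a', where the
   segment lengths are derived from DR_STEP and the iteration count.  */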
/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
   the data dependence.  */

static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* In loop analysis all data references should be vectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    gcc_unreachable ();

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb
      || (DR_IS_READ (dra) && DR_IS_READ (drb)))
    return false;

  /* Even if we have an anti-dependence then, as the vectorized loop covers at
     least two scalar iterations, there is always also a true dependence.
     As the vectorizer does not re-order loads and stores we can ignore
     the anti-dependence if TBAA can disambiguate both DRs similar to the
     case with known negative distance anti-dependences (positive
     distance anti-dependences would violate TBAA constraints).  */
  if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
       || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
      && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
                                 get_alias_set (DR_REF (drb))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "can't determine dependence between ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  /* Known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "bad dist vector for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "bad dist vector for ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "dependence distance = %d.\n", dist);

      if (dist == 0)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "dependence distance == 0 between ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          /* When we perform grouped accesses and perform implicit CSE
             by detecting equal accesses and doing disambiguation with
             runtime alias tests like for
                .. = a[i];
                .. = a[i+1];
                a[i] = ..;
                a[i+1] = ..;
                *p = ..;
                .. = a[i];
                .. = a[i+1];
             where we will end up loading { a[i], a[i+1] } once, make
             sure that inserting group loads before the first load and
             stores after the last store will do the right thing.
             Similar for groups like
                a[i] = ...;
                ... = a[i];
                a[i+1] = ...;
             where loads from the group interleave with the store.  */
          if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
              || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
            {
              gimple earlier_stmt;
              earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
              if (DR_IS_WRITE
                    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "READ_WRITE dependence in interleaving."
                                     "\n");
                  return true;
                }
            }

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "dependence distance negative.\n");
          /* Record a negative dependence distance to later limit the
             amount of stmt copying / unrolling we can perform.
             Only need to handle read-after-write dependence.  */
          if (DR_IS_READ (drb)
              && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
                  || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
            STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "adjusting maximal vectorization factor to %i\n",
                             *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "dependence distance >= VF.\n");
          continue;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized, possible dependence "
                           "between data-refs ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_NOTE, " and ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_NOTE, "\n");
        }

      return true;
    }

  return false;
}
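/* Illustrative example (editorial addition, not part of the original
   sources): for

       for (i = 0; i < n; i++)
         a[i + 3] = a[i] * c;

   the distance vector is (3), so a requested *max_vf of 8 is reduced
   to 3 above; with at most three elements per vector iteration the
   values loaded for a[i] were stored by an earlier vector iteration,
   and the read-after-write dependence is respected.  */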
/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */

bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_ref_dependences ===\n");

  LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
  if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
                                &LOOP_VINFO_DDRS (loop_vinfo),
                                LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
    return false;

  FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}
/* Function vect_slp_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  */

static bool
vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  /* We need to check dependences of statements marked as unvectorizable
     as well, they still can prohibit vectorization.  */

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb)
    return false;

  /* Read-read is OK.  */
  if (DR_IS_READ (dra) && DR_IS_READ (drb))
    return false;

  /* If dra and drb are part of the same interleaving chain consider
     them independent.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
      && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
          == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
    }
  else if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "determined dependence between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
      dump_printf (MSG_NOTE, "\n");
    }

  /* We do not vectorize basic blocks with write-write dependencies.  */
  if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
    return true;

  /* If we have a read-write dependence check that the load is before the store.
     When we vectorize basic blocks, vector load can be only before
     corresponding scalar load, and vector store can be only after its
     corresponding scalar store.  So the order of the accesses is preserved in
     case the load is before the store.  */
  gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
  if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
    {
      /* That only holds for load-store pairs taking part in vectorization.  */
      if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
          && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
        return false;
    }

  return true;
}
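/* Illustrative example (editorial addition, not part of the original
   sources): in a basic block containing

       x = *p;      # scalar load
       ...
       *p = y;      # scalar store

   the load is the earlier statement, so the dependence is accepted:
   SLP emits the vector load no later than the first scalar load and
   the vector store no earlier than the last scalar store, preserving
   the load-before-store order.  With the store first, the function
   returns true and the block is not vectorized.  */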
/* Function vect_slp_analyze_data_ref_dependences.

   Examine all the data references in the basic-block, and make sure there
   do not exist any data dependences between them.  */

bool
vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
{
  struct data_dependence_relation *ddr;
  unsigned int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_slp_analyze_data_ref_dependences ===\n");

  if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
                                &BB_VINFO_DDRS (bb_vinfo),
                                vNULL, true))
    return false;

  FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
    if (vect_slp_analyze_data_ref_dependence (ddr))
      return false;

  return true;
}
/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data reference
      cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed.  Misalignment is calculated
   only for trivial cases.  TODO.  */

static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign;
  tree aligned_to;
  unsigned HOST_WIDE_INT alignment;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_compute_data_ref_alignment:\n");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  /* Strided loads perform only component accesses, misalignment information
     is irrelevant for them.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    return true;

  misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop evenly
     divides by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "inner step divides the vector-size.\n");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "inner step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  /* Similarly, if we're doing basic-block vectorization, we can only use
     base and misalignment information relative to an innermost loop if the
     misalignment stays the same throughout the execution of the loop.
     As above, this is the case if the stride of the dataref evenly divides
     by the vector size.  */
  if (!loop)
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "SLP: step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  alignment = TYPE_ALIGN_UNIT (vectype);

  if ((compare_tree_int (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown alignment for access: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return true;
    }

  /* To look at alignment of the base we have to preserve an inner MEM_REF
     as that carries alignment information of the actual access.  */
  base = ref;
  while (handled_component_p (base))
    base = TREE_OPERAND (base, 0);
  if (TREE_CODE (base) == MEM_REF)
    base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
                   build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));

  if (get_object_alignment (base) >= TYPE_ALIGN (vectype))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Strip an inner MEM_REF to a bare decl if possible.  */
      if (TREE_CODE (base) == MEM_REF
          && integer_zerop (TREE_OPERAND (base, 1))
          && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
        base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);

      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "can't force alignment of ref: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
              dump_printf (MSG_NOTE, "\n");
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
          dump_printf (MSG_NOTE, "\n");
        }

      ((dataref_aux *)dr->aux)->base_decl = base;
      ((dataref_aux *)dr->aux)->base_misaligned = true;
    }

  /* If this is a backward running DR then first access in the larger
     vectype actually is N-1 elements before the address in the DR.
     Adjust misalign accordingly.  */
  if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
    {
      tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
      /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
         otherwise we wouldn't be here.  */
      offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
      /* PLUS because DR_STEP was negative.  */
      misalign = size_binop (PLUS_EXPR, misalign, offset);
    }

  SET_DR_MISALIGNMENT (dr,
                       wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
    }

  return true;
}
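/* Illustrative example (editorial addition, not part of the original
   sources): for a V4SI access (16-byte vectors, 4-byte elements) with
   a 16-byte aligned base and DR_INIT == 4, the code above records
   DR_MISALIGNMENT == 4.  For the same access running backwards
   (DR_STEP == -4) the vector covering the first iteration starts
   3 * 4 == 12 bytes lower, and the adjustment yields
   (4 - 12) mod 16 == 8 as the recorded misalignment.  */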
/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}
/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */

static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  vec<dr_p> same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
  SET_DR_MISALIGNMENT (dr, -1);
}
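/* Illustrative example (editorial addition, not part of the original
   sources): assume DR_PEEL has 4-byte elements and misalignment 12
   against a 16-byte vector type, so NPEEL == 1 makes it aligned.  A
   second forward data-ref with element size 4 and misalignment 8 is
   updated above to (8 + 1 * 4) & 15 == 12: still misaligned, just by
   a different amount, which the callers account for when costing the
   peeling choice.  */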
/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */

bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
           && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      /* Strided loads perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
        {
          if (dump_enabled_p ())
            {
              if (DR_IS_READ (dr))
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned load.");
              else
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned "
                                 "store.");

              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dr));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }
      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Vectorizing an unaligned access.\n");
    }
  return true;
}
/* Given a memory reference EXP return whether its alignment is less
   than its size.  */

static bool
not_size_aligned (tree exp)
{
  if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
    return true;

  return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
          > get_object_alignment (exp));
}
/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */

static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
         the prolog loop ({VF - misalignment}), is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          dump_printf (MSG_NOTE,
                       ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
        }
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = TREE_TYPE (DR_REF (dr));
      bool is_packed = not_size_aligned (DR_REF (dr));
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unknown misalignment, is_packed = %d\n", is_packed);
      if ((TYPE_USER_ALIGN (type) && !is_packed)
          || targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}
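/* Illustrative example (editorial addition, not part of the original
   sources): an access with 4-byte elements whose known misalignment
   is 6 bytes can never reach 16-byte alignment by peeling, because
   each peeled scalar iteration advances the address by 4 bytes; the
   check above (6 % 4 != 0) rejects peeling, leaving versioning or
   unaligned support as the only options.  */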
/* Calculate the cost of the memory access represented by DR.  */

static void
vect_get_data_access_cost (struct data_reference *dr,
                           unsigned int *inside_cost,
                           unsigned int *outside_cost,
                           stmt_vector_for_cost *body_cost_vec)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int ncopies = vf / nunits;

  if (DR_IS_READ (dr))
    vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
                        NULL, body_cost_vec, false);
  else
    vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_get_data_access_cost: inside_cost = %d, "
                     "outside_cost = %d.\n", *inside_cost, *outside_cost);
}
/* Insert DR into peeling hash table with NPEEL as key.  */

static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
                          int npeel)
{
  struct _vect_peel_info elem, *slot;
  _vect_peel_info **new_slot;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  elem.npeel = npeel;
  slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
  if (slot)
    slot->count++;
  else
    {
      slot = XNEW (struct _vect_peel_info);
      slot->npeel = npeel;
      slot->dr = dr;
      slot->count = 1;
      new_slot
        = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
      *new_slot = slot;
    }

  if (!supportable_dr_alignment
      && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    slot->count += VECT_MAX_COST;
}
/* Traverse peeling hash table to find peeling option that aligns maximum
   number of data accesses.  */

int
vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
                                     _vect_peel_extended_info *max)
{
  vect_peel_info elem = *slot;

  if (elem->count > max->peel_info.count
      || (elem->count == max->peel_info.count
          && max->peel_info.npeel > elem->npeel))
    {
      max->peel_info.npeel = elem->npeel;
      max->peel_info.count = elem->count;
      max->peel_info.dr = elem->dr;
    }

  return 1;
}
/* Traverse peeling hash table and calculate cost for each peeling option.
   Find the one with the lowest cost.  */

int
vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
                                   _vect_peel_extended_info *min)
{
  vect_peel_info elem = *slot;
  int save_misalignment, dummy;
  unsigned int inside_cost = 0, outside_cost = 0, i;
  gimple stmt = DR_STMT (elem->dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;
  stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;

  prologue_cost_vec.create (2);
  body_cost_vec.create (2);
  epilogue_cost_vec.create (2);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);
      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      save_misalignment = DR_MISALIGNMENT (dr);
      vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
      vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
                                 &body_cost_vec);
      SET_DR_MISALIGNMENT (dr, save_misalignment);
    }

  auto_vec<stmt_info_for_cost> scalar_cost_vec;
  vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec);
  outside_cost += vect_get_known_peeling_cost
    (loop_vinfo, elem->npeel, &dummy,
     &scalar_cost_vec, &prologue_cost_vec, &epilogue_cost_vec);

  /* Prologue and epilogue costs are added to the target model later.
     These costs depend only on the scalar iteration cost, the
     number of peeling iterations finally chosen, and the number of
     misaligned statements.  So discard the information found here.  */
  prologue_cost_vec.release ();
  epilogue_cost_vec.release ();

  if (inside_cost < min->inside_cost
      || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
    {
      min->inside_cost = inside_cost;
      min->outside_cost = outside_cost;
      min->body_cost_vec.release ();
      min->body_cost_vec = body_cost_vec;
      min->peel_info.dr = elem->dr;
      min->peel_info.npeel = elem->npeel;
    }
  else
    body_cost_vec.release ();

  return 1;
}
/* Choose best peeling option by traversing peeling hash table and either
   choosing an option with the lowest cost (if cost model is enabled) or the
   option that aligns as many accesses as possible.  */

static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
                                       unsigned int *npeel,
                                       stmt_vector_for_cost *body_cost_vec)
{
   struct _vect_peel_extended_info res;

   res.peel_info.dr = NULL;
   res.body_cost_vec = stmt_vector_for_cost ();

   if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
     {
       res.inside_cost = INT_MAX;
       res.outside_cost = INT_MAX;
       LOOP_VINFO_PEELING_HTAB (loop_vinfo)
           ->traverse <_vect_peel_extended_info *,
                       vect_peeling_hash_get_lowest_cost> (&res);
     }
   else
     {
       res.peel_info.count = 0;
       LOOP_VINFO_PEELING_HTAB (loop_vinfo)
           ->traverse <_vect_peel_extended_info *,
                       vect_peeling_hash_get_most_frequent> (&res);
     }

   *npeel = res.peel_info.npeel;
   *body_cost_vec = res.body_cost_vec;
   return res.peel_info.dr;
}
/* Function vect_enhance_data_refs_alignment

   This pass will use loop versioning and loop peeling in order to enhance
   the alignment of data references in the loop.

   FOR NOW: we assume that whatever versioning/peeling takes place, only the
   original loop is to be vectorized.  Any other loops that are created by
   the transformations performed in this pass - are not supposed to be
   vectorized.  This restriction will be relaxed.

   This pass will require a cost model to guide it whether to apply peeling
   or versioning or a combination of the two.  For example, the scheme that
   Intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') whose alignment you want to force by doing
   peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
   other accesses are not necessarily aligned, or (2) use loop versioning to
   generate one loop in which all accesses are aligned, and another loop in
   which only 'p' is necessarily aligned.

   ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
   Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

   Devising a cost model is the most critical aspect of this work.  It will
   guide us on which access to peel for, whether to use loop versioning, how
   many versions to create, etc.  The cost model will probably consist of
   generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).

   Here are the general steps involved in alignment enhancements:

     -- original loop, before alignment analysis:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = unknown
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- After vect_compute_data_refs_alignment:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- Possibility 1: we do loop versioning:
     if (p is aligned) {
        for (i=0; i<N; i++){    # loop 1A
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i=0; i<N; i++){    # loop 1B
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     -- Possibility 2: we do loop peeling:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     for (i = 3; i < N; i++){   # loop 2A
        x = q[i];                       # DR_MISALIGNMENT(q) = 0
        p[i] = y;                       # DR_MISALIGNMENT(p) = unknown
     }

     -- Possibility 3: combination of loop peeling and versioning:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     if (p is aligned) {
        for (i = 3; i<N; i++){  # loop 3A
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i = 3; i<N; i++){  # loop 3B
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     These loops are later passed to loop_transform to be vectorized.  The
     vectorizer will use the alignment information to guide the transformation
     (whether to generate regular loads/stores, or with special handling for
     misalignment).  */
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum dr_alignment_support supportable_dr_alignment;
  struct data_reference *dr0 = NULL, *first_store = NULL;
  struct data_reference *dr;
  unsigned int i, j;
  bool do_peeling = false;
  bool do_versioning = false;
  bool stat;
  gimple stmt;
  stmt_vec_info stmt_info;
  unsigned int npeel = 0;
  bool all_misalignments_unknown = true;
  unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned possible_npeel_number = 1;
  tree vectype;
  unsigned int nelements, mis, same_align_drs_max = 0;
  stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_enhance_data_refs_alignment ===\n");

  /* While cost model enhancements are expected in the future, the high level
     view of the code at this time is as follows:

     A) If there is a misaligned access then see if peeling to align
        this access can make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures
        as needed and return true.

     B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy vect_supportable_dr_alignment
        then see if loop versioning checks can be used to make all data
        references satisfy vect_supportable_dr_alignment.  If so, update
        data structures as needed and return true.

     C) If neither peeling nor versioning were successful then return false if
        any data reference does not satisfy vect_supportable_dr_alignment.

     D) Return true (all data references satisfy vect_supportable_dr_alignment).

     Note, Possibility 3 above (which is peeling and versioning together) is not
     being done at this time.  */

  /* (1) Peeling to force alignment.  */

  /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
     Considerations:
     + How many accesses will become aligned due to the peeling
     - How many accesses will become unaligned due to the peeling,
       and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).  */

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      /* For invariant accesses there is nothing to enhance.  */
      if (integer_zerop (DR_STEP (dr)))
        continue;

      /* Strided loads perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
      do_peeling = vector_alignment_reachable_p (dr);
      if (do_peeling)
        {
          if (known_alignment_for_access_p (dr))
            {
              unsigned int npeel_tmp;
              bool negative = tree_int_cst_compare (DR_STEP (dr),
                                                    size_zero_node) < 0;

              /* Save info about DR in the hash table.  */
              if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
                LOOP_VINFO_PEELING_HTAB (loop_vinfo)
                  = new hash_table<peel_info_hasher> (1);

              vectype = STMT_VINFO_VECTYPE (stmt_info);
              nelements = TYPE_VECTOR_SUBPARTS (vectype);
              mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
                                                TREE_TYPE (DR_REF (dr))));
              npeel_tmp = (negative
                           ? (mis - nelements) : (nelements - mis))
                  & (nelements - 1);

              /* For multiple types, it is possible that the bigger type access
                 will have more than one peeling option.  E.g., a loop with two
                 types: one of size (vector size / 4), and the other one of
                 size (vector size / 8).  Vectorization factor will be 8.  If
                 both accesses are misaligned by 3, the first one needs one
                 scalar iteration to be aligned, and the second one needs 5.
                 But the first one will be aligned also by peeling 5 scalar
                 iterations, and in that case both accesses will be aligned.
                 Hence, except for the immediate peeling amount, we also want
                 to try to add full vector size, while we don't exceed
                 vectorization factor.
                 We do this automatically for cost model, since we calculate
                 cost for every peeling option.  */
              if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                possible_npeel_number = vf / nelements;

              /* Handle the aligned case.  We may decide to align some other
                 access, making DR unaligned.  */
              if (DR_MISALIGNMENT (dr) == 0)
                {
                  npeel_tmp = 0;
                  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                    possible_npeel_number++;
                }

              for (j = 0; j < possible_npeel_number; j++)
                {
                  gcc_assert (npeel_tmp <= vf);
                  vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
                  npeel_tmp += nelements;
                }

              all_misalignments_unknown = false;
              /* Data-ref that was chosen for the case that all the
                 misalignments are unknown is not relevant anymore, since we
                 have a data-ref with known alignment.  */
              dr0 = NULL;
            }
          else
            {
              /* If we don't know any misalignment values, we prefer
                 peeling for data-ref that has the maximum number of data-refs
                 with the same alignment, unless the target prefers to align
                 stores over loads.  */
              if (all_misalignments_unknown)
                {
                  unsigned same_align_drs
                    = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
                  if (!dr0
                      || same_align_drs_max < same_align_drs)
                    {
                      same_align_drs_max = same_align_drs;
                      dr0 = dr;
                    }
                  /* For data-refs with the same number of related
                     accesses prefer the one where the misalign
                     computation will be invariant in the outermost loop.  */
                  else if (same_align_drs_max == same_align_drs)
                    {
                      struct loop *ivloop0, *ivloop;
                      ivloop0 = outermost_invariant_loop_for_expr
                          (loop, DR_BASE_ADDRESS (dr0));
                      ivloop = outermost_invariant_loop_for_expr
                          (loop, DR_BASE_ADDRESS (dr));
                      if ((ivloop && !ivloop0)
                          || (ivloop && ivloop0
                              && flow_loop_nested_p (ivloop, ivloop0)))
                        dr0 = dr;
                    }

                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }

              /* If there are both known and unknown misaligned accesses in the
                 loop, we choose peeling amount according to the known
                 accesses.  */
              if (!supportable_dr_alignment)
                {
                  dr0 = dr;
                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }
            }
        }
      else
        {
          if (!aligned_access_p (dr))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "vector alignment may not be reachable\n");
              break;
            }
        }
    }

  /* Check if we can possibly peel the loop.  */
  if (!vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;

  /* If we don't know how many times the peeling loop will run
     assume it will run VF-1 times and disable peeling if the remaining
     iters are less than the vectorization factor.  */
  if (do_peeling
      && all_misalignments_unknown
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo)
          < 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
    do_peeling = false;
  if (do_peeling
      && all_misalignments_unknown
      && vect_supportable_dr_alignment (dr0, false))
    {
      /* Check if the target requires to prefer stores over loads, i.e., if
         misaligned stores are more expensive than misaligned loads (taking
         drs with same alignment into account).  */
      if (first_store && DR_IS_READ (dr0))
        {
          unsigned int load_inside_cost = 0, load_outside_cost = 0;
          unsigned int store_inside_cost = 0, store_outside_cost = 0;
          unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
          unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
          stmt_vector_for_cost dummy;
          dummy.create (2);

          vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
                                     &dummy);
          vect_get_data_access_cost (first_store, &store_inside_cost,
                                     &store_outside_cost, &dummy);

          dummy.release ();

          /* Calculate the penalty for leaving FIRST_STORE unaligned (by
             aligning the load DR0).  */
          load_inside_penalty = store_inside_cost;
          load_outside_penalty = store_outside_cost;
          for (i = 0;
               STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
                          DR_STMT (first_store))).iterate (i, &dr);
               i++)
            if (DR_IS_READ (dr))
              {
                load_inside_penalty += load_inside_cost;
                load_outside_penalty += load_outside_cost;
              }
            else
              {
                load_inside_penalty += store_inside_cost;
                load_outside_penalty += store_outside_cost;
              }

          /* Calculate the penalty for leaving DR0 unaligned (by
             aligning the FIRST_STORE).  */
          store_inside_penalty = load_inside_cost;
          store_outside_penalty = load_outside_cost;
          for (i = 0;
               STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
                      DR_STMT (dr0))).iterate (i, &dr);
               i++)
            if (DR_IS_READ (dr))
              {
                store_inside_penalty += load_inside_cost;
                store_outside_penalty += load_outside_cost;
              }
            else
              {
                store_inside_penalty += store_inside_cost;
                store_outside_penalty += store_outside_cost;
              }

          if (load_inside_penalty > store_inside_penalty
              || (load_inside_penalty == store_inside_penalty
                  && load_outside_penalty > store_outside_penalty))
            dr0 = first_store;
        }

      /* In case there are only loads with different unknown misalignments, use
         peeling only if it may help to align other accesses in the loop.  */
      if (!first_store
          && !STMT_VINFO_SAME_ALIGN_REFS (
                  vinfo_for_stmt (DR_STMT (dr0))).length ()
          && vect_supportable_dr_alignment (dr0, false)
             != dr_unaligned_supported)
        do_peeling = false;
    }

  if (do_peeling && !dr0)
    {
      /* Peeling is possible, but there is no data access that is not supported
         unless aligned.  So we try to choose the best possible peeling.  */

      /* We should get here only if there are drs with known misalignment.  */
      gcc_assert (!all_misalignments_unknown);

      /* Choose the best peeling from the hash table.  */
      dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
                                                   &body_cost_vec);
      if (!dr0 || !npeel)
        do_peeling = false;

      /* If peeling by npeel will result in a remaining loop not iterating
         enough to be vectorized then do not peel.  */
      if (do_peeling
          && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
          && (LOOP_VINFO_INT_NITERS (loop_vinfo)
              < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
        do_peeling = false;
1650 if (do_peeling)
1652 stmt = DR_STMT (dr0);
1653 stmt_info = vinfo_for_stmt (stmt);
1654 vectype = STMT_VINFO_VECTYPE (stmt_info);
1655 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1657 if (known_alignment_for_access_p (dr0))
1659 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1660 size_zero_node) < 0;
1661 if (!npeel)
1663 /* Since it's known at compile time, compute the number of
1664 iterations in the peeled loop (the peeling factor) for use in
1665 updating DR_MISALIGNMENT values. The peeling factor is the
1666 vectorization factor minus the misalignment as an element
1667 count. */
1668 mis = DR_MISALIGNMENT (dr0);
1669 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1670 npeel = ((negative ? mis - nelements : nelements - mis)
1671 & (nelements - 1));
1674 /* For interleaved data access every iteration accesses all the
1675 members of the group, therefore we divide the number of iterations
1676 by the group size. */
1677 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1678 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1679 npeel /= GROUP_SIZE (stmt_info);
1681 if (dump_enabled_p ())
1682 dump_printf_loc (MSG_NOTE, vect_location,
1683 "Try peeling by %d\n", npeel);
1686 /* Ensure that all data refs can be vectorized after the peel. */
1687 FOR_EACH_VEC_ELT (datarefs, i, dr)
1689 int save_misalignment;
1691 if (dr == dr0)
1692 continue;
1694 stmt = DR_STMT (dr);
1695 stmt_info = vinfo_for_stmt (stmt);
1696 /* For interleaving, only the alignment of the first access
1697 matters. */
1698 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1699 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1700 continue;
1702 /* Strided loads perform only component accesses, alignment is
1703 irrelevant for them. */
1704 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1705 continue;
1707 save_misalignment = DR_MISALIGNMENT (dr);
1708 vect_update_misalignment_for_peel (dr, dr0, npeel);
1709 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1710 SET_DR_MISALIGNMENT (dr, save_misalignment);
1712 if (!supportable_dr_alignment)
1714 do_peeling = false;
1715 break;
1719 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1721 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1722 if (!stat)
1723 do_peeling = false;
1724 else
1726 body_cost_vec.release ();
1727 return stat;
1731 if (do_peeling)
1733 unsigned max_allowed_peel
1734 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1735 if (max_allowed_peel != (unsigned)-1)
1737 unsigned max_peel = npeel;
1738 if (max_peel == 0)
1740 gimple dr_stmt = DR_STMT (dr0);
1741 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1742 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1743 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1745 if (max_peel > max_allowed_peel)
1747 do_peeling = false;
1748 if (dump_enabled_p ())
1749 dump_printf_loc (MSG_NOTE, vect_location,
1750 "Disable peeling, max peels reached: %d\n", max_peel);
1755 if (do_peeling)
1757 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1758 If the misalignment of DR_i is identical to that of dr0 then set
1759 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1760 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1761 by the peeling factor times the element size of DR_i (MOD the
1762 vectorization factor times the size). Otherwise, the
1763 misalignment of DR_i must be set to unknown. */
1764 FOR_EACH_VEC_ELT (datarefs, i, dr)
1765 if (dr != dr0)
1766 vect_update_misalignment_for_peel (dr, dr0, npeel);
1768 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1769 if (npeel)
1770 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1771 else
1772 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1773 = DR_MISALIGNMENT (dr0);
1774 SET_DR_MISALIGNMENT (dr0, 0);
1775 if (dump_enabled_p ())
1777 dump_printf_loc (MSG_NOTE, vect_location,
1778 "Alignment of access forced using peeling.\n");
1779 dump_printf_loc (MSG_NOTE, vect_location,
1780 "Peeling for alignment will be applied.\n");
1782 /* The inside-loop cost will be accounted for in vectorizable_load
1783 and vectorizable_store correctly with adjusted alignments.
1784 Drop the body_cost_vec on the floor here. */
1785 body_cost_vec.release ();
1787 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1788 gcc_assert (stat);
1789 return stat;
1793 body_cost_vec.release ();
1795 /* (2) Versioning to force alignment. */
1797 /* Try versioning if:
1798 1) optimize loop for speed
1799 2) there is at least one unsupported misaligned data ref with an unknown
1800 misalignment, and
1801 3) all misaligned data refs with a known misalignment are supported, and
1802 4) the number of runtime alignment checks is within reason. */
1804 do_versioning =
1805 optimize_loop_nest_for_speed_p (loop)
1806 && (!loop->inner); /* FORNOW */
1808 if (do_versioning)
1810 FOR_EACH_VEC_ELT (datarefs, i, dr)
1812 stmt = DR_STMT (dr);
1813 stmt_info = vinfo_for_stmt (stmt);
1815 /* For interleaving, only the alignment of the first access
1816 matters. */
1817 if (aligned_access_p (dr)
1818 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1819 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1820 continue;
1822 /* Strided loads perform only component accesses; alignment is
1823 irrelevant for them. */
1824 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1825 continue;
1827 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1829 if (!supportable_dr_alignment)
1831 gimple stmt;
1832 int mask;
1833 tree vectype;
1835 if (known_alignment_for_access_p (dr)
1836 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1837 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1839 do_versioning = false;
1840 break;
1843 stmt = DR_STMT (dr);
1844 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1845 gcc_assert (vectype);
1847 /* The rightmost bits of an aligned address must be zeros.
1848 Construct the mask needed for this test. For example,
1849 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1850 mask must be 15 = 0xf. */
1851 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
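/* The guard emitted later for the versioned loop (see
 vect_create_cond_for_align_checks) effectively tests
 (addr_1 | addr_2 | ... | addr_n) & mask == 0, i.e. every recorded
 address must have its low bits clear. */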
1853 /* FORNOW: use the same mask to test all potentially unaligned
1854 references in the loop. The vectorizer currently supports
1855 a single vector size, see the reference to
1856 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1857 vectorization factor is computed. */
1858 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1859 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1860 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1861 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1862 DR_STMT (dr));
1866 /* Versioning requires at least one misaligned data reference. */
1867 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1868 do_versioning = false;
1869 else if (!do_versioning)
1870 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1873 if (do_versioning)
1875 vec<gimple> may_misalign_stmts
1876 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1877 gimple stmt;
1879 /* It can now be assumed that the data references in the statements
1880 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1881 of the loop being vectorized. */
1882 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1884 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1885 dr = STMT_VINFO_DATA_REF (stmt_info);
1886 SET_DR_MISALIGNMENT (dr, 0);
1887 if (dump_enabled_p ())
1888 dump_printf_loc (MSG_NOTE, vect_location,
1889 "Alignment of access forced using versioning.\n");
1892 if (dump_enabled_p ())
1893 dump_printf_loc (MSG_NOTE, vect_location,
1894 "Versioning for alignment will be applied.\n");
1896 /* Peeling and versioning can't be done together at this time. */
1897 gcc_assert (! (do_peeling && do_versioning));
1899 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1900 gcc_assert (stat);
1901 return stat;
1904 /* This point is reached if neither peeling nor versioning is being done. */
1905 gcc_assert (! (do_peeling || do_versioning));
1907 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1908 return stat;
1912 /* Function vect_find_same_alignment_drs.
1914 Update group and alignment relations according to the chosen
1915 vectorization factor. */
1917 static void
1918 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1919 loop_vec_info loop_vinfo)
1921 unsigned int i;
1922 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1923 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1924 struct data_reference *dra = DDR_A (ddr);
1925 struct data_reference *drb = DDR_B (ddr);
1926 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1927 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1928 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1929 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1930 lambda_vector dist_v;
1931 unsigned int loop_depth;
1933 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1934 return;
1936 if (dra == drb)
1937 return;
1939 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1940 return;
1942 /* Loop-based vectorization and known data dependence. */
1943 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1944 return;
1946 /* Data-dependence analysis reports a distance vector of zero
1947 for data-references that overlap only in the first iteration
1948 but have steps of different sign (see PR45764).
1949 So, as a sanity check, require equal DR_STEP. */
1950 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1951 return;
1953 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1954 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1956 int dist = dist_v[loop_depth];
1958 if (dump_enabled_p ())
1959 dump_printf_loc (MSG_NOTE, vect_location,
1960 "dependence distance = %d.\n", dist);
1962 /* Same loop iteration. */
1963 if (dist == 0
1964 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1966 /* Two references with a distance of zero, or a multiple of VF with equal element size, have the same alignment. */
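/* Illustrative numbers: for unit-stride 4-byte accesses with VF = 4,
 a dependence distance of 4 iterations separates the two references
 by exactly one 16-byte vector, so their misalignments coincide. */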
1967 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1968 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
1969 if (dump_enabled_p ())
1971 dump_printf_loc (MSG_NOTE, vect_location,
1972 "accesses have the same alignment.\n");
1973 dump_printf (MSG_NOTE,
1974 "dependence distance modulo vf == 0 between ");
1975 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1976 dump_printf (MSG_NOTE, " and ");
1977 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1978 dump_printf (MSG_NOTE, "\n");
1985 /* Function vect_analyze_data_refs_alignment
1987 Analyze the alignment of the data-references in the loop.
1988 Return FALSE if a data reference is found that cannot be vectorized. */
1990 bool
1991 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
1992 bb_vec_info bb_vinfo)
1994 if (dump_enabled_p ())
1995 dump_printf_loc (MSG_NOTE, vect_location,
1996 "=== vect_analyze_data_refs_alignment ===\n");
1998 /* Mark groups of data references with same alignment using
1999 data dependence information. */
2000 if (loop_vinfo)
2002 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2003 struct data_dependence_relation *ddr;
2004 unsigned int i;
2006 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2007 vect_find_same_alignment_drs (ddr, loop_vinfo);
2010 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2012 if (dump_enabled_p ())
2013 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2014 "not vectorized: can't calculate alignment "
2015 "for data ref.\n");
2016 return false;
2019 return true;
2023 /* Analyze groups of accesses: check that DR belongs to a group of
2024 accesses of legal size, step, etc. Detect gaps, single element
2025 interleaving, and other special cases. Set grouped access info.
2026 Collect groups of strided stores for further use in SLP analysis. */
2028 static bool
2029 vect_analyze_group_access (struct data_reference *dr)
2031 tree step = DR_STEP (dr);
2032 tree scalar_type = TREE_TYPE (DR_REF (dr));
2033 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2034 gimple stmt = DR_STMT (dr);
2035 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2036 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2037 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2038 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2039 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2040 bool slp_impossible = false;
2041 struct loop *loop = NULL;
2043 if (loop_vinfo)
2044 loop = LOOP_VINFO_LOOP (loop_vinfo);
2046 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2047 size of the interleaving group (including gaps). */
2048 groupsize = absu_hwi (dr_step) / type_size;
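/* Illustrative numbers: 4-byte elements with |DR_STEP| = 16 give
 groupsize = 4; a pair of interleaved ints (step 8) gives
 groupsize = 2. */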
2050 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2051 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2053 /* Check if this DR is a part of interleaving, and is a single
2054 element of the group that is accessed in the loop. */
2056 /* Gaps are supported only for loads. STEP must be a multiple of the type
2057 size. The size of the group must be a power of 2. */
2058 if (DR_IS_READ (dr)
2059 && (dr_step % type_size) == 0
2060 && groupsize > 0
2061 && exact_log2 (groupsize) != -1)
2063 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2064 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2065 if (dump_enabled_p ())
2067 dump_printf_loc (MSG_NOTE, vect_location,
2068 "Detected single element interleaving ");
2069 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2070 dump_printf (MSG_NOTE, " step ");
2071 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2072 dump_printf (MSG_NOTE, "\n");
2075 if (loop_vinfo)
2077 if (dump_enabled_p ())
2078 dump_printf_loc (MSG_NOTE, vect_location,
2079 "Data access with gaps requires scalar "
2080 "epilogue loop\n");
2081 if (loop->inner)
2083 if (dump_enabled_p ())
2084 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2085 "Peeling for outer loop is not"
2086 " supported\n");
2087 return false;
2090 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2093 return true;
2096 if (dump_enabled_p ())
2098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2099 "not consecutive access ");
2100 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2101 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2104 if (bb_vinfo)
2106 /* Mark the statement as unvectorizable. */
2107 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2108 return true;
2111 return false;
2114 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2116 /* First stmt in the interleaving chain. Check the chain. */
2117 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2118 struct data_reference *data_ref = dr;
2119 unsigned int count = 1;
2120 tree prev_init = DR_INIT (data_ref);
2121 gimple prev = stmt;
2122 HOST_WIDE_INT diff, gaps = 0;
2123 unsigned HOST_WIDE_INT count_in_bytes;
2125 while (next)
2127 /* Skip same data-refs. In case two or more stmts share a
2128 data-ref (supported only for loads), we vectorize only the first
2129 stmt, and the rest get their vectorized loads from the first
2130 one. */
2131 if (!tree_int_cst_compare (DR_INIT (data_ref),
2132 DR_INIT (STMT_VINFO_DATA_REF (
2133 vinfo_for_stmt (next)))))
2135 if (DR_IS_WRITE (data_ref))
2137 if (dump_enabled_p ())
2138 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2139 "Two store stmts share the same dr.\n");
2140 return false;
2143 /* For load use the same data-ref load. */
2144 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2146 prev = next;
2147 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2148 continue;
2151 prev = next;
2152 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2154 /* All group members have the same STEP by construction. */
2155 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2157 /* Check that the distance between two accesses is equal to the type
2158 size. Otherwise, we have gaps. */
2159 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2160 - TREE_INT_CST_LOW (prev_init)) / type_size;
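/* Illustrative numbers: inits 0 and 8 with 4-byte elements give
 diff = 2, i.e. one element-sized gap between the two members. */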
2161 if (diff != 1)
2163 /* FORNOW: SLP of accesses with gaps is not supported. */
2164 slp_impossible = true;
2165 if (DR_IS_WRITE (data_ref))
2167 if (dump_enabled_p ())
2168 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2169 "interleaved store with gaps\n");
2170 return false;
2173 gaps += diff - 1;
2176 last_accessed_element += diff;
2178 /* Store the gap from the previous member of the group. If there is no
2179 gap in the access, GROUP_GAP is always 1. */
2180 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2182 prev_init = DR_INIT (data_ref);
2183 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2184 /* Count the number of data-refs in the chain. */
2185 count++;
2188 /* COUNT is the number of accesses found, we multiply it by the size of
2189 the type to get COUNT_IN_BYTES. */
2190 count_in_bytes = type_size * count;
2192 /* Check that the size of the interleaving (including gaps) is not
2193 greater than STEP. */
2194 if (dr_step != 0
2195 && absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
2197 if (dump_enabled_p ())
2199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2200 "interleaving size is greater than step for ");
2201 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2202 DR_REF (dr));
2203 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2205 return false;
2208 /* Check that the size of the interleaving is equal to STEP for stores,
2209 i.e., that there are no gaps. */
2210 if (dr_step != 0
2211 && absu_hwi (dr_step) != count_in_bytes)
2213 if (DR_IS_READ (dr))
2215 slp_impossible = true;
2216 /* There is a gap after the last load in the group. This gap is the
2217 difference between the groupsize and the number of elements.
2218 When there is no gap, this difference should be 0. */
2219 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
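/* Illustrative numbers: a load group with step 16 over 4-byte
 elements (groupsize = 4) but only 3 members recorded gets
 GROUP_GAP = 4 - 3 = 1 for the trailing gap. */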
2221 else
2223 if (dump_enabled_p ())
2224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2225 "interleaved store with gaps\n");
2226 return false;
2230 /* Check that STEP is a multiple of type size. */
2231 if (dr_step != 0
2232 && (dr_step % type_size) != 0)
2234 if (dump_enabled_p ())
2236 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2237 "step is not a multiple of type size: step ");
2238 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
2239 dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
2240 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2241 TYPE_SIZE_UNIT (scalar_type));
2242 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2244 return false;
2247 if (groupsize == 0)
2248 groupsize = count + gaps;
2250 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2251 if (dump_enabled_p ())
2252 dump_printf_loc (MSG_NOTE, vect_location,
2253 "Detected interleaving of size %d\n", (int)groupsize);
2255 /* SLP: create an SLP data structure for every interleaving group of
2256 stores for further analysis in vect_analyze_slp. */
2257 if (DR_IS_WRITE (dr) && !slp_impossible)
2259 if (loop_vinfo)
2260 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2261 if (bb_vinfo)
2262 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2265 /* There is a gap at the end of the group. */
2266 if (groupsize - last_accessed_element > 0 && loop_vinfo)
2268 if (dump_enabled_p ())
2269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2270 "Data access with gaps requires scalar "
2271 "epilogue loop\n");
2272 if (loop->inner)
2274 if (dump_enabled_p ())
2275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2276 "Peeling for outer loop is not supported\n");
2277 return false;
2280 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2284 return true;
2288 /* Analyze the access pattern of the data-reference DR.
2289 In case of non-consecutive accesses call vect_analyze_group_access() to
2290 analyze groups of accesses. */
2292 static bool
2293 vect_analyze_data_ref_access (struct data_reference *dr)
2295 tree step = DR_STEP (dr);
2296 tree scalar_type = TREE_TYPE (DR_REF (dr));
2297 gimple stmt = DR_STMT (dr);
2298 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2299 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2300 struct loop *loop = NULL;
2302 if (loop_vinfo)
2303 loop = LOOP_VINFO_LOOP (loop_vinfo);
2305 if (loop_vinfo && !step)
2307 if (dump_enabled_p ())
2308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2309 "bad data-ref access in loop\n");
2310 return false;
2313 /* Allow invariant loads in loops that are not nested. */
2314 if (loop_vinfo && integer_zerop (step))
2316 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2317 if (nested_in_vect_loop_p (loop, stmt))
2319 if (dump_enabled_p ())
2320 dump_printf_loc (MSG_NOTE, vect_location,
2321 "zero step in inner loop of nest\n");
2322 return false;
2324 return DR_IS_READ (dr);
2327 if (loop && nested_in_vect_loop_p (loop, stmt))
2329 /* Interleaved accesses are not yet supported within outer-loop
2330 vectorization for references in the inner-loop. */
2331 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2333 /* For the rest of the analysis we use the outer-loop step. */
2334 step = STMT_VINFO_DR_STEP (stmt_info);
2335 if (integer_zerop (step))
2337 if (dump_enabled_p ())
2338 dump_printf_loc (MSG_NOTE, vect_location,
2339 "zero step in outer loop.\n");
2340 if (DR_IS_READ (dr))
2341 return true;
2342 else
2343 return false;
2347 /* Consecutive? */
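/* Illustrative numbers: a 4-byte access with DR_STEP = 4, or
 DR_STEP = -4 for a reversed access, is consecutive and therefore
 not part of an interleaving group. */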
2348 if (TREE_CODE (step) == INTEGER_CST)
2350 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2351 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2352 || (dr_step < 0
2353 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2355 /* Mark that it is not interleaving. */
2356 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2357 return true;
2361 if (loop && nested_in_vect_loop_p (loop, stmt))
2363 if (dump_enabled_p ())
2364 dump_printf_loc (MSG_NOTE, vect_location,
2365 "grouped access in outer loop.\n");
2366 return false;
2369 /* Assume this is a DR handled by the non-constant strided load case. */
2370 if (TREE_CODE (step) != INTEGER_CST)
2371 return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
2373 /* Not consecutive access - check if it's a part of interleaving group. */
2374 return vect_analyze_group_access (dr);
2379 /* A helper function used in the comparator function to sort data
2380 references. T1 and T2 are the two trees to be compared.
2381 The function returns -1, 0, or 1. */
2383 static int
2384 compare_tree (tree t1, tree t2)
2386 int i, cmp;
2387 enum tree_code code;
2388 char tclass;
2390 if (t1 == t2)
2391 return 0;
2392 if (t1 == NULL)
2393 return -1;
2394 if (t2 == NULL)
2395 return 1;
2398 if (TREE_CODE (t1) != TREE_CODE (t2))
2399 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2401 code = TREE_CODE (t1);
2402 switch (code)
2404 /* For const values, we can just use hash values for comparisons. */
2405 case INTEGER_CST:
2406 case REAL_CST:
2407 case FIXED_CST:
2408 case STRING_CST:
2409 case COMPLEX_CST:
2410 case VECTOR_CST:
2412 hashval_t h1 = iterative_hash_expr (t1, 0);
2413 hashval_t h2 = iterative_hash_expr (t2, 0);
2414 if (h1 != h2)
2415 return h1 < h2 ? -1 : 1;
2416 break;
2419 case SSA_NAME:
2420 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2421 if (cmp != 0)
2422 return cmp;
2424 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2425 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2426 break;
2428 default:
2429 tclass = TREE_CODE_CLASS (code);
2431 /* For var-decl, we could compare their UIDs. */
2432 if (tclass == tcc_declaration)
2434 if (DECL_UID (t1) != DECL_UID (t2))
2435 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2436 break;
2439 /* For expressions with operands, compare their operands recursively. */
2440 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2442 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2443 if (cmp != 0)
2444 return cmp;
2448 return 0;
2452 /* Compare two data-references DRA and DRB to sort them into chunks
2453 suitable for grouping. */
2455 static int
2456 dr_group_sort_cmp (const void *dra_, const void *drb_)
2458 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2459 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2460 int cmp;
2462 /* Stabilize sort. */
2463 if (dra == drb)
2464 return 0;
2466 /* Ordering of DRs according to base. */
2467 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2469 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2470 if (cmp != 0)
2471 return cmp;
2474 /* And according to DR_OFFSET. */
2475 if (!dr_equal_offsets_p (dra, drb))
2477 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2478 if (cmp != 0)
2479 return cmp;
2482 /* Put reads before writes. */
2483 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2484 return DR_IS_READ (dra) ? -1 : 1;
2486 /* Then sort by access size. */
2487 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2488 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2490 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2491 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2492 if (cmp != 0)
2493 return cmp;
2496 /* And by step. */
2497 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2499 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2500 if (cmp != 0)
2501 return cmp;
2504 /* Then sort by DR_INIT. In case of identical DRs sort by stmt UID. */
2505 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2506 if (cmp == 0)
2507 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2508 return cmp;
2511 /* Function vect_analyze_data_ref_accesses.
2513 Analyze the access pattern of all the data references in the loop.
2515 FORNOW: the only access pattern that is considered vectorizable is a
2516 simple step 1 (consecutive) access.
2518 FORNOW: handle only arrays and pointer accesses. */
2520 bool
2521 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2523 unsigned int i;
2524 vec<data_reference_p> datarefs;
2525 struct data_reference *dr;
2527 if (dump_enabled_p ())
2528 dump_printf_loc (MSG_NOTE, vect_location,
2529 "=== vect_analyze_data_ref_accesses ===\n");
2531 if (loop_vinfo)
2532 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2533 else
2534 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2536 if (datarefs.is_empty ())
2537 return true;
2539 /* Sort the array of datarefs to make building the interleaving chains
2540 linear. Don't modify the original vector's order; it is needed for
2541 determining what dependencies are reversed. */
2542 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2543 datarefs_copy.qsort (dr_group_sort_cmp);
2545 /* Build the interleaving chains. */
2546 for (i = 0; i < datarefs_copy.length () - 1;)
2548 data_reference_p dra = datarefs_copy[i];
2549 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2550 stmt_vec_info lastinfo = NULL;
2551 for (i = i + 1; i < datarefs_copy.length (); ++i)
2553 data_reference_p drb = datarefs_copy[i];
2554 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2556 /* ??? Imperfect sorting (non-compatible types, non-modulo
2557 accesses, same accesses) can lead to a group being artificially
2558 split here as we don't just skip over those. If it really
2559 matters we can push those to a worklist and re-iterate
2560 over them. Then we can just skip ahead to the next DR here. */
2562 /* Check that the data-refs have the same first location (except init)
2563 and that they are both either stores or loads (not a load and a store,
2564 not masked loads or stores). */
2565 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2566 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2567 DR_BASE_ADDRESS (drb), 0)
2568 || !dr_equal_offsets_p (dra, drb)
2569 || !gimple_assign_single_p (DR_STMT (dra))
2570 || !gimple_assign_single_p (DR_STMT (drb)))
2571 break;
2573 /* Check that the data-refs have the same constant size and step. */
2574 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2575 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2576 if (!tree_fits_uhwi_p (sza)
2577 || !tree_fits_uhwi_p (szb)
2578 || !tree_int_cst_equal (sza, szb)
2579 || !tree_fits_shwi_p (DR_STEP (dra))
2580 || !tree_fits_shwi_p (DR_STEP (drb))
2581 || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
2582 break;
2584 /* Do not place the same access in the interleaving chain twice. */
2585 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2586 break;
2588 /* Check the types are compatible.
2589 ??? We don't distinguish this during sorting. */
2590 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2591 TREE_TYPE (DR_REF (drb))))
2592 break;
2594 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2595 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2596 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2597 gcc_assert (init_a < init_b);
2599 /* If init_b == init_a + the size of the type * k, we have an
2600 interleaving, and DRA is accessed before DRB. */
2601 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2602 if ((init_b - init_a) % type_size_a != 0)
2603 break;
2605 /* The step (if not zero) must be greater than the difference between
2606 the data-refs' inits; breaking here splits groups into suitable sizes. */
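/* Illustrative numbers: 4-byte accesses at inits 0 and 4 with step 8
 stay in one group (8 > 4), while with step 4 the chain is split
 here (4 <= 4). */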
2607 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2608 if (step != 0 && step <= (init_b - init_a))
2609 break;
2611 if (dump_enabled_p ())
2613 dump_printf_loc (MSG_NOTE, vect_location,
2614 "Detected interleaving ");
2615 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2616 dump_printf (MSG_NOTE, " and ");
2617 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2618 dump_printf (MSG_NOTE, "\n");
2621 /* Link the found element into the group list. */
2622 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2624 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2625 lastinfo = stmtinfo_a;
2627 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2628 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2629 lastinfo = stmtinfo_b;
2633 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2634 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2635 && !vect_analyze_data_ref_access (dr))
2637 if (dump_enabled_p ())
2638 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2639 "not vectorized: complicated access pattern.\n");
2641 if (bb_vinfo)
2643 /* Mark the statement as not vectorizable. */
2644 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2645 continue;
2647 else
2649 datarefs_copy.release ();
2650 return false;
2654 datarefs_copy.release ();
2655 return true;
2659 /* Operator == between two dr_with_seg_len objects.
2661 This equality operator is used to check whether two data refs
2662 are the same, so that we can consider combining the
2663 aliasing checks of two pairs of data-dependent data
2664 refs. */
2666 static bool
2667 operator == (const dr_with_seg_len& d1,
2668 const dr_with_seg_len& d2)
2670 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2671 DR_BASE_ADDRESS (d2.dr), 0)
2672 && compare_tree (d1.offset, d2.offset) == 0
2673 && compare_tree (d1.seg_len, d2.seg_len) == 0;
2676 /* Function comp_dr_with_seg_len_pair.
2678 Comparison function for sorting objects of dr_with_seg_len_pair_t
2679 so that we can combine aliasing checks in one scan. */
2681 static int
2682 comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
2684 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2685 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2687 const dr_with_seg_len &p11 = p1->first,
2688 &p12 = p1->second,
2689 &p21 = p2->first,
2690 &p22 = p2->second;
2692 /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
2693 if a and c have the same base address and step, and b and d have the same
2694 address and step. Therefore, if either a&c or b&d don't have the same address
2695 and step, we don't care about the order of those two pairs after sorting. */
2696 int comp_res;
2698 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2699 DR_BASE_ADDRESS (p21.dr))) != 0)
2700 return comp_res;
2701 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2702 DR_BASE_ADDRESS (p22.dr))) != 0)
2703 return comp_res;
2704 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2705 return comp_res;
2706 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2707 return comp_res;
2708 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2709 return comp_res;
2710 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
2711 return comp_res;
2713 return 0;
2716 /* Function vect_vfa_segment_size.
2718 Create an expression that computes the size of the segment
2719 that will be accessed for a data reference. The function takes into
2720 account that realignment loads may access one more vector.
2722 Input:
2723 DR: The data reference.
2724 LENGTH_FACTOR: segment length to consider.
2726 Return an expression whose value is the size of segment which will be
2727 accessed by DR. */
2729 static tree
2730 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2732 tree segment_length;
2734 if (integer_zerop (DR_STEP (dr)))
2735 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2736 else
2737 segment_length = size_binop (MULT_EXPR,
2738 fold_convert (sizetype, DR_STEP (dr)),
2739 fold_convert (sizetype, length_factor));
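/* Illustrative numbers: a 4-byte DR_STEP with LENGTH_FACTOR equal to
 a vectorization factor of 4 yields a 16-byte segment; the
 realignment case below adds one extra vector to that. */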
2741 if (vect_supportable_dr_alignment (dr, false)
2742 == dr_explicit_realign_optimized)
2744 tree vector_size = TYPE_SIZE_UNIT
2745 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2747 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2749 return segment_length;
2752 /* Function vect_prune_runtime_alias_test_list.
2754 Prune a list of ddrs to be tested at run-time by versioning for alias.
2755 Merge several alias checks into one if possible.
2756 Return FALSE if the resulting list of ddrs is longer than allowed by
2757 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2759 bool
2760 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2762 vec<ddr_p> may_alias_ddrs =
2763 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2764 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
2765 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2766 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2767 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2769 ddr_p ddr;
2770 unsigned int i;
2771 tree length_factor;
2773 if (dump_enabled_p ())
2774 dump_printf_loc (MSG_NOTE, vect_location,
2775 "=== vect_prune_runtime_alias_test_list ===\n");
2777 if (may_alias_ddrs.is_empty ())
2778 return true;
2780 /* Basically, for each pair of dependent data refs store_ptr_0
2781 and load_ptr_0, we create an expression:
2783 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2784 || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0)
2786 for aliasing checks. However, in some cases we can decrease
2787 the number of checks by combining two checks into one. For
2788 example, suppose we have another pair of data refs store_ptr_0
2789 and load_ptr_1, and if the following condition is satisfied:
2791 load_ptr_0 < load_ptr_1 &&
2792 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2794 (this condition means that, in each iteration of the vectorized loop,
2795 the memory accessed through store_ptr_0 cannot lie between the memory
2796 of load_ptr_0 and load_ptr_1.)
2798 we can then use only the following expression to finish the
2799 aliasing checks between store_ptr_0 & load_ptr_0 and
2800 store_ptr_0 & load_ptr_1:
2802 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2803 || ((load_ptr_1 + load_segment_length_1) <= store_ptr_0)
2805 Note that we only consider the case in which load_ptr_0 and load_ptr_1
2806 have the same base address. */
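/* Illustrative numbers: if load_ptr_1 = load_ptr_0 + 4 and all
 segment lengths are 16 bytes, then 4 - 16 < 16 holds and the two
 checks against store_ptr_0 collapse into the single expression
 above. */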
2808 comp_alias_ddrs.create (may_alias_ddrs.length ());
2810 /* First, we collect all data ref pairs for aliasing checks. */
2811 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2813 struct data_reference *dr_a, *dr_b;
2814 gimple dr_group_first_a, dr_group_first_b;
2815 tree segment_length_a, segment_length_b;
2816 gimple stmt_a, stmt_b;
2818 dr_a = DDR_A (ddr);
2819 stmt_a = DR_STMT (DDR_A (ddr));
2820 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2821 if (dr_group_first_a)
2823 stmt_a = dr_group_first_a;
2824 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2827 dr_b = DDR_B (ddr);
2828 stmt_b = DR_STMT (DDR_B (ddr));
2829 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2830 if (dr_group_first_b)
2832 stmt_b = dr_group_first_b;
2833 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2836 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2837 length_factor = scalar_loop_iters;
2838 else
2839 length_factor = size_int (vect_factor);
2840 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2841 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2843 dr_with_seg_len_pair_t dr_with_seg_len_pair
2844 (dr_with_seg_len (dr_a, segment_length_a),
2845 dr_with_seg_len (dr_b, segment_length_b));
2847 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
2848 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2850 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2853 /* Second, we sort the collected data ref pairs so that we can scan
2854 them once to combine all possible aliasing checks. */
2855 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
2857 /* Third, we scan the sorted dr pairs and check if we can combine
2858 alias checks of two neighbouring dr pairs. */
2859 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2861 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2862 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2863 *dr_b1 = &comp_alias_ddrs[i-1].second,
2864 *dr_a2 = &comp_alias_ddrs[i].first,
2865 *dr_b2 = &comp_alias_ddrs[i].second;
2867 /* Remove duplicate data ref pairs. */
2868 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2870 if (dump_enabled_p ())
2872 dump_printf_loc (MSG_NOTE, vect_location,
2873 "found equal ranges ");
2874 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2875 DR_REF (dr_a1->dr));
2876 dump_printf (MSG_NOTE, ", ");
2877 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2878 DR_REF (dr_b1->dr));
2879 dump_printf (MSG_NOTE, " and ");
2880 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2881 DR_REF (dr_a2->dr));
2882 dump_printf (MSG_NOTE, ", ");
2883 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2884 DR_REF (dr_b2->dr));
2885 dump_printf (MSG_NOTE, "\n");
2888 comp_alias_ddrs.ordered_remove (i--);
2889 continue;
2892 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2894 /* We consider the case that DR_B1 and DR_B2 are the same memref,
2895 and DR_A1 and DR_A2 are two consecutive memrefs. */
2896 if (*dr_a1 == *dr_a2)
2898 std::swap (dr_a1, dr_b1);
2899 std::swap (dr_a2, dr_b2);
2902 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2903 DR_BASE_ADDRESS (dr_a2->dr),
2905 || !tree_fits_shwi_p (dr_a1->offset)
2906 || !tree_fits_shwi_p (dr_a2->offset))
2907 continue;
2909 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2910 - tree_to_shwi (dr_a1->offset));
2913 /* Now we check if the following condition is satisfied:
2915 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2917 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2918 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2919 have to make a best estimation. We can get the minimum value
2920 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2921 then either of the following two conditions can guarantee the
2922 one above:
2924 1: DIFF <= MIN_SEG_LEN_B
2925 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
2929 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2930 ? tree_to_shwi (dr_b1->seg_len)
2931 : vect_factor);
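/* Illustrative numbers: if dr_b1->seg_len is the constant 16 and the
 two DR_A offsets differ by diff = 8, condition 1 (8 <= 16) already
 allows merging; when the segment length is not constant, the
 vectorization factor serves as a conservative minimum. */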
2933 if (diff <= min_seg_len_b
2934 || (tree_fits_shwi_p (dr_a1->seg_len)
2935 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
2937 if (dump_enabled_p ())
2939 dump_printf_loc (MSG_NOTE, vect_location,
2940 "merging ranges for ");
2941 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2942 DR_REF (dr_a1->dr));
2943 dump_printf (MSG_NOTE, ", ");
2944 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2945 DR_REF (dr_b1->dr));
2946 dump_printf (MSG_NOTE, " and ");
2947 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2948 DR_REF (dr_a2->dr));
2949 dump_printf (MSG_NOTE, ", ");
2950 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2951 DR_REF (dr_b2->dr));
2952 dump_printf (MSG_NOTE, "\n");
2955 dr_a1->seg_len = size_binop (PLUS_EXPR,
2956 dr_a2->seg_len, size_int (diff));
2957 comp_alias_ddrs.ordered_remove (i--);
2962 dump_printf_loc (MSG_NOTE, vect_location,
2963 "improved number of alias checks from %d to %d\n",
2964 may_alias_ddrs.length (), comp_alias_ddrs.length ());
2965 if ((int) comp_alias_ddrs.length () >
2966 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2967 return false;
2969 return true;
2972 /* Check whether a non-affine read in STMT is suitable for a gather load
2973 and if so, return a builtin decl for that operation. */
2975 tree
2976 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2977 tree *offp, int *scalep)
2979 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2980 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2981 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2982 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2983 tree offtype = NULL_TREE;
2984 tree decl, base, off;
2985 machine_mode pmode;
2986 int punsignedp, pvolatilep;
2988 base = DR_REF (dr);
2989 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
2990 see if we can use the def stmt of the address. */
2991 if (is_gimple_call (stmt)
2992 && gimple_call_internal_p (stmt)
2993 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2994 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2995 && TREE_CODE (base) == MEM_REF
2996 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
2997 && integer_zerop (TREE_OPERAND (base, 1))
2998 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
3000 gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3001 if (is_gimple_assign (def_stmt)
3002 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3003 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3006 /* The gather builtins need an address of the form
3007 loop_invariant + vector * {1, 2, 4, 8}
3009 or loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3010 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3011 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3012 multiplications and additions in it. To get a vector, we need
3013 a single SSA_NAME that will be defined in the loop and will
3014 contain everything that is not loop invariant and that can be
3015 vectorized. The following code attempts to find such a preexisting
3016 SSA_NAME OFF and put the loop invariants into a tree BASE
3017 that can be gimplified before the loop. */
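/* Illustrative decomposition (hypothetical names): for a read like
 ptr[idx[i]] with 8-byte elements and loop-invariant ptr, BASE ends
 up as ptr plus any constant offset, OFF as the SSA_NAME holding
 idx[i] (possibly sign-extended), and SCALE as 8. */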
3018 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
3019 &pmode, &punsignedp, &pvolatilep, false);
3020 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
3022 if (TREE_CODE (base) == MEM_REF)
3024 if (!integer_zerop (TREE_OPERAND (base, 1)))
3026 if (off == NULL_TREE)
3028 offset_int moff = mem_ref_offset (base);
3029 off = wide_int_to_tree (sizetype, moff);
3031 else
3032 off = size_binop (PLUS_EXPR, off,
3033 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3035 base = TREE_OPERAND (base, 0);
3037 else
3038 base = build_fold_addr_expr (base);
3040 if (off == NULL_TREE)
3041 off = size_zero_node;
3043 /* If base is not loop invariant: if off is 0, then we start with just
3044 the constant offset in the loop invariant BASE and continue with base
3045 as OFF; otherwise give up.
3046 We could handle that case by gimplifying the addition of base + off
3047 into some SSA_NAME and using that as off, but for now punt. */
3048 if (!expr_invariant_in_loop_p (loop, base))
3050 if (!integer_zerop (off))
3051 return NULL_TREE;
3052 off = base;
3053 base = size_int (pbitpos / BITS_PER_UNIT);
3055 /* Otherwise put base + constant offset into the loop invariant BASE
3056 and continue with OFF. */
3057 else
3059 base = fold_convert (sizetype, base);
3060 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3063 /* OFF at this point may be either a SSA_NAME or some tree expression
3064 from get_inner_reference. Try to peel off loop invariants from it
3065 into BASE as long as possible. */
3066 STRIP_NOPS (off);
3067 while (offtype == NULL_TREE)
3069 enum tree_code code;
3070 tree op0, op1, add = NULL_TREE;
3072 if (TREE_CODE (off) == SSA_NAME)
3074 gimple def_stmt = SSA_NAME_DEF_STMT (off);
3076 if (expr_invariant_in_loop_p (loop, off))
3077 return NULL_TREE;
3079 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3080 break;
3082 op0 = gimple_assign_rhs1 (def_stmt);
3083 code = gimple_assign_rhs_code (def_stmt);
3084 op1 = gimple_assign_rhs2 (def_stmt);
3086 else
3088 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3089 return NULL_TREE;
3090 code = TREE_CODE (off);
3091 extract_ops_from_tree (off, &code, &op0, &op1);
3093 switch (code)
3095 case POINTER_PLUS_EXPR:
3096 case PLUS_EXPR:
3097 if (expr_invariant_in_loop_p (loop, op0))
3099 add = op0;
3100 off = op1;
3101 do_add:
3102 add = fold_convert (sizetype, add);
3103 if (scale != 1)
3104 add = size_binop (MULT_EXPR, add, size_int (scale));
3105 base = size_binop (PLUS_EXPR, base, add);
3106 continue;
3108 if (expr_invariant_in_loop_p (loop, op1))
3110 add = op1;
3111 off = op0;
3112 goto do_add;
3114 break;
3115 case MINUS_EXPR:
3116 if (expr_invariant_in_loop_p (loop, op1))
3118 add = fold_convert (sizetype, op1);
3119 add = size_binop (MINUS_EXPR, size_zero_node, add);
3120 off = op0;
3121 goto do_add;
3123 break;
3124 case MULT_EXPR:
3125 if (scale == 1 && tree_fits_shwi_p (op1))
3127 scale = tree_to_shwi (op1);
3128 off = op0;
3129 continue;
3131 break;
3132 case SSA_NAME:
3133 off = op0;
3134 continue;
3135 CASE_CONVERT:
3136 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3137 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3138 break;
3139 if (TYPE_PRECISION (TREE_TYPE (op0))
3140 == TYPE_PRECISION (TREE_TYPE (off)))
3142 off = op0;
3143 continue;
3145 if (TYPE_PRECISION (TREE_TYPE (op0))
3146 < TYPE_PRECISION (TREE_TYPE (off)))
3148 off = op0;
3149 offtype = TREE_TYPE (off);
3150 STRIP_NOPS (off);
3151 continue;
3153 break;
3154 default:
3155 break;
3157 break;
3160 /* If at the end OFF still isn't a SSA_NAME or isn't
3161 defined in the loop, punt. */
3162 if (TREE_CODE (off) != SSA_NAME
3163 || expr_invariant_in_loop_p (loop, off))
3164 return NULL_TREE;
3166 if (offtype == NULL_TREE)
3167 offtype = TREE_TYPE (off);
3169 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3170 offtype, scale);
3171 if (decl == NULL_TREE)
3172 return NULL_TREE;
3174 if (basep)
3175 *basep = base;
3176 if (offp)
3177 *offp = off;
3178 if (scalep)
3179 *scalep = scale;
3180 return decl;
3183 /* Function vect_analyze_data_refs.
3185 Find all the data references in the loop or basic block.
3187 The general structure of the analysis of data refs in the vectorizer is as
3188 follows:
3189 1- vect_analyze_data_refs(loop/bb): call
3190 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3191 in the loop/bb and their dependences.
3192 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3193 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3194 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3198 bool
3199 vect_analyze_data_refs (loop_vec_info loop_vinfo,
3200 bb_vec_info bb_vinfo,
3201 int *min_vf, unsigned *n_stmts)
3203 struct loop *loop = NULL;
3204 basic_block bb = NULL;
3205 unsigned int i;
3206 vec<data_reference_p> datarefs;
3207 struct data_reference *dr;
3208 tree scalar_type;
3210 if (dump_enabled_p ())
3211 dump_printf_loc (MSG_NOTE, vect_location,
3212 "=== vect_analyze_data_refs ===\n");
3214 if (loop_vinfo)
3216 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3218 loop = LOOP_VINFO_LOOP (loop_vinfo);
3219 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3220 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
3222 if (dump_enabled_p ())
3223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3224 "not vectorized: loop contains function calls"
3225 " or data references that cannot be analyzed\n");
3226 return false;
3229 for (i = 0; i < loop->num_nodes; i++)
3231 gimple_stmt_iterator gsi;
3233 for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3235 gimple stmt = gsi_stmt (gsi);
3236 if (is_gimple_debug (stmt))
3237 continue;
3238 ++*n_stmts;
3239 if (!find_data_references_in_stmt (loop, stmt, &datarefs))
3241 if (is_gimple_call (stmt) && loop->safelen)
3243 tree fndecl = gimple_call_fndecl (stmt), op;
3244 if (fndecl != NULL_TREE)
3246 struct cgraph_node *node = cgraph_node::get (fndecl);
3247 if (node != NULL && node->simd_clones != NULL)
3249 unsigned int j, n = gimple_call_num_args (stmt);
3250 for (j = 0; j < n; j++)
3252 op = gimple_call_arg (stmt, j);
3253 if (DECL_P (op)
3254 || (REFERENCE_CLASS_P (op)
3255 && get_base_address (op)))
3256 break;
3258 op = gimple_call_lhs (stmt);
3259 /* Ignore #pragma omp declare simd functions
3260 if they don't have data references in the
3261 call stmt itself. */
3262 if (j == n
3263 && !(op
3264 && (DECL_P (op)
3265 || (REFERENCE_CLASS_P (op)
3266 && get_base_address (op)))))
3267 continue;
3271 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3272 if (dump_enabled_p ())
3273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3274 "not vectorized: loop contains function "
3275 "calls or data references that cannot "
3276 "be analyzed\n");
3277 return false;
3282 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3284 else
3286 gimple_stmt_iterator gsi;
3288 bb = BB_VINFO_BB (bb_vinfo);
3289 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3291 gimple stmt = gsi_stmt (gsi);
3292 if (is_gimple_debug (stmt))
3293 continue;
3294 ++*n_stmts;
3295 if (!find_data_references_in_stmt (NULL, stmt,
3296 &BB_VINFO_DATAREFS (bb_vinfo)))
3298 /* Mark the rest of the basic-block as unvectorizable. */
3299 for (; !gsi_end_p (gsi); gsi_next (&gsi))
3301 stmt = gsi_stmt (gsi);
3302 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
3304 break;
3308 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
3311 /* Go through the data-refs, check that the analysis succeeded. Update
3312 pointer from stmt_vec_info struct to DR and vectype. */
3314 FOR_EACH_VEC_ELT (datarefs, i, dr)
3316 gimple stmt;
3317 stmt_vec_info stmt_info;
3318 tree base, offset, init;
3319 bool gather = false;
3320 bool simd_lane_access = false;
3321 int vf;
3323 again:
3324 if (!dr || !DR_REF (dr))
3326 if (dump_enabled_p ())
3327 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3328 "not vectorized: unhandled data-ref\n");
3329 return false;
3332 stmt = DR_STMT (dr);
3333 stmt_info = vinfo_for_stmt (stmt);
3335 /* Discard clobbers from the dataref vector. We will remove
3336 clobber stmts during vectorization. */
3337 if (gimple_clobber_p (stmt))
3339 free_data_ref (dr);
3340 if (i == datarefs.length () - 1)
3342 datarefs.pop ();
3343 break;
3345 datarefs.ordered_remove (i);
3346 dr = datarefs[i];
3347 goto again;
3350 /* Check that analysis of the data-ref succeeded. */
3351 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3352 || !DR_STEP (dr))
3354 bool maybe_gather
3355 = DR_IS_READ (dr)
3356 && !TREE_THIS_VOLATILE (DR_REF (dr))
3357 && targetm.vectorize.builtin_gather != NULL;
3358 bool maybe_simd_lane_access
3359 = loop_vinfo && loop->simduid;
3361 /* If the target supports vector gather loads, or if this might be
3362 a SIMD lane access, see if they can be used. */
3363 if (loop_vinfo
3364 && (maybe_gather || maybe_simd_lane_access)
3365 && !nested_in_vect_loop_p (loop, stmt))
3367 struct data_reference *newdr
3368 = create_data_ref (NULL, loop_containing_stmt (stmt),
3369 DR_REF (dr), stmt, true);
3370 gcc_assert (newdr != NULL && DR_REF (newdr));
3371 if (DR_BASE_ADDRESS (newdr)
3372 && DR_OFFSET (newdr)
3373 && DR_INIT (newdr)
3374 && DR_STEP (newdr)
3375 && integer_zerop (DR_STEP (newdr)))
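/* A SIMD lane access has, roughly, an offset of the form
 _simd_lane * sizeof (element), where _simd_lane comes from an
 IFN_GOMP_SIMD_LANE call for loop->simduid; such a DR is rewritten
 below to offset 0 and step sizeof (element). */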
3377 if (maybe_simd_lane_access)
3379 tree off = DR_OFFSET (newdr);
3380 STRIP_NOPS (off);
3381 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3382 && TREE_CODE (off) == MULT_EXPR
3383 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3385 tree step = TREE_OPERAND (off, 1);
3386 off = TREE_OPERAND (off, 0);
3387 STRIP_NOPS (off);
3388 if (CONVERT_EXPR_P (off)
3389 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3390 0)))
3391 < TYPE_PRECISION (TREE_TYPE (off)))
3392 off = TREE_OPERAND (off, 0);
3393 if (TREE_CODE (off) == SSA_NAME)
3395 gimple def = SSA_NAME_DEF_STMT (off);
3396 tree reft = TREE_TYPE (DR_REF (newdr));
3397 if (is_gimple_call (def)
3398 && gimple_call_internal_p (def)
3399 && (gimple_call_internal_fn (def)
3400 == IFN_GOMP_SIMD_LANE))
3402 tree arg = gimple_call_arg (def, 0);
3403 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3404 arg = SSA_NAME_VAR (arg);
3405 if (arg == loop->simduid
3406 /* For now. */
3407 && tree_int_cst_equal
3408 (TYPE_SIZE_UNIT (reft),
3409 step))
3411 DR_OFFSET (newdr) = ssize_int (0);
3412 DR_STEP (newdr) = step;
3413 DR_ALIGNED_TO (newdr)
3414 = size_int (BIGGEST_ALIGNMENT);
3415 dr = newdr;
3416 simd_lane_access = true;
3422 if (!simd_lane_access && maybe_gather)
3424 dr = newdr;
3425 gather = true;
3428 if (!gather && !simd_lane_access)
3429 free_data_ref (newdr);
3432 if (!gather && !simd_lane_access)
3434 if (dump_enabled_p ())
3436 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3437 "not vectorized: data ref analysis "
3438 "failed ");
3439 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3440 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3443 if (bb_vinfo)
3444 break;
3446 return false;
3450 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3452 if (dump_enabled_p ())
3453 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3454 "not vectorized: base addr of dr is a "
3455 "constant\n");
3457 if (bb_vinfo)
3458 break;
3460 if (gather || simd_lane_access)
3461 free_data_ref (dr);
3462 return false;
3465 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3467 if (dump_enabled_p ())
3469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3470 "not vectorized: volatile type ");
3471 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3472 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3475 if (bb_vinfo)
3476 break;
3478 return false;
3481 if (stmt_can_throw_internal (stmt))
3483 if (dump_enabled_p ())
3485 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3486 "not vectorized: statement can throw an "
3487 "exception ");
3488 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3489 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3492 if (bb_vinfo)
3493 break;
3495 if (gather || simd_lane_access)
3496 free_data_ref (dr);
3497 return false;
3500 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3501 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3503 if (dump_enabled_p ())
3505 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3506 "not vectorized: statement is bitfield "
3507 "access ");
3508 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3509 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3512 if (bb_vinfo)
3513 break;
3515 if (gather || simd_lane_access)
3516 free_data_ref (dr);
3517 return false;
3520 base = unshare_expr (DR_BASE_ADDRESS (dr));
3521 offset = unshare_expr (DR_OFFSET (dr));
3522 init = unshare_expr (DR_INIT (dr));
3524 if (is_gimple_call (stmt)
3525 && (!gimple_call_internal_p (stmt)
3526 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3527 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3529 if (dump_enabled_p ())
3531 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3532 "not vectorized: dr in a call ");
3533 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3534 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3537 if (bb_vinfo)
3538 break;
3540 if (gather || simd_lane_access)
3541 free_data_ref (dr);
3542 return false;
3545 /* Update DR field in stmt_vec_info struct. */
3547 /* If the dataref is in an inner-loop of the loop that is considered
3548 for vectorization, we also want to analyze the access relative to
3549 the outer-loop (DR contains information only relative to the
3550 inner-most enclosing loop). We do that by building a reference to the
3551 first location accessed by the inner-loop, and analyzing it relative to
3552 the outer-loop. */
3553 if (loop && nested_in_vect_loop_p (loop, stmt))
3555 tree outer_step, outer_base, outer_init;
3556 HOST_WIDE_INT pbitsize, pbitpos;
3557 tree poffset;
3558 machine_mode pmode;
3559 int punsignedp, pvolatilep;
3560 affine_iv base_iv, offset_iv;
3561 tree dinit;
3563 /* Build a reference to the first location accessed by the
3564 inner-loop: *(BASE+INIT). (The first location is actually
3565 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3566 tree inner_base = build_fold_indirect_ref
3567 (fold_build_pointer_plus (base, init));
3569 if (dump_enabled_p ())
3571 dump_printf_loc (MSG_NOTE, vect_location,
3572 "analyze in outer-loop: ");
3573 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3574 dump_printf (MSG_NOTE, "\n");
3577 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3578 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3579 gcc_assert (outer_base != NULL_TREE);
3581 if (pbitpos % BITS_PER_UNIT != 0)
3583 if (dump_enabled_p ())
3584 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3585 "failed: bit offset alignment.\n");
3586 return false;
3589 outer_base = build_fold_addr_expr (outer_base);
3590 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3591 &base_iv, false))
3593 if (dump_enabled_p ())
3594 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3595 "failed: evolution of base is not affine.\n");
3596 return false;
3599 if (offset)
3601 if (poffset)
3602 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3603 poffset);
3604 else
3605 poffset = offset;
3608 if (!poffset)
3610 offset_iv.base = ssize_int (0);
3611 offset_iv.step = ssize_int (0);
3613 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3614 &offset_iv, false))
3616 if (dump_enabled_p ())
3617 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3618 "evolution of offset is not affine.\n");
3619 return false;
3622 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3623 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3624 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3625 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3626 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3628 outer_step = size_binop (PLUS_EXPR,
3629 fold_convert (ssizetype, base_iv.step),
3630 fold_convert (ssizetype, offset_iv.step));
3632 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3633 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3634 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3635 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3636 STMT_VINFO_DR_OFFSET (stmt_info) =
3637 fold_convert (ssizetype, offset_iv.base);
3638 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3639 size_int (highest_pow2_factor (offset_iv.base));
3641 if (dump_enabled_p ())
3643 dump_printf_loc (MSG_NOTE, vect_location,
3644 "\touter base_address: ");
3645 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3646 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3647 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3648 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3649 STMT_VINFO_DR_OFFSET (stmt_info));
3650 dump_printf (MSG_NOTE,
3651 "\n\touter constant offset from base address: ");
3652 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3653 STMT_VINFO_DR_INIT (stmt_info));
3654 dump_printf (MSG_NOTE, "\n\touter step: ");
3655 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3656 STMT_VINFO_DR_STEP (stmt_info));
3657 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3658 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3659 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3660 dump_printf (MSG_NOTE, "\n");
3664 if (STMT_VINFO_DATA_REF (stmt_info))
3666 if (dump_enabled_p ())
3668 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3669 "not vectorized: more than one data ref "
3670 "in stmt: ");
3671 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3672 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3675 if (bb_vinfo)
3676 break;
3678 if (gather || simd_lane_access)
3679 free_data_ref (dr);
3680 return false;
3683 STMT_VINFO_DATA_REF (stmt_info) = dr;
3684 if (simd_lane_access)
3686 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3687 free_data_ref (datarefs[i]);
3688 datarefs[i] = dr;
3691 /* Set vectype for STMT. */
3692 scalar_type = TREE_TYPE (DR_REF (dr));
3693 STMT_VINFO_VECTYPE (stmt_info)
3694 = get_vectype_for_scalar_type (scalar_type);
3695 if (!STMT_VINFO_VECTYPE (stmt_info))
3697 if (dump_enabled_p ())
3699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3700 "not vectorized: no vectype for stmt: ");
3701 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3702 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3703 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3704 scalar_type);
3705 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3708 if (bb_vinfo)
3709 break;
3711 if (gather || simd_lane_access)
3713 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3714 if (gather)
3715 free_data_ref (dr);
3717 return false;
3719 else
3721 if (dump_enabled_p ())
3723 dump_printf_loc (MSG_NOTE, vect_location,
3724 "got vectype for stmt: ");
3725 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3726 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3727 STMT_VINFO_VECTYPE (stmt_info));
3728 dump_printf (MSG_NOTE, "\n");
3732 /* Adjust the minimal vectorization factor according to the
3733 vector type. */
3734 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3735 if (vf > *min_vf)
3736 *min_vf = vf;
3738 if (gather)
3740 tree off;
3742 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3743 if (gather
3744 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3745 gather = false;
3746 if (!gather)
3748 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3749 free_data_ref (dr);
3750 if (dump_enabled_p ())
3752 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3753 "not vectorized: not suitable for gather "
3754 "load ");
3755 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3756 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3758 return false;
3761 datarefs[i] = dr;
3762 STMT_VINFO_GATHER_P (stmt_info) = true;
3764 else if (loop_vinfo
3765 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3767 if (nested_in_vect_loop_p (loop, stmt)
3768 || !DR_IS_READ (dr))
3770 if (dump_enabled_p ())
3772 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3773 "not vectorized: not suitable for strided "
3774 "load ");
3775 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3776 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3778 return false;
3780 STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
3784 /* If we stopped analysis at the first dataref we could not analyze
3785 when trying to vectorize a basic-block, mark the rest of the datarefs
3786 as not vectorizable and truncate the vector of datarefs. That
3787 avoids spending useless time in analyzing their dependence. */
3788 if (i != datarefs.length ())
3790 gcc_assert (bb_vinfo != NULL);
3791 for (unsigned j = i; j < datarefs.length (); ++j)
3793 data_reference_p dr = datarefs[j];
3794 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3795 free_data_ref (dr);
3797 datarefs.truncate (i);
3800 return true;
3804 /* Function vect_get_new_vect_var.
3806 Returns a new variable to be used by the vectorizer.  The naming scheme
3807 prepends the prefix "vect_", "stmp_" or "vectp_" (depending on the value
3808 of VAR_KIND) to NAME if NAME is provided; otherwise the prefix alone is
3809 used as the variable name. */
3811 tree
3812 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3814 const char *prefix;
3815 tree new_vect_var;
3817 switch (var_kind)
3819 case vect_simple_var:
3820 prefix = "vect";
3821 break;
3822 case vect_scalar_var:
3823 prefix = "stmp";
3824 break;
3825 case vect_pointer_var:
3826 prefix = "vectp";
3827 break;
3828 default:
3829 gcc_unreachable ();
3832 if (name)
3834 char* tmp = concat (prefix, "_", name, NULL);
3835 new_vect_var = create_tmp_reg (type, tmp);
3836 free (tmp);
3838 else
3839 new_vect_var = create_tmp_reg (type, prefix);
3841 return new_vect_var;
3844 /* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
3846 static void
3847 vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
3848 stmt_vec_info stmt_info)
3850 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
3851 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3852 int misalign = DR_MISALIGNMENT (dr);
3853 if (misalign == -1)
3854 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
3855 else
3856 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
3859 /* Function vect_create_addr_base_for_vector_ref.
3861 Create an expression that computes the address of the first memory location
3862 that will be accessed for a data reference.
3864 Input:
3865 STMT: The statement containing the data reference.
3866 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3867 OFFSET: Optional. If supplied, it is added to the initial address.
3868 LOOP: Specify relative to which loop-nest should the address be computed.
3869 For example, when the dataref is in an inner-loop nested in an
3870 outer-loop that is now being vectorized, LOOP can be either the
3871 outer-loop, or the inner-loop. The first memory location accessed
3872 by the following dataref ('in' points to short):
3874 for (i=0; i<N; i++)
3875 for (j=0; j<M; j++)
3876 s += in[i+j]
3878 is as follows:
3879 if LOOP=i_loop: &in (relative to i_loop)
3880 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3881 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
3882 initial address. Unlike OFFSET, which is a number of elements to
3883 be added, BYTE_OFFSET is measured in bytes.
3885 Output:
3886 1. Return an SSA_NAME whose value is the address of the memory location of
3887 the first vector of the data reference.
3888 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3889 these statement(s) which define the returned SSA_NAME.
3891 FORNOW: We are only handling array accesses with step 1. */
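/* Informal sketch of the value built below (illustration only, not generated
   code; "element_size" stands for TYPE_SIZE_UNIT of the accessed element):

     addr_base = (vectype *) (base_address
                              + offset_from_base + constant_init
                              + OFFSET * element_size
                              + BYTE_OFFSET)

   where base_address/offset_from_base/constant_init come from the DR_*
   fields, or from the STMT_VINFO_DR_* fields in the outer-loop case.  */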
3893 tree
3894 vect_create_addr_base_for_vector_ref (gimple stmt,
3895 gimple_seq *new_stmt_list,
3896 tree offset,
3897 struct loop *loop,
3898 tree byte_offset)
3900 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3901 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3902 tree data_ref_base;
3903 const char *base_name;
3904 tree addr_base;
3905 tree dest;
3906 gimple_seq seq = NULL;
3907 tree base_offset;
3908 tree init;
3909 tree vect_ptr_type;
3910 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3911 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3913 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3915 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3917 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3919 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3920 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3921 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3923 else
3925 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3926 base_offset = unshare_expr (DR_OFFSET (dr));
3927 init = unshare_expr (DR_INIT (dr));
3930 if (loop_vinfo)
3931 base_name = get_name (data_ref_base);
3932 else
3934 base_offset = ssize_int (0);
3935 init = ssize_int (0);
3936 base_name = get_name (DR_REF (dr));
3939 /* Create base_offset */
3940 base_offset = size_binop (PLUS_EXPR,
3941 fold_convert (sizetype, base_offset),
3942 fold_convert (sizetype, init));
3944 if (offset)
3946 offset = fold_build2 (MULT_EXPR, sizetype,
3947 fold_convert (sizetype, offset), step);
3948 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3949 base_offset, offset);
3951 if (byte_offset)
3953 byte_offset = fold_convert (sizetype, byte_offset);
3954 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3955 base_offset, byte_offset);
3958 /* base + base_offset */
3959 if (loop_vinfo)
3960 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3961 else
3963 addr_base = build1 (ADDR_EXPR,
3964 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3965 unshare_expr (DR_REF (dr)));
3968 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3969 addr_base = fold_convert (vect_ptr_type, addr_base);
3970 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3971 addr_base = force_gimple_operand (addr_base, &seq, false, dest);
3972 gimple_seq_add_seq (new_stmt_list, seq);
3974 if (DR_PTR_INFO (dr)
3975 && TREE_CODE (addr_base) == SSA_NAME)
3977 vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
3978 if (offset || byte_offset)
3979 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3982 if (dump_enabled_p ())
3984 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3985 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3986 dump_printf (MSG_NOTE, "\n");
3989 return addr_base;
3993 /* Function vect_create_data_ref_ptr.
3995 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3996 location accessed in the loop by STMT, along with the def-use update
3997 chain to appropriately advance the pointer through the loop iterations.
3998 Also set aliasing information for the pointer. This pointer is used by
3999 the callers to this function to create a memory reference expression for
4000 vector load/store access.
4002 Input:
4003 1. STMT: a stmt that references memory. Expected to be of the form
4004 GIMPLE_ASSIGN <name, data-ref> or
4005 GIMPLE_ASSIGN <data-ref, name>.
4006 2. AGGR_TYPE: the type of the reference, which should be either a vector
4007 or an array.
4008 3. AT_LOOP: the loop where the vector memref is to be created.
4009 4. OFFSET (optional): an offset to be added to the initial address accessed
4010 by the data-ref in STMT.
4011 5. BSI: location where the new stmts are to be placed if there is no loop.
4012 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4013 pointing to the initial address.
4014 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4015 to the initial address accessed by the data-ref in STMT. This is
4016 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4017 in bytes.
4019 Output:
4020 1. Declare a new ptr to vector_type, and have it point to the base of the
4021 data reference (initial address accessed by the data reference).
4022 For example, for vector of type V8HI, the following code is generated:
4024 v8hi *ap;
4025 ap = (v8hi *)initial_address;
4027 if OFFSET is not supplied:
4028 initial_address = &a[init];
4029 if OFFSET is supplied:
4030 initial_address = &a[init + OFFSET];
4031 if BYTE_OFFSET is supplied:
4032 initial_address = &a[init] + BYTE_OFFSET;
4034 Return the initial_address in INITIAL_ADDRESS.
4036 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4037 update the pointer in each iteration of the loop.
4039 Return the increment stmt that updates the pointer in PTR_INCR.
4041 3. Set INV_P to true if the access pattern of the data reference in the
4042 vectorized loop is invariant. Set it to false otherwise.
4044 4. Return the pointer. */
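/* Typical use, as a sketch only (the surrounding local names here are
   assumed, not taken from a real caller):

     dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, at_loop,
                                             NULL_TREE, &dummy, gsi,
                                             &ptr_incr, false, &inv_p);
     data_ref = build2 (MEM_REF, vectype, dataref_ptr,
                        build_int_cst (reference_alias_ptr_type
                                         (DR_REF (dr)), 0));
     ... emit the vector load/store through DATA_REF ...
     dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                    NULL_TREE);  */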
4046 tree
4047 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
4048 tree offset, tree *initial_address,
4049 gimple_stmt_iterator *gsi, gimple *ptr_incr,
4050 bool only_init, bool *inv_p, tree byte_offset)
4052 const char *base_name;
4053 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4054 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4055 struct loop *loop = NULL;
4056 bool nested_in_vect_loop = false;
4057 struct loop *containing_loop = NULL;
4058 tree aggr_ptr_type;
4059 tree aggr_ptr;
4060 tree new_temp;
4061 gimple vec_stmt;
4062 gimple_seq new_stmt_list = NULL;
4063 edge pe = NULL;
4064 basic_block new_bb;
4065 tree aggr_ptr_init;
4066 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4067 tree aptr;
4068 gimple_stmt_iterator incr_gsi;
4069 bool insert_after;
4070 tree indx_before_incr, indx_after_incr;
4071 gimple incr;
4072 tree step;
4073 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4075 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4076 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4078 if (loop_vinfo)
4080 loop = LOOP_VINFO_LOOP (loop_vinfo);
4081 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4082 containing_loop = (gimple_bb (stmt))->loop_father;
4083 pe = loop_preheader_edge (loop);
4085 else
4087 gcc_assert (bb_vinfo);
4088 only_init = true;
4089 *ptr_incr = NULL;
4092 /* Check the step (evolution) of the load in LOOP, and record
4093 whether it's invariant. */
4094 if (nested_in_vect_loop)
4095 step = STMT_VINFO_DR_STEP (stmt_info);
4096 else
4097 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
4099 if (integer_zerop (step))
4100 *inv_p = true;
4101 else
4102 *inv_p = false;
4104 /* Create an expression for the first address accessed by this load
4105 in LOOP. */
4106 base_name = get_name (DR_BASE_ADDRESS (dr));
4108 if (dump_enabled_p ())
4110 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4111 dump_printf_loc (MSG_NOTE, vect_location,
4112 "create %s-pointer variable to type: ",
4113 get_tree_code_name (TREE_CODE (aggr_type)));
4114 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4115 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4116 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4117 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4118 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4119 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4120 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4121 else
4122 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4123 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4124 dump_printf (MSG_NOTE, "\n");
4127 /* (1) Create the new aggregate-pointer variable.
4128 Vector and array types inherit the alias set of their component
4129 type by default so we need to use a ref-all pointer if the data
4130 reference does not conflict with the created aggregated data
4131 reference because it is not addressable. */
4132 bool need_ref_all = false;
4133 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4134 get_alias_set (DR_REF (dr))))
4135 need_ref_all = true;
4136 /* Likewise for any of the data references in the stmt group. */
4137 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4139 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4142 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4143 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4144 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4145 get_alias_set (DR_REF (sdr))))
4147 need_ref_all = true;
4148 break;
4150 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4152 while (orig_stmt);
4154 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4155 need_ref_all);
4156 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4159 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4160 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4161 def-use update cycles for the pointer: one relative to the outer-loop
4162 (LOOP), which is what steps (3) and (4) below do. The other is relative
4163 to the inner-loop (which is the inner-most loop containing the dataref),
4164 and this is done by step (5) below.
4166 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4167 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4168 redundant. Steps (3),(4) create the following:
4170 vp0 = &base_addr;
4171 LOOP: vp1 = phi(vp0,vp2)
4174 vp2 = vp1 + step
4175 goto LOOP
4177 If there is an inner-loop nested in loop, then step (5) will also be
4178 applied, and an additional update in the inner-loop will be created:
4180 vp0 = &base_addr;
4181 LOOP: vp1 = phi(vp0,vp2)
4183 inner: vp3 = phi(vp1,vp4)
4184 vp4 = vp3 + inner_step
4185 if () goto inner
4187 vp2 = vp1 + step
4188 if () goto LOOP */
4190 /* (2) Calculate the initial address of the aggregate-pointer, and set
4191 the aggregate-pointer to point to it before the loop. */
4193 /* Create: (&(base[init_val+offset]) + byte_offset) in the loop preheader. */
4195 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4196 offset, loop, byte_offset);
4197 if (new_stmt_list)
4199 if (pe)
4201 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4202 gcc_assert (!new_bb);
4204 else
4205 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4208 *initial_address = new_temp;
4210 /* Create: p = (aggr_type *) initial_base */
4211 if (TREE_CODE (new_temp) != SSA_NAME
4212 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
4214 vec_stmt = gimple_build_assign (aggr_ptr,
4215 fold_convert (aggr_ptr_type, new_temp));
4216 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
4217 /* Copy the points-to information if it exists. */
4218 if (DR_PTR_INFO (dr))
4219 vect_duplicate_ssa_name_ptr_info (aggr_ptr_init, dr, stmt_info);
4220 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
4221 if (pe)
4223 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
4224 gcc_assert (!new_bb);
4226 else
4227 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
4229 else
4230 aggr_ptr_init = new_temp;
4232 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4233 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4234 inner-loop nested in LOOP (during outer-loop vectorization). */
4236 /* No update in loop is required. */
4237 if (only_init && (!loop_vinfo || at_loop == loop))
4238 aptr = aggr_ptr_init;
4239 else
4241 /* The step of the aggregate pointer is the type size. */
4242 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4243 /* One exception to the above is when the scalar step of the load in
4244 LOOP is zero. In this case the step here is also zero. */
4245 if (*inv_p)
4246 iv_step = size_zero_node;
4247 else if (tree_int_cst_sgn (step) == -1)
4248 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4250 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4252 create_iv (aggr_ptr_init,
4253 fold_convert (aggr_ptr_type, iv_step),
4254 aggr_ptr, loop, &incr_gsi, insert_after,
4255 &indx_before_incr, &indx_after_incr);
4256 incr = gsi_stmt (incr_gsi);
4257 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4259 /* Copy the points-to information if it exists. */
4260 if (DR_PTR_INFO (dr))
4262 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4263 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4265 if (ptr_incr)
4266 *ptr_incr = incr;
4268 aptr = indx_before_incr;
4271 if (!nested_in_vect_loop || only_init)
4272 return aptr;
4275 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4276 nested in LOOP, if it exists. */
4278 gcc_assert (nested_in_vect_loop);
4279 if (!only_init)
4281 standard_iv_increment_position (containing_loop, &incr_gsi,
4282 &insert_after);
4283 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4284 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4285 &indx_after_incr);
4286 incr = gsi_stmt (incr_gsi);
4287 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4289 /* Copy the points-to information if it exists. */
4290 if (DR_PTR_INFO (dr))
4292 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4293 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4295 if (ptr_incr)
4296 *ptr_incr = incr;
4298 return indx_before_incr;
4300 else
4301 gcc_unreachable ();
4305 /* Function bump_vector_ptr
4307 Increment a pointer (to a vector type) by vector-size. If requested,
4308 i.e. if PTR-INCR is given, then also connect the new increment stmt
4309 to the existing def-use update-chain of the pointer, by modifying
4310 the PTR_INCR as illustrated below:
4312 The pointer def-use update-chain before this function:
4313 DATAREF_PTR = phi (p_0, p_2)
4314 ....
4315 PTR_INCR: p_2 = DATAREF_PTR + step
4317 The pointer def-use update-chain after this function:
4318 DATAREF_PTR = phi (p_0, p_2)
4319 ....
4320 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4321 ....
4322 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4324 Input:
4325 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4326 in the loop.
4327 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4328 the loop. The increment amount across iterations is expected
4329 to be vector_size.
4330 BSI - location where the new update stmt is to be placed.
4331 STMT - the original scalar memory-access stmt that is being vectorized.
4332 BUMP - optional. The offset by which to bump the pointer. If not given,
4333 the offset is assumed to be vector_size.
4335 Output: Return NEW_DATAREF_PTR as illustrated above.
4339 tree
4340 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
4341 gimple stmt, tree bump)
4343 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4344 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4345 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4346 tree update = TYPE_SIZE_UNIT (vectype);
4347 gassign *incr_stmt;
4348 ssa_op_iter iter;
4349 use_operand_p use_p;
4350 tree new_dataref_ptr;
4352 if (bump)
4353 update = bump;
4355 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4356 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4357 dataref_ptr, update);
4358 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4360 /* Copy the points-to information if it exists. */
4361 if (DR_PTR_INFO (dr))
4363 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4364 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4367 if (!ptr_incr)
4368 return new_dataref_ptr;
4370 /* Update the vector-pointer's cross-iteration increment. */
4371 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4373 tree use = USE_FROM_PTR (use_p);
4375 if (use == dataref_ptr)
4376 SET_USE (use_p, new_dataref_ptr);
4377 else
4378 gcc_assert (tree_int_cst_compare (use, update) == 0);
4381 return new_dataref_ptr;
4385 /* Function vect_create_destination_var.
4387 Create a new temporary of type VECTYPE. */
4389 tree
4390 vect_create_destination_var (tree scalar_dest, tree vectype)
4392 tree vec_dest;
4393 const char *name;
4394 char *new_name;
4395 tree type;
4396 enum vect_var_kind kind;
4398 kind = vectype ? vect_simple_var : vect_scalar_var;
4399 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4401 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4403 name = get_name (scalar_dest);
4404 if (name)
4405 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4406 else
4407 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4408 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4409 free (new_name);
4411 return vec_dest;
4414 /* Function vect_grouped_store_supported.
4416 Returns TRUE if interleave high and interleave low permutations
4417 are supported, and FALSE otherwise. */
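/* For example, with 4-element vectors A = {a0 a1 a2 a3} and
   B = {b0 b1 b2 b3}, the two permutations checked here are

     interleave_high (A, B) = {a0 b0 a1 b1}   (selector {0 4 1 5})
     interleave_low  (A, B) = {a2 b2 a3 b3}   (selector {2 6 3 7})

   matching the selector vectors constructed below.  */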
4419 bool
4420 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4422 machine_mode mode = TYPE_MODE (vectype);
4424 /* vect_permute_store_chain requires the group size to be equal to 3 or
4425 be a power of two. */
4426 if (count != 3 && exact_log2 (count) == -1)
4428 if (dump_enabled_p ())
4429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4430 "the size of the group of accesses"
4431 " is not a power of 2 or not eqaul to 3\n");
4432 return false;
4435 /* Check that the permutation is supported. */
4436 if (VECTOR_MODE_P (mode))
4438 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4439 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4441 if (count == 3)
4443 unsigned int j0 = 0, j1 = 0, j2 = 0;
4444 unsigned int i, j;
4446 for (j = 0; j < 3; j++)
4448 int nelt0 = ((3 - j) * nelt) % 3;
4449 int nelt1 = ((3 - j) * nelt + 1) % 3;
4450 int nelt2 = ((3 - j) * nelt + 2) % 3;
4451 for (i = 0; i < nelt; i++)
4453 if (3 * i + nelt0 < nelt)
4454 sel[3 * i + nelt0] = j0++;
4455 if (3 * i + nelt1 < nelt)
4456 sel[3 * i + nelt1] = nelt + j1++;
4457 if (3 * i + nelt2 < nelt)
4458 sel[3 * i + nelt2] = 0;
4460 if (!can_vec_perm_p (mode, false, sel))
4462 if (dump_enabled_p ())
4463 dump_printf (MSG_MISSED_OPTIMIZATION,
4464 "permutaion op not supported by target.\n");
4465 return false;
4468 for (i = 0; i < nelt; i++)
4470 if (3 * i + nelt0 < nelt)
4471 sel[3 * i + nelt0] = 3 * i + nelt0;
4472 if (3 * i + nelt1 < nelt)
4473 sel[3 * i + nelt1] = 3 * i + nelt1;
4474 if (3 * i + nelt2 < nelt)
4475 sel[3 * i + nelt2] = nelt + j2++;
4477 if (!can_vec_perm_p (mode, false, sel))
4479 if (dump_enabled_p ())
4480 dump_printf (MSG_MISSED_OPTIMIZATION,
4481 "permutaion op not supported by target.\n");
4482 return false;
4485 return true;
4487 else
4489 /* If length is not equal to 3 then only a power of 2 is supported. */
4490 gcc_assert (exact_log2 (count) != -1);
4492 for (i = 0; i < nelt / 2; i++)
4494 sel[i * 2] = i;
4495 sel[i * 2 + 1] = i + nelt;
4497 if (can_vec_perm_p (mode, false, sel))
4499 for (i = 0; i < nelt; i++)
4500 sel[i] += nelt / 2;
4501 if (can_vec_perm_p (mode, false, sel))
4502 return true;
4507 if (dump_enabled_p ())
4508 dump_printf (MSG_MISSED_OPTIMIZATION,
4509 "permutaion op not supported by target.\n");
4510 return false;
4514 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4515 type VECTYPE. */
4517 bool
4518 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4520 return vect_lanes_optab_supported_p ("vec_store_lanes",
4521 vec_store_lanes_optab,
4522 vectype, count);
4526 /* Function vect_permute_store_chain.
4528 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4529 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4530 the data correctly for the stores. Return the final references for stores
4531 in RESULT_CHAIN.
4533 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4534 The input is 4 vectors each containing 8 elements. We assign a number to
4535 each element, the input sequence is:
4537 1st vec: 0 1 2 3 4 5 6 7
4538 2nd vec: 8 9 10 11 12 13 14 15
4539 3rd vec: 16 17 18 19 20 21 22 23
4540 4th vec: 24 25 26 27 28 29 30 31
4542 The output sequence should be:
4544 1st vec: 0 8 16 24 1 9 17 25
4545 2nd vec: 2 10 18 26 3 11 19 27
4546 3rd vec: 4 12 20 28 5 13 21 29
4547 4th vec: 6 14 22 30 7 15 23 31
4549 i.e., we interleave the contents of the four vectors in their order.
4551 We use interleave_high/low instructions to create such output. The input of
4552 each interleave_high/low operation is two vectors:
4553 1st vec 2nd vec
4554 0 1 2 3 4 5 6 7
4555 the even elements of the result vector are obtained left-to-right from the
4556 high/low elements of the first vector. The odd elements of the result are
4557 obtained left-to-right from the high/low elements of the second vector.
4558 The output of interleave_high will be: 0 4 1 5
4559 and of interleave_low: 2 6 3 7
4562 The permutation is done in log LENGTH stages. In each stage interleave_high
4563 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4564 where the first argument is taken from the first half of DR_CHAIN and the
4565 second argument from its second half.
4566 In our example,
4568 I1: interleave_high (1st vec, 3rd vec)
4569 I2: interleave_low (1st vec, 3rd vec)
4570 I3: interleave_high (2nd vec, 4th vec)
4571 I4: interleave_low (2nd vec, 4th vec)
4573 The output for the first stage is:
4575 I1: 0 16 1 17 2 18 3 19
4576 I2: 4 20 5 21 6 22 7 23
4577 I3: 8 24 9 25 10 26 11 27
4578 I4: 12 28 13 29 14 30 15 31
4580 The output of the second stage, i.e. the final result is:
4582 I1: 0 8 16 24 1 9 17 25
4583 I2: 2 10 18 26 3 11 19 27
4584 I3: 4 12 20 28 5 13 21 29
4585 I4: 6 14 22 30 7 15 23 31. */
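/* The selector vectors used by the power-of-two path below can be checked
   in isolation with the following standalone sketch (illustration only, not
   part of this file; NELT = 8 is an assumed example value):  */
#if 0
#include <stdio.h>

int
main (void)
{
  enum { NELT = 8 };            /* elements per vector (example value) */
  unsigned char sel[NELT];
  unsigned int i;

  /* Interleave-high selector, as built below: {0, NELT, 1, NELT + 1, ...}.  */
  for (i = 0; i < NELT / 2; i++)
    {
      sel[i * 2] = i;
      sel[i * 2 + 1] = i + NELT;
    }
  printf ("high:");
  for (i = 0; i < NELT; i++)
    printf (" %d", sel[i]);
  printf ("\n");                /* prints: high: 0 8 1 9 2 10 3 11 */

  /* Interleave-low selector: the same pattern shifted by NELT / 2.  */
  for (i = 0; i < NELT; i++)
    sel[i] += NELT / 2;
  printf ("low: ");
  for (i = 0; i < NELT; i++)
    printf (" %d", sel[i]);
  printf ("\n");                /* prints: low:  4 12 5 13 6 14 7 15 */

  return 0;
}
#endif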
4587 void
4588 vect_permute_store_chain (vec<tree> dr_chain,
4589 unsigned int length,
4590 gimple stmt,
4591 gimple_stmt_iterator *gsi,
4592 vec<tree> *result_chain)
4594 tree vect1, vect2, high, low;
4595 gimple perm_stmt;
4596 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4597 tree perm_mask_low, perm_mask_high;
4598 tree data_ref;
4599 tree perm3_mask_low, perm3_mask_high;
4600 unsigned int i, n, log_length = exact_log2 (length);
4601 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4602 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4604 result_chain->quick_grow (length);
4605 memcpy (result_chain->address (), dr_chain.address (),
4606 length * sizeof (tree));
4608 if (length == 3)
4610 unsigned int j0 = 0, j1 = 0, j2 = 0;
4612 for (j = 0; j < 3; j++)
4614 int nelt0 = ((3 - j) * nelt) % 3;
4615 int nelt1 = ((3 - j) * nelt + 1) % 3;
4616 int nelt2 = ((3 - j) * nelt + 2) % 3;
4618 for (i = 0; i < nelt; i++)
4620 if (3 * i + nelt0 < nelt)
4621 sel[3 * i + nelt0] = j0++;
4622 if (3 * i + nelt1 < nelt)
4623 sel[3 * i + nelt1] = nelt + j1++;
4624 if (3 * i + nelt2 < nelt)
4625 sel[3 * i + nelt2] = 0;
4627 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4629 for (i = 0; i < nelt; i++)
4631 if (3 * i + nelt0 < nelt)
4632 sel[3 * i + nelt0] = 3 * i + nelt0;
4633 if (3 * i + nelt1 < nelt)
4634 sel[3 * i + nelt1] = 3 * i + nelt1;
4635 if (3 * i + nelt2 < nelt)
4636 sel[3 * i + nelt2] = nelt + j2++;
4638 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4640 vect1 = dr_chain[0];
4641 vect2 = dr_chain[1];
4643 /* Create interleaving stmt:
4644 low = VEC_PERM_EXPR <vect1, vect2,
4645 {j, nelt, *, j + 1, nelt + j + 1, *,
4646 j + 2, nelt + j + 2, *, ...}> */
4647 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4648 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4649 vect2, perm3_mask_low);
4650 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4652 vect1 = data_ref;
4653 vect2 = dr_chain[2];
4654 /* Create interleaving stmt:
4655 low = VEC_PERM_EXPR <vect1, vect2,
4656 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4657 6, 7, nelt + j + 2, ...}> */
4658 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4659 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4660 vect2, perm3_mask_high);
4661 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4662 (*result_chain)[j] = data_ref;
4665 else
4667 /* If length is not equal to 3 then only a power of 2 is supported. */
4668 gcc_assert (exact_log2 (length) != -1);
4670 for (i = 0, n = nelt / 2; i < n; i++)
4672 sel[i * 2] = i;
4673 sel[i * 2 + 1] = i + nelt;
4675 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4677 for (i = 0; i < nelt; i++)
4678 sel[i] += nelt / 2;
4679 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4681 for (i = 0, n = log_length; i < n; i++)
4683 for (j = 0; j < length/2; j++)
4685 vect1 = dr_chain[j];
4686 vect2 = dr_chain[j+length/2];
4688 /* Create interleaving stmt:
4689 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4690 ...}> */
4691 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4692 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4693 vect2, perm_mask_high);
4694 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4695 (*result_chain)[2*j] = high;
4697 /* Create interleaving stmt:
4698 low = VEC_PERM_EXPR <vect1, vect2,
4699 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4700 ...}> */
4701 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4702 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4703 vect2, perm_mask_low);
4704 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4705 (*result_chain)[2*j+1] = low;
4707 memcpy (dr_chain.address (), result_chain->address (),
4708 length * sizeof (tree));
4713 /* Function vect_setup_realignment
4715 This function is called when vectorizing an unaligned load using
4716 the dr_explicit_realign[_optimized] scheme.
4717 This function generates the following code at the loop prolog:
4719 p = initial_addr;
4720 x msq_init = *(floor(p)); # prolog load
4721 realignment_token = call target_builtin;
4722 loop:
4723 x msq = phi (msq_init, ---)
4725 The stmts marked with x are generated only for the case of
4726 dr_explicit_realign_optimized.
4728 The code above sets up a new (vector) pointer, pointing to the first
4729 location accessed by STMT, and a "floor-aligned" load using that pointer.
4730 It also generates code to compute the "realignment-token" (if the relevant
4731 target hook was defined), and creates a phi-node at the loop-header bb
4732 whose arguments are the result of the prolog-load (created by this
4733 function) and the result of a load that takes place in the loop (to be
4734 created by the caller to this function).
4736 For the case of dr_explicit_realign_optimized:
4737 The caller to this function uses the phi-result (msq) to create the
4738 realignment code inside the loop, and sets up the missing phi argument,
4739 as follows:
4740 loop:
4741 msq = phi (msq_init, lsq)
4742 lsq = *(floor(p')); # load in loop
4743 result = realign_load (msq, lsq, realignment_token);
4745 For the case of dr_explicit_realign:
4746 loop:
4747 msq = *(floor(p)); # load in loop
4748 p' = p + (VS-1);
4749 lsq = *(floor(p')); # load in loop
4750 result = realign_load (msq, lsq, realignment_token);
4752 Input:
4753 STMT - (scalar) load stmt to be vectorized. This load accesses
4754 a memory location that may be unaligned.
4755 BSI - place where new code is to be inserted.
4756 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4757 is used.
4759 Output:
4760 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4761 target hook, if defined.
4762 Return value - the result of the loop-header phi node. */
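/* A scalar model of the realign step referred to above may help; this is an
   illustration only (an assumed helper, not a GCC API), for VS elements of
   type int and a misalignment of OFS elements (0 <= OFS < VS):  */
#if 0
static void
realign_load_model (const int *msq, const int *lsq,
                    unsigned int ofs, unsigned int vs, int *result)
{
  /* MSQ = *floor (p) and LSQ = *floor (p + VS - 1); the realignment selects
     a window of VS elements starting OFS elements into MSQ || LSQ.  */
  for (unsigned int i = 0; i < vs; i++)
    result[i] = i + ofs < vs ? msq[i + ofs] : lsq[i + ofs - vs];
}
#endif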
4764 tree
4765 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4766 tree *realignment_token,
4767 enum dr_alignment_support alignment_support_scheme,
4768 tree init_addr,
4769 struct loop **at_loop)
4771 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4772 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4773 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4774 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4775 struct loop *loop = NULL;
4776 edge pe = NULL;
4777 tree scalar_dest = gimple_assign_lhs (stmt);
4778 tree vec_dest;
4779 gimple inc;
4780 tree ptr;
4781 tree data_ref;
4782 basic_block new_bb;
4783 tree msq_init = NULL_TREE;
4784 tree new_temp;
4785 gphi *phi_stmt;
4786 tree msq = NULL_TREE;
4787 gimple_seq stmts = NULL;
4788 bool inv_p;
4789 bool compute_in_loop = false;
4790 bool nested_in_vect_loop = false;
4791 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4792 struct loop *loop_for_initial_load = NULL;
4794 if (loop_vinfo)
4796 loop = LOOP_VINFO_LOOP (loop_vinfo);
4797 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4800 gcc_assert (alignment_support_scheme == dr_explicit_realign
4801 || alignment_support_scheme == dr_explicit_realign_optimized);
4803 /* We need to generate three things:
4804 1. the misalignment computation
4805 2. the extra vector load (for the optimized realignment scheme).
4806 3. the phi node for the two vectors from which the realignment is
4807 done (for the optimized realignment scheme). */
4809 /* 1. Determine where to generate the misalignment computation.
4811 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4812 calculation will be generated by this function, outside the loop (in the
4813 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4814 caller, inside the loop.
4816 Background: If the misalignment remains fixed throughout the iterations of
4817 the loop, then both realignment schemes are applicable, and also the
4818 misalignment computation can be done outside LOOP. This is because we are
4819 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4820 are a multiple of VS (the Vector Size), and therefore the misalignment in
4821 different vectorized LOOP iterations is always the same.
4822 The problem arises only if the memory access is in an inner-loop nested
4823 inside LOOP, which is now being vectorized using outer-loop vectorization.
4824 This is the only case when the misalignment of the memory access may not
4825 remain fixed throughout the iterations of the inner-loop (as explained in
4826 detail in vect_supportable_dr_alignment). In this case, not only is the
4827 optimized realignment scheme not applicable, but also the misalignment
4828 computation (and generation of the realignment token that is passed to
4829 REALIGN_LOAD) have to be done inside the loop.
4831 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4832 or not, which in turn determines if the misalignment is computed inside
4833 the inner-loop, or outside LOOP. */
4835 if (init_addr != NULL_TREE || !loop_vinfo)
4837 compute_in_loop = true;
4838 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4842 /* 2. Determine where to generate the extra vector load.
4844 For the optimized realignment scheme, instead of generating two vector
4845 loads in each iteration, we generate a single extra vector load in the
4846 preheader of the loop, and in each iteration reuse the result of the
4847 vector load from the previous iteration. In case the memory access is in
4848 an inner-loop nested inside LOOP, which is now being vectorized using
4849 outer-loop vectorization, we need to determine whether this initial vector
4850 load should be generated at the preheader of the inner-loop, or can be
4851 generated at the preheader of LOOP. If the memory access has no evolution
4852 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4853 to be generated inside LOOP (in the preheader of the inner-loop). */
4855 if (nested_in_vect_loop)
4857 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4858 bool invariant_in_outerloop =
4859 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4860 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4862 else
4863 loop_for_initial_load = loop;
4864 if (at_loop)
4865 *at_loop = loop_for_initial_load;
4867 if (loop_for_initial_load)
4868 pe = loop_preheader_edge (loop_for_initial_load);
4870 /* 3. For the case of the optimized realignment, create the first vector
4871 load at the loop preheader. */
4873 if (alignment_support_scheme == dr_explicit_realign_optimized)
4875 /* Create msq_init = *(floor(p1)) in the loop preheader */
4876 gassign *new_stmt;
4878 gcc_assert (!compute_in_loop);
4879 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4880 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4881 NULL_TREE, &init_addr, NULL, &inc,
4882 true, &inv_p);
4883 new_temp = copy_ssa_name (ptr);
4884 new_stmt = gimple_build_assign
4885 (new_temp, BIT_AND_EXPR, ptr,
4886 build_int_cst (TREE_TYPE (ptr),
4887 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4888 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4889 gcc_assert (!new_bb);
4890 data_ref
4891 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4892 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4893 new_stmt = gimple_build_assign (vec_dest, data_ref);
4894 new_temp = make_ssa_name (vec_dest, new_stmt);
4895 gimple_assign_set_lhs (new_stmt, new_temp);
4896 if (pe)
4898 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4899 gcc_assert (!new_bb);
4901 else
4902 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4904 msq_init = gimple_assign_lhs (new_stmt);
4907 /* 4. Create realignment token using a target builtin, if available.
4908 It is done either inside the containing loop, or before LOOP (as
4909 determined above). */
4911 if (targetm.vectorize.builtin_mask_for_load)
4913 gcall *new_stmt;
4914 tree builtin_decl;
4916 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4917 if (!init_addr)
4919 /* Generate the INIT_ADDR computation outside LOOP. */
4920 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4921 NULL_TREE, loop);
4922 if (loop)
4924 pe = loop_preheader_edge (loop);
4925 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4926 gcc_assert (!new_bb);
4928 else
4929 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4932 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4933 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4934 vec_dest =
4935 vect_create_destination_var (scalar_dest,
4936 gimple_call_return_type (new_stmt));
4937 new_temp = make_ssa_name (vec_dest, new_stmt);
4938 gimple_call_set_lhs (new_stmt, new_temp);
4940 if (compute_in_loop)
4941 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4942 else
4944 /* Generate the misalignment computation outside LOOP. */
4945 pe = loop_preheader_edge (loop);
4946 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4947 gcc_assert (!new_bb);
4950 *realignment_token = gimple_call_lhs (new_stmt);
4952 /* The result of the CALL_EXPR to this builtin is determined from
4953 the value of the parameter and no global variables are touched
4954 which makes the builtin a "const" function. Requiring the
4955 builtin to have the "const" attribute makes it unnecessary
4956 to call mark_call_clobbered. */
4957 gcc_assert (TREE_READONLY (builtin_decl));
4960 if (alignment_support_scheme == dr_explicit_realign)
4961 return msq;
4963 gcc_assert (!compute_in_loop);
4964 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4967 /* 5. Create msq = phi <msq_init, lsq> in loop */
4969 pe = loop_preheader_edge (containing_loop);
4970 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4971 msq = make_ssa_name (vec_dest);
4972 phi_stmt = create_phi_node (msq, containing_loop->header);
4973 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4975 return msq;
4979 /* Function vect_grouped_load_supported.
4981 Returns TRUE if even and odd permutations are supported,
4982 and FALSE otherwise. */
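/* For example, with 4-element vectors A = {a0 a1 a2 a3} and
   B = {b0 b1 b2 b3}, the two permutations checked here are

     extract_even (A, B) = {a0 a2 b0 b2}   (selector {0 2 4 6})
     extract_odd  (A, B) = {a1 a3 b1 b3}   (selector {1 3 5 7})

   matching the selector vectors constructed below.  */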
4984 bool
4985 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4987 machine_mode mode = TYPE_MODE (vectype);
4989 /* vect_permute_load_chain requires the group size to be equal to 3 or
4990 be a power of two. */
4991 if (count != 3 && exact_log2 (count) == -1)
4993 if (dump_enabled_p ())
4994 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4995 "the size of the group of accesses"
4996 " is not a power of 2 or not equal to 3\n");
4997 return false;
5000 /* Check that the permutation is supported. */
5001 if (VECTOR_MODE_P (mode))
5003 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
5004 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5006 if (count == 3)
5008 unsigned int k;
5009 for (k = 0; k < 3; k++)
5011 for (i = 0; i < nelt; i++)
5012 if (3 * i + k < 2 * nelt)
5013 sel[i] = 3 * i + k;
5014 else
5015 sel[i] = 0;
5016 if (!can_vec_perm_p (mode, false, sel))
5018 if (dump_enabled_p ())
5019 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5020 "shuffle of 3 loads is not supported by"
5021 " target\n");
5022 return false;
5024 for (i = 0, j = 0; i < nelt; i++)
5025 if (3 * i + k < 2 * nelt)
5026 sel[i] = i;
5027 else
5028 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5029 if (!can_vec_perm_p (mode, false, sel))
5031 if (dump_enabled_p ())
5032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5033 "shuffle of 3 loads is not supported by"
5034 " target\n");
5035 return false;
5038 return true;
5040 else
5042 /* If length is not equal to 3 then only a power of 2 is supported. */
5043 gcc_assert (exact_log2 (count) != -1);
5044 for (i = 0; i < nelt; i++)
5045 sel[i] = i * 2;
5046 if (can_vec_perm_p (mode, false, sel))
5048 for (i = 0; i < nelt; i++)
5049 sel[i] = i * 2 + 1;
5050 if (can_vec_perm_p (mode, false, sel))
5051 return true;
5056 if (dump_enabled_p ())
5057 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5058 "extract even/odd not supported by target\n");
5059 return false;
5062 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5063 type VECTYPE. */
5065 bool
5066 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5068 return vect_lanes_optab_supported_p ("vec_load_lanes",
5069 vec_load_lanes_optab,
5070 vectype, count);
5073 /* Function vect_permute_load_chain.
5075 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5076 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5077 the input data correctly. Return the final references for loads in
5078 RESULT_CHAIN.
5080 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5081 The input is 4 vectors each containing 8 elements. We assign a number to each
5082 element, the input sequence is:
5084 1st vec: 0 1 2 3 4 5 6 7
5085 2nd vec: 8 9 10 11 12 13 14 15
5086 3rd vec: 16 17 18 19 20 21 22 23
5087 4th vec: 24 25 26 27 28 29 30 31
5089 The output sequence should be:
5091 1st vec: 0 4 8 12 16 20 24 28
5092 2nd vec: 1 5 9 13 17 21 25 29
5093 3rd vec: 2 6 10 14 18 22 26 30
5094 4th vec: 3 7 11 15 19 23 27 31
5096 i.e., the first output vector should contain the first elements of each
5097 interleaving group, etc.
5099 We use extract_even/odd instructions to create such output. The input of
5100 each extract_even/odd operation is two vectors
5101 1st vec 2nd vec
5102 0 1 2 3 4 5 6 7
5104 and the output is the vector of extracted even/odd elements. The output of
5105 extract_even will be: 0 2 4 6
5106 and of extract_odd: 1 3 5 7
5109 The permutation is done in log LENGTH stages. In each stage extract_even
5110 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5111 their order. In our example,
5113 E1: extract_even (1st vec, 2nd vec)
5114 E2: extract_odd (1st vec, 2nd vec)
5115 E3: extract_even (3rd vec, 4th vec)
5116 E4: extract_odd (3rd vec, 4th vec)
5118 The output for the first stage will be:
5120 E1: 0 2 4 6 8 10 12 14
5121 E2: 1 3 5 7 9 11 13 15
5122 E3: 16 18 20 22 24 26 28 30
5123 E4: 17 19 21 23 25 27 29 31
5125 In order to proceed and create the correct sequence for the next stage (or
5126 for the correct output, if the second stage is the last one, as in our
5127 example), we first put the output of extract_even operation and then the
5128 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5129 The input for the second stage is:
5131 1st vec (E1): 0 2 4 6 8 10 12 14
5132 2nd vec (E3): 16 18 20 22 24 26 28 30
5133 3rd vec (E2): 1 3 5 7 9 11 13 15
5134 4th vec (E4): 17 19 21 23 25 27 29 31
5136 The output of the second stage:
5138 E1: 0 4 8 12 16 20 24 28
5139 E2: 2 6 10 14 18 22 26 30
5140 E3: 1 5 9 13 17 21 25 29
5141 E4: 3 7 11 15 19 23 27 31
5143 And RESULT_CHAIN after reordering:
5145 1st vec (E1): 0 4 8 12 16 20 24 28
5146 2nd vec (E3): 1 5 9 13 17 21 25 29
5147 3rd vec (E2): 2 6 10 14 18 22 26 30
5148 4th vec (E4): 3 7 11 15 19 23 27 31. */
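/* The following standalone sketch (illustration only, not part of this file)
   is a scalar model of one extract-even/extract-odd stage as performed by the
   VEC_PERM_EXPRs below, for two NELT-element inputs A and B viewed as the
   concatenation A || B:  */
#if 0
static void
extract_even_odd_model (const int *a, const int *b, unsigned int nelt,
                        int *even, int *odd)
{
  for (unsigned int i = 0; i < nelt; i++)
    {
      /* Elements 2*i and 2*i + 1 of the concatenation A || B.  */
      even[i] = 2 * i < nelt ? a[2 * i] : b[2 * i - nelt];
      odd[i] = 2 * i + 1 < nelt ? a[2 * i + 1] : b[2 * i + 1 - nelt];
    }
}
#endif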
5150 static void
5151 vect_permute_load_chain (vec<tree> dr_chain,
5152 unsigned int length,
5153 gimple stmt,
5154 gimple_stmt_iterator *gsi,
5155 vec<tree> *result_chain)
5157 tree data_ref, first_vect, second_vect;
5158 tree perm_mask_even, perm_mask_odd;
5159 tree perm3_mask_low, perm3_mask_high;
5160 gimple perm_stmt;
5161 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5162 unsigned int i, j, log_length = exact_log2 (length);
5163 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5164 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5166 result_chain->quick_grow (length);
5167 memcpy (result_chain->address (), dr_chain.address (),
5168 length * sizeof (tree));
5170 if (length == 3)
5172 unsigned int k;
5174 for (k = 0; k < 3; k++)
5176 for (i = 0; i < nelt; i++)
5177 if (3 * i + k < 2 * nelt)
5178 sel[i] = 3 * i + k;
5179 else
5180 sel[i] = 0;
5181 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
5183 for (i = 0, j = 0; i < nelt; i++)
5184 if (3 * i + k < 2 * nelt)
5185 sel[i] = i;
5186 else
5187 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5189 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
5191 first_vect = dr_chain[0];
5192 second_vect = dr_chain[1];
5194 /* Create interleaving stmt (low part of):
5195 low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5196 ...}> */
5197 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5198 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5199 second_vect, perm3_mask_low);
5200 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5202 /* Create interleaving stmt (high part of):
5203 high = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5204 ...}> */
5205 first_vect = data_ref;
5206 second_vect = dr_chain[2];
5207 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5208 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5209 second_vect, perm3_mask_high);
5210 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5211 (*result_chain)[k] = data_ref;
5214 else
5216 /* If length is not equal to 3 then only a power of 2 is supported. */
5217 gcc_assert (exact_log2 (length) != -1);
5219 for (i = 0; i < nelt; ++i)
5220 sel[i] = i * 2;
5221 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
5223 for (i = 0; i < nelt; ++i)
5224 sel[i] = i * 2 + 1;
5225 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
5227 for (i = 0; i < log_length; i++)
5229 for (j = 0; j < length; j += 2)
5231 first_vect = dr_chain[j];
5232 second_vect = dr_chain[j+1];
5234 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5235 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5236 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5237 first_vect, second_vect,
5238 perm_mask_even);
5239 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5240 (*result_chain)[j/2] = data_ref;
5242 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5243 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5244 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5245 first_vect, second_vect,
5246 perm_mask_odd);
5247 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5248 (*result_chain)[j/2+length/2] = data_ref;
5250 memcpy (dr_chain.address (), result_chain->address (),
5251 length * sizeof (tree));
5256 /* Function vect_shift_permute_load_chain.
5258 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
5259 sequence of stmts to reorder the input data accordingly.
5260 Return the final references for loads in RESULT_CHAIN.
5261 Return true if successful, false otherwise.
5263 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5264 The input is 3 vectors each containing 8 elements. We assign a
5265 number to each element, the input sequence is:
5267 1st vec: 0 1 2 3 4 5 6 7
5268 2nd vec: 8 9 10 11 12 13 14 15
5269 3rd vec: 16 17 18 19 20 21 22 23
5271 The output sequence should be:
5273 1st vec: 0 3 6 9 12 15 18 21
5274 2nd vec: 1 4 7 10 13 16 19 22
5275 3rd vec: 2 5 8 11 14 17 20 23
5277 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5279 First we shuffle all 3 vectors to get correct elements order:
5281 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5282 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5283 3rd vec: (16 19 22) (17 20 23) (18 21)
5285 Next we unite and shift vector 3 times:
5287 1st step:
5288 shift right by 6 the concatenation of:
5289 "1st vec" and "2nd vec"
5290 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5291 "2nd vec" and "3rd vec"
5292 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5293 "3rd vec" and "1st vec"
5294 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5295 | New vectors |
5297 So that now new vectors are:
5299 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5300 2nd vec: (10 13) (16 19 22) (17 20 23)
5301 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5303 2nd step:
5304 shift right by 5 the concatenation of:
5305 "1st vec" and "3rd vec"
5306 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5307 "2nd vec" and "1st vec"
5308 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5309 "3rd vec" and "2nd vec"
5310 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5311 | New vectors |
5313 So that now new vectors are:
5315 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5316 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5317 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5319 3rd step:
5320 shift right by 5 the concatenation of:
5321 "1st vec" and "1st vec"
5322 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5323 shift right by 3 the concatenation of:
5324 "2nd vec" and "2nd vec"
5325 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5326 | New vectors |
5328 So that now all vectors are READY:
5329 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5330 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5331 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5333 This algorithm is faster than the one in vect_permute_load_chain if:
5334 1. "shift of a concatenation" is faster than general permutation.
5335 This is usually so.
5336 2. The TARGET machine can't execute vector instructions in parallel.
5337 This is because each step of the algorithm depends on the previous one.
5338 The algorithm in vect_permute_load_chain is much more parallel.
5340 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
5343 static bool
5344 vect_shift_permute_load_chain (vec<tree> dr_chain,
5345 unsigned int length,
5346 gimple stmt,
5347 gimple_stmt_iterator *gsi,
5348 vec<tree> *result_chain)
5350 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5351 tree perm2_mask1, perm2_mask2, perm3_mask;
5352 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5353 gimple perm_stmt;
5355 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5356 unsigned int i;
5357 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5358 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5359 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5360 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5362 result_chain->quick_grow (length);
5363 memcpy (result_chain->address (), dr_chain.address (),
5364 length * sizeof (tree));
5366 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
5368 unsigned int j, log_length = exact_log2 (length);
5369 for (i = 0; i < nelt / 2; ++i)
5370 sel[i] = i * 2;
5371 for (i = 0; i < nelt / 2; ++i)
5372 sel[nelt / 2 + i] = i * 2 + 1;
5373 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5375 if (dump_enabled_p ())
5376 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5377 "shuffle of 2 fields structure is not \
5378 supported by target\n");
5379 return false;
5381 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
5383 for (i = 0; i < nelt / 2; ++i)
5384 sel[i] = i * 2 + 1;
5385 for (i = 0; i < nelt / 2; ++i)
5386 sel[nelt / 2 + i] = i * 2;
5387 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5389 if (dump_enabled_p ())
5390 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5391 "shuffle of 2 fields structure is not \
5392 supported by target\n");
5393 return false;
5395 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
5397 /* Generating permutation constant to shift all elements.
5398 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5399 for (i = 0; i < nelt; i++)
5400 sel[i] = nelt / 2 + i;
5401 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5403 if (dump_enabled_p ())
5404 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5405 "shift permutation is not supported by target\n");
5406 return false;
5408 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5410 /* Generating permutation constant to select a vector from the two inputs.
5411 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5412 for (i = 0; i < nelt / 2; i++)
5413 sel[i] = i;
5414 for (i = nelt / 2; i < nelt; i++)
5415 sel[i] = nelt + i;
5416 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5418 if (dump_enabled_p ())
5419 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5420 "select is not supported by target\n");
5421 return false;
5423 select_mask = vect_gen_perm_mask_checked (vectype, sel);
5425 for (i = 0; i < log_length; i++)
5427 for (j = 0; j < length; j += 2)
5429 first_vect = dr_chain[j];
5430 second_vect = dr_chain[j + 1];
5432 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5433 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5434 first_vect, first_vect,
5435 perm2_mask1);
5436 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5437 vect[0] = data_ref;
5439 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5440 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5441 second_vect, second_vect,
5442 perm2_mask2);
5443 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5444 vect[1] = data_ref;
5446 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5447 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5448 vect[0], vect[1], shift1_mask);
5449 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5450 (*result_chain)[j/2 + length/2] = data_ref;
5452 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5453 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5454 vect[0], vect[1], select_mask);
5455 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5456 (*result_chain)[j/2] = data_ref;
5458 memcpy (dr_chain.address (), result_chain->address (),
5459 length * sizeof (tree));
5461 return true;
5463 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5465 unsigned int k = 0, l = 0;
5467 /* Generating permutation constant to get all elements in right order.
5468 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5469 for (i = 0; i < nelt; i++)
5471 if (3 * k + (l % 3) >= nelt)
5473 k = 0;
5474 l += (3 - (nelt % 3));
5476 sel[i] = 3 * k + (l % 3);
5477 k++;
5479 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5481 if (dump_enabled_p ())
5482 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5483 "shuffle of 3 fields structure is not \
5484 supported by target\n");
5485 return false;
5487 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
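/* Illustrative note, not in the original source: applied to a single input
   vector this mask groups the three interleaved fields (denoted a, b, c)
   together, e.g. for nelt == 8:
     {a0 b0 c0 a1 b1 c1 a2 b2} --perm3_mask--> {a0 a1 a2 b0 b1 b2 c0 c1}.  */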
5489 /* Generating permutation constant to shift all elements.
5490 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5491 for (i = 0; i < nelt; i++)
5492 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5493 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5495 if (dump_enabled_p ())
5496 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5497 "shift permutation is not supported by target\n");
5498 return false;
5500 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5502 /* Generating permutation constant to shift all elements.
5503 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5504 for (i = 0; i < nelt; i++)
5505 sel[i] = 2 * (nelt / 3) + 1 + i;
5506 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5508 if (dump_enabled_p ())
5509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5510 "shift permutation is not supported by target\n");
5511 return false;
5513 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
5515 /* Generating permutation constant to shift all elements.
5516 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5517 for (i = 0; i < nelt; i++)
5518 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5519 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5521 if (dump_enabled_p ())
5522 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5523 "shift permutation is not supported by target\n");
5524 return false;
5526 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
5528 /* Generating permutation constant to shift all elements.
5529 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5530 for (i = 0; i < nelt; i++)
5531 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5532 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5534 if (dump_enabled_p ())
5535 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5536 "shift permutation is not supported by target\n");
5537 return false;
5539 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
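/* Illustrative summary, not in the original source (hand-traced for
   nelt == 8): shift1_mask concatenates the tail of one field-grouped vector
   with the head of the next, shift2_mask recombines the shifted vectors so
   that each result holds elements of a single field, and shift3_mask /
   shift4_mask perform the final rotations that restore element order.  The
   loops below apply them in exactly that sequence.  */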
5541 for (k = 0; k < 3; k++)
5543 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5544 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5545 dr_chain[k], dr_chain[k],
5546 perm3_mask);
5547 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5548 vect[k] = data_ref;
5551 for (k = 0; k < 3; k++)
5553 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5554 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5555 vect[k % 3], vect[(k + 1) % 3],
5556 shift1_mask);
5557 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5558 vect_shift[k] = data_ref;
5561 for (k = 0; k < 3; k++)
5563 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5564 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5565 vect_shift[(4 - k) % 3],
5566 vect_shift[(3 - k) % 3],
5567 shift2_mask);
5568 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5569 vect[k] = data_ref;
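/* Hand-traced example for nelt == 8 (illustrative, fields denoted a, b, c):
   at this point vect[2] already holds field 1 in order ({b0 ... b7}), while
   vect[0] and vect[1] hold fields 2 and 0 rotated by a few elements; the two
   VEC_PERMs below rotate them into place.  The three results are stored at
   indices 3 - (nelt % 3), nelt % 3 and 0 of RESULT_CHAIN, i.e. 1, 2 and 0
   here.  */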
5572 (*result_chain)[3 - (nelt % 3)] = vect[2];
5574 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5575 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5576 vect[0], shift3_mask);
5577 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5578 (*result_chain)[nelt % 3] = data_ref;
5580 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5581 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5582 vect[1], shift4_mask);
5583 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5584 (*result_chain)[0] = data_ref;
5585 return true;
5587 return false;
5590 /* Function vect_transform_grouped_load.
5592 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5593 to perform their permutation and assign the resulting vectorized statements
5594 to the scalar statements.
5597 void
5598 vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
5599 gimple_stmt_iterator *gsi)
5601 machine_mode mode;
5602 vec<tree> result_chain = vNULL;
5604 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5605 RESULT_CHAIN is the output of vect_permute_load_chain; it contains permuted
5606 vectors that are ready for vector computation. */
5607 result_chain.create (size);
5609 /* If the reassociation width for the vector type is 2 or greater, the target
5610 machine can execute 2 or more vector instructions in parallel. Otherwise
5611 try to build the load group's chain using vect_shift_permute_load_chain. */
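/* For instance (illustrative, not in the original source): a group of size 3
   on a target whose reassociation width for VEC_PERM_EXPR is 1 tries the
   shift-based chain first and falls back to vect_permute_load_chain only if
   the required permutations are unsupported; power-of-two group sizes go
   straight to vect_permute_load_chain.  */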
5612 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5613 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5614 || exact_log2 (size) != -1
5615 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5616 gsi, &result_chain))
5617 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5618 vect_record_grouped_load_vectors (stmt, result_chain);
5619 result_chain.release ();
5622 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5623 generated as part of the vectorization of STMT. Assign the statement
5624 for each vector to the associated scalar statement. */
5626 void
5627 vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
5629 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5630 gimple next_stmt, new_stmt;
5631 unsigned int i, gap_count;
5632 tree tmp_data_ref;
5634 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5635 Since we scan the chain starting from its first node, their order
5636 corresponds to the order of data-refs in RESULT_CHAIN. */
5637 next_stmt = first_stmt;
5638 gap_count = 1;
5639 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5641 if (!next_stmt)
5642 break;
5644 /* Skip the gaps. Loads created for the gaps will be removed by the dead
5645 code elimination pass later. No need to check for the first stmt in
5646 the group, since it always exists.
5647 GROUP_GAP is the number of steps in elements from the previous
5648 access (if there is no gap GROUP_GAP is 1). We skip loads that
5649 correspond to the gaps. */
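/* For instance (illustrative, not in the original source): if the group
   accesses fields 0 and 2 of a three-field record, the second access has
   GROUP_GAP 2, so one entry of RESULT_CHAIN (the vector loaded for the
   unused field 1) is skipped here and later removed as dead code.  */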
5650 if (next_stmt != first_stmt
5651 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5653 gap_count++;
5654 continue;
5657 while (next_stmt)
5659 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5660 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5661 copies, and we put the new vector statement in the first available
5662 RELATED_STMT. */
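/* E.g. (illustrative): if the vectorization factor requires two vector
   copies per scalar load, the first copy is recorded in VEC_STMT here and
   each later copy is appended to the end of its RELATED_STMT chain by the
   walk below.  */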
5663 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5664 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5665 else
5667 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5669 gimple prev_stmt =
5670 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5671 gimple rel_stmt =
5672 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5673 while (rel_stmt)
5675 prev_stmt = rel_stmt;
5676 rel_stmt =
5677 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5680 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5681 new_stmt;
5685 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5686 gap_count = 1;
5687 /* If NEXT_STMT accesses the same DR as the previous statement,
5688 put the same TMP_DATA_REF as its vectorized statement; otherwise
5689 get the next data-ref from RESULT_CHAIN. */
5690 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5691 break;
5696 /* Function vect_can_force_dr_alignment_p.
5698 Returns whether the alignment of a DECL can be forced to be aligned
5699 on an ALIGNMENT-bit boundary. */
5701 bool
5702 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5704 if (TREE_CODE (decl) != VAR_DECL)
5705 return false;
5707 if (decl_in_symtab_p (decl)
5708 && !symtab_node::get (decl)->can_increase_alignment_p ())
5709 return false;
5711 if (TREE_STATIC (decl))
5712 return (alignment <= MAX_OFILE_ALIGNMENT);
5713 else
5714 return (alignment <= MAX_STACK_ALIGNMENT);
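/* Illustrative example, not in the original source: a file-scope
     static float a[1024];
   can have its alignment raised up to MAX_OFILE_ALIGNMENT bits, an automatic
   array is limited by MAX_STACK_ALIGNMENT, and a symbol whose alignment the
   symbol table says cannot be increased (e.g. one the compiler does not
   control) cannot be forced at all.  */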
5718 /* Return whether the data reference DR is supported with respect to its
5719 alignment.
5720 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5721 if it is aligned, i.e., check if it is possible to vectorize it with different
5722 alignment. */
5724 enum dr_alignment_support
5725 vect_supportable_dr_alignment (struct data_reference *dr,
5726 bool check_aligned_accesses)
5728 gimple stmt = DR_STMT (dr);
5729 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5730 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5731 machine_mode mode = TYPE_MODE (vectype);
5732 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5733 struct loop *vect_loop = NULL;
5734 bool nested_in_vect_loop = false;
5736 if (aligned_access_p (dr) && !check_aligned_accesses)
5737 return dr_aligned;
5739 /* For now assume all conditional loads/stores support unaligned
5740 access without any special code. */
5741 if (is_gimple_call (stmt)
5742 && gimple_call_internal_p (stmt)
5743 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5744 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5745 return dr_unaligned_supported;
5747 if (loop_vinfo)
5749 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5750 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5753 /* Possibly unaligned access. */
5755 /* We can choose between using the implicit realignment scheme (generating
5756 a misaligned_move stmt) and the explicit realignment scheme (generating
5757 aligned loads with a REALIGN_LOAD). There are two variants of the
5758 explicit realignment scheme: optimized, and unoptimized.
5759 We can optimize the realignment only if the step between consecutive
5760 vector loads is equal to the vector size. Since the vector memory
5761 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5762 is guaranteed that the misalignment amount remains the same throughout the
5763 execution of the vectorized loop. Therefore, we can create the
5764 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5765 at the loop preheader.
5767 However, in the case of outer-loop vectorization, when vectorizing a
5768 memory access in the inner-loop nested within the LOOP that is now being
5769 vectorized, while it is guaranteed that the misalignment of the
5770 vectorized memory access will remain the same in different outer-loop
5771 iterations, it is *not* guaranteed that it will remain the same throughout
5772 the execution of the inner-loop. This is because the inner-loop advances
5773 with the original scalar step (and not in steps of VS). If the inner-loop
5774 step happens to be a multiple of VS, then the misalignment remains fixed
5775 and we can use the optimized realignment scheme. For example:
5777 for (i=0; i<N; i++)
5778 for (j=0; j<M; j++)
5779 s += a[i+j];
5781 When vectorizing the i-loop in the above example, the step between
5782 consecutive vector loads is 1, and so the misalignment does not remain
5783 fixed across the execution of the inner-loop, and the realignment cannot
5784 be optimized (as illustrated in the following pseudo vectorized loop):
5786 for (i=0; i<N; i+=4)
5787 for (j=0; j<M; j++){
5788 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5789 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5790 // (assuming that we start from an aligned address).
5793 We therefore have to use the unoptimized realignment scheme:
5795 for (i=0; i<N; i+=4)
5796 for (j=k; j<M; j+=4)
5797 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5798 // that the misalignment of the initial address is
5799 // 0).
5801 The loop can then be vectorized as follows:
5803 for (k=0; k<4; k++){
5804 rt = get_realignment_token (&vp[k]);
5805 for (i=0; i<N; i+=4){
5806 v1 = vp[i+k];
5807 for (j=k; j<M; j+=4){
5808 v2 = vp[i+j+VS-1];
5809 va = REALIGN_LOAD <v1,v2,rt>;
5810 vs += va;
5811 v1 = v2;
5814 } */
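/* In terms of the values returned below (illustrative summary): the optimized
   explicit scheme corresponds to dr_explicit_realign_optimized, the
   unoptimized explicit scheme to dr_explicit_realign, and the implicit scheme
   (the target handling the misaligned access directly) to
   dr_unaligned_supported.  */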
5816 if (DR_IS_READ (dr))
5818 bool is_packed = false;
5819 tree type = (TREE_TYPE (DR_REF (dr)));
5821 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
5822 && (!targetm.vectorize.builtin_mask_for_load
5823 || targetm.vectorize.builtin_mask_for_load ()))
5825 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5826 if ((nested_in_vect_loop
5827 && (TREE_INT_CST_LOW (DR_STEP (dr))
5828 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5829 || !loop_vinfo)
5830 return dr_explicit_realign;
5831 else
5832 return dr_explicit_realign_optimized;
5834 if (!known_alignment_for_access_p (dr))
5835 is_packed = not_size_aligned (DR_REF (dr));
5837 if ((TYPE_USER_ALIGN (type) && !is_packed)
5838 || targetm.vectorize.
5839 support_vector_misalignment (mode, type,
5840 DR_MISALIGNMENT (dr), is_packed))
5841 /* Can't software pipeline the loads, but can at least do them. */
5842 return dr_unaligned_supported;
5844 else
5846 bool is_packed = false;
5847 tree type = (TREE_TYPE (DR_REF (dr)));
5849 if (!known_alignment_for_access_p (dr))
5850 is_packed = not_size_aligned (DR_REF (dr));
5852 if ((TYPE_USER_ALIGN (type) && !is_packed)
5853 || targetm.vectorize.
5854 support_vector_misalignment (mode, type,
5855 DR_MISALIGNMENT (dr), is_packed))
5856 return dr_unaligned_supported;
5859 /* Unsupported. */
5860 return dr_unaligned_unsupported;