* config/i386/predicates.md (general_reg_operand): Use GENERAL_REGNO_P.
[official-gcc.git] / gcc / tree-vect-data-refs.c
blob26d7d8faa442faa588e9d920cc99f54e2b271000
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "backend.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "rtl.h"
30 #include "ssa.h"
31 #include "alias.h"
32 #include "fold-const.h"
33 #include "stor-layout.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "gimple-pretty-print.h"
37 #include "internal-fn.h"
38 #include "tree-eh.h"
39 #include "gimplify.h"
40 #include "gimple-iterator.h"
41 #include "gimplify-me.h"
42 #include "tree-ssa-loop-ivopts.h"
43 #include "tree-ssa-loop-manip.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "tree-chrec.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "diagnostic-core.h"
50 #include "cgraph.h"
51 /* Need to include rtl.h, expr.h, etc. for optabs. */
52 #include "flags.h"
53 #include "insn-config.h"
54 #include "expmed.h"
55 #include "dojump.h"
56 #include "explow.h"
57 #include "calls.h"
58 #include "emit-rtl.h"
59 #include "varasm.h"
60 #include "stmt.h"
61 #include "expr.h"
62 #include "insn-codes.h"
63 #include "optabs.h"
64 #include "builtins.h"
66 /* Return true if load- or store-lanes optab OPTAB is implemented for
67 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
69 static bool
70 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
71 tree vectype, unsigned HOST_WIDE_INT count)
73 machine_mode mode, array_mode;
74 bool limit_p;
76 mode = TYPE_MODE (vectype);
77 limit_p = !targetm.array_mode_supported_p (mode, count);
78 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
79 MODE_INT, limit_p);
81 if (array_mode == BLKmode)
83 if (dump_enabled_p ())
84 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
85 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
86 GET_MODE_NAME (mode), count);
87 return false;
90 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
92 if (dump_enabled_p ())
93 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
94 "cannot use %s<%s><%s>\n", name,
95 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
96 return false;
99 if (dump_enabled_p ())
100 dump_printf_loc (MSG_NOTE, vect_location,
101 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
102 GET_MODE_NAME (mode));
104 return true;
108 /* Return the smallest scalar part of STMT.
109 This is used to determine the vectype of the stmt. We generally set the
110 vectype according to the type of the result (lhs). For stmts whose
111 result-type is different than the type of the arguments (e.g., demotion,
112 promotion), vectype will be reset appropriately (later). Note that we have
113 to visit the smallest datatype in this function, because that determines the
114 VF. If the smallest datatype in the loop is present only as the rhs of a
115 promotion operation - we'd miss it.
116 Such a case, where a variable of this datatype does not appear in the lhs
117 anywhere in the loop, can only occur if it's an invariant: e.g.:
118 'int_x = (int) short_inv', which we'd expect to have been optimized away by
119 invariant motion. However, we cannot rely on invariant motion to always
120 take invariants out of the loop, and so in the case of promotion we also
121 have to check the rhs.
122 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
123 types. */
125 tree
126 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
127 HOST_WIDE_INT *rhs_size_unit)
129 tree scalar_type = gimple_expr_type (stmt);
130 HOST_WIDE_INT lhs, rhs;
132 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
134 if (is_gimple_assign (stmt)
135 && (gimple_assign_cast_p (stmt)
136 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
137 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
138 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
140 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
142 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
143 if (rhs < lhs)
144 scalar_type = rhs_type;
147 *lhs_size_unit = lhs;
148 *rhs_size_unit = rhs;
149 return scalar_type;
153 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
154 tested at run-time. Return TRUE if DDR was successfully inserted.
155 Return false if versioning is not supported. */
157 static bool
158 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
160 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
162 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
163 return false;
165 if (dump_enabled_p ())
167 dump_printf_loc (MSG_NOTE, vect_location,
168 "mark for run-time aliasing test between ");
169 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
170 dump_printf (MSG_NOTE, " and ");
171 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
172 dump_printf (MSG_NOTE, "\n");
175 if (optimize_loop_nest_for_size_p (loop))
177 if (dump_enabled_p ())
178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
179 "versioning not supported when optimizing"
180 " for size.\n");
181 return false;
184 /* FORNOW: We don't support versioning with outer-loop vectorization. */
185 if (loop->inner)
187 if (dump_enabled_p ())
188 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
189 "versioning not yet supported for outer-loops.\n");
190 return false;
193 /* FORNOW: We don't support creating runtime alias tests for non-constant
194 step. */
195 if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
196 || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
198 if (dump_enabled_p ())
199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
200 "versioning not yet supported for non-constant "
201 "step\n");
202 return false;
205 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
206 return true;
210 /* Function vect_analyze_data_ref_dependence.
212 Return TRUE if there (might) exist a dependence between a memory-reference
213 DRA and a memory-reference DRB. When versioning for alias may check a
214 dependence at run-time, return FALSE. Adjust *MAX_VF according to
215 the data dependence. */
217 static bool
218 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
219 loop_vec_info loop_vinfo, int *max_vf)
221 unsigned int i;
222 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
223 struct data_reference *dra = DDR_A (ddr);
224 struct data_reference *drb = DDR_B (ddr);
225 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
226 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
227 lambda_vector dist_v;
228 unsigned int loop_depth;
230 /* In loop analysis all data references should be vectorizable. */
231 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
232 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
233 gcc_unreachable ();
235 /* Independent data accesses. */
236 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
237 return false;
239 if (dra == drb
240 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
241 return false;
243 /* Even if we have an anti-dependence then, as the vectorized loop covers at
244 least two scalar iterations, there is always also a true dependence.
245 As the vectorizer does not re-order loads and stores we can ignore
246 the anti-dependence if TBAA can disambiguate both DRs similar to the
247 case with known negative distance anti-dependences (positive
248 distance anti-dependences would violate TBAA constraints). */
249 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
250 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
251 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
252 get_alias_set (DR_REF (drb))))
253 return false;
255 /* Unknown data dependence. */
256 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
258 /* If user asserted safelen consecutive iterations can be
259 executed concurrently, assume independence. */
260 if (loop->safelen >= 2)
262 if (loop->safelen < *max_vf)
263 *max_vf = loop->safelen;
264 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
265 return false;
268 if (STMT_VINFO_GATHER_P (stmtinfo_a)
269 || STMT_VINFO_GATHER_P (stmtinfo_b))
271 if (dump_enabled_p ())
273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
274 "versioning for alias not supported for: "
275 "can't determine dependence between ");
276 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
277 DR_REF (dra));
278 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
279 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
280 DR_REF (drb));
281 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
283 return true;
286 if (dump_enabled_p ())
288 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
289 "versioning for alias required: "
290 "can't determine dependence between ");
291 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
292 DR_REF (dra));
293 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
294 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
295 DR_REF (drb));
296 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
299 /* Add to list of ddrs that need to be tested at run-time. */
300 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
303 /* Known data dependence. */
304 if (DDR_NUM_DIST_VECTS (ddr) == 0)
306 /* If user asserted safelen consecutive iterations can be
307 executed concurrently, assume independence. */
308 if (loop->safelen >= 2)
310 if (loop->safelen < *max_vf)
311 *max_vf = loop->safelen;
312 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
313 return false;
316 if (STMT_VINFO_GATHER_P (stmtinfo_a)
317 || STMT_VINFO_GATHER_P (stmtinfo_b))
319 if (dump_enabled_p ())
321 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
322 "versioning for alias not supported for: "
323 "bad dist vector for ");
324 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
325 DR_REF (dra));
326 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
327 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
328 DR_REF (drb));
329 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
331 return true;
334 if (dump_enabled_p ())
336 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
337 "versioning for alias required: "
338 "bad dist vector for ");
339 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
340 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
342 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
344 /* Add to list of ddrs that need to be tested at run-time. */
345 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
348 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
349 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
351 int dist = dist_v[loop_depth];
353 if (dump_enabled_p ())
354 dump_printf_loc (MSG_NOTE, vect_location,
355 "dependence distance = %d.\n", dist);
357 if (dist == 0)
359 if (dump_enabled_p ())
361 dump_printf_loc (MSG_NOTE, vect_location,
362 "dependence distance == 0 between ");
363 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
364 dump_printf (MSG_NOTE, " and ");
365 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
366 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
369 /* When we perform grouped accesses and perform implicit CSE
370 by detecting equal accesses and doing disambiguation with
371 runtime alias tests like for
372 .. = a[i];
373 .. = a[i+1];
374 a[i] = ..;
375 a[i+1] = ..;
376 *p = ..;
377 .. = a[i];
378 .. = a[i+1];
379 where we will end up loading { a[i], a[i+1] } once, make
380 sure that inserting group loads before the first load and
381 stores after the last store will do the right thing.
382 Similar for groups like
383 a[i] = ...;
384 ... = a[i];
385 a[i+1] = ...;
386 where loads from the group interleave with the store. */
387 if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
388 || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
390 gimple earlier_stmt;
391 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
392 if (DR_IS_WRITE
393 (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
395 if (dump_enabled_p ())
396 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
397 "READ_WRITE dependence in interleaving."
398 "\n");
399 return true;
403 continue;
406 if (dist > 0 && DDR_REVERSED_P (ddr))
408 /* If DDR_REVERSED_P the order of the data-refs in DDR was
409 reversed (to make distance vector positive), and the actual
410 distance is negative. */
411 if (dump_enabled_p ())
412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
413 "dependence distance negative.\n");
414 /* Record a negative dependence distance to later limit the
415 amount of stmt copying / unrolling we can perform.
416 Only need to handle read-after-write dependence. */
417 if (DR_IS_READ (drb)
418 && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
419 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
420 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
421 continue;
424 if (abs (dist) >= 2
425 && abs (dist) < *max_vf)
427 /* The dependence distance requires reduction of the maximal
428 vectorization factor. */
429 *max_vf = abs (dist);
430 if (dump_enabled_p ())
431 dump_printf_loc (MSG_NOTE, vect_location,
432 "adjusting maximal vectorization factor to %i\n",
433 *max_vf);
436 if (abs (dist) >= *max_vf)
438 /* Dependence distance does not create dependence, as far as
439 vectorization is concerned, in this case. */
440 if (dump_enabled_p ())
441 dump_printf_loc (MSG_NOTE, vect_location,
442 "dependence distance >= VF.\n");
443 continue;
446 if (dump_enabled_p ())
448 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
449 "not vectorized, possible dependence "
450 "between data-refs ");
451 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
452 dump_printf (MSG_NOTE, " and ");
453 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
454 dump_printf (MSG_NOTE, "\n");
457 return true;
460 return false;
463 /* Function vect_analyze_data_ref_dependences.
465 Examine all the data references in the loop, and make sure there do not
466 exist any data dependences between them. Set *MAX_VF according to
467 the maximum vectorization factor the data dependences allow. */
469 bool
470 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
472 unsigned int i;
473 struct data_dependence_relation *ddr;
475 if (dump_enabled_p ())
476 dump_printf_loc (MSG_NOTE, vect_location,
477 "=== vect_analyze_data_ref_dependences ===\n");
479 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
480 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
481 &LOOP_VINFO_DDRS (loop_vinfo),
482 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
483 return false;
485 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
486 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
487 return false;
489 return true;
493 /* Function vect_slp_analyze_data_ref_dependence.
495 Return TRUE if there (might) exist a dependence between a memory-reference
496 DRA and a memory-reference DRB. When versioning for alias may check a
497 dependence at run-time, return FALSE. Adjust *MAX_VF according to
498 the data dependence. */
500 static bool
501 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
503 struct data_reference *dra = DDR_A (ddr);
504 struct data_reference *drb = DDR_B (ddr);
506 /* We need to check dependences of statements marked as unvectorizable
507 as well, they still can prohibit vectorization. */
509 /* Independent data accesses. */
510 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
511 return false;
513 if (dra == drb)
514 return false;
516 /* Read-read is OK. */
517 if (DR_IS_READ (dra) && DR_IS_READ (drb))
518 return false;
520 /* If dra and drb are part of the same interleaving chain consider
521 them independent. */
522 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
523 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
524 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
525 return false;
527 /* Unknown data dependence. */
528 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
530 if (dump_enabled_p ())
532 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
533 "can't determine dependence between ");
534 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
535 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
536 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
537 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
540 else if (dump_enabled_p ())
542 dump_printf_loc (MSG_NOTE, vect_location,
543 "determined dependence between ");
544 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
545 dump_printf (MSG_NOTE, " and ");
546 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
547 dump_printf (MSG_NOTE, "\n");
550 /* We do not vectorize basic blocks with write-write dependencies. */
551 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
552 return true;
554 /* If we have a read-write dependence check that the load is before the store.
555 When we vectorize basic blocks, vector load can be only before
556 corresponding scalar load, and vector store can be only after its
557 corresponding scalar store. So the order of the acceses is preserved in
558 case the load is before the store. */
559 gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
560 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
562 /* That only holds for load-store pairs taking part in vectorization. */
563 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
564 && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
565 return false;
568 return true;
572 /* Function vect_analyze_data_ref_dependences.
574 Examine all the data references in the basic-block, and make sure there
575 do not exist any data dependences between them. Set *MAX_VF according to
576 the maximum vectorization factor the data dependences allow. */
578 bool
579 vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
581 struct data_dependence_relation *ddr;
582 unsigned int i;
584 if (dump_enabled_p ())
585 dump_printf_loc (MSG_NOTE, vect_location,
586 "=== vect_slp_analyze_data_ref_dependences ===\n");
588 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
589 &BB_VINFO_DDRS (bb_vinfo),
590 vNULL, true))
591 return false;
593 FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
594 if (vect_slp_analyze_data_ref_dependence (ddr))
595 return false;
597 return true;
601 /* Function vect_compute_data_ref_alignment
603 Compute the misalignment of the data reference DR.
605 Output:
606 1. If during the misalignment computation it is found that the data reference
607 cannot be vectorized then false is returned.
608 2. DR_MISALIGNMENT (DR) is defined.
610 FOR NOW: No analysis is actually performed. Misalignment is calculated
611 only for trivial cases. TODO. */
613 static bool
614 vect_compute_data_ref_alignment (struct data_reference *dr)
616 gimple stmt = DR_STMT (dr);
617 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
618 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
619 struct loop *loop = NULL;
620 tree ref = DR_REF (dr);
621 tree vectype;
622 tree base, base_addr;
623 bool base_aligned;
624 tree misalign = NULL_TREE;
625 tree aligned_to;
626 unsigned HOST_WIDE_INT alignment;
628 if (dump_enabled_p ())
629 dump_printf_loc (MSG_NOTE, vect_location,
630 "vect_compute_data_ref_alignment:\n");
632 if (loop_vinfo)
633 loop = LOOP_VINFO_LOOP (loop_vinfo);
635 /* Initialize misalignment to unknown. */
636 SET_DR_MISALIGNMENT (dr, -1);
638 /* Strided accesses perform only component accesses, misalignment information
639 is irrelevant for them. */
640 if (STMT_VINFO_STRIDED_P (stmt_info)
641 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
642 return true;
644 if (tree_fits_shwi_p (DR_STEP (dr)))
645 misalign = DR_INIT (dr);
646 aligned_to = DR_ALIGNED_TO (dr);
647 base_addr = DR_BASE_ADDRESS (dr);
648 vectype = STMT_VINFO_VECTYPE (stmt_info);
650 /* In case the dataref is in an inner-loop of the loop that is being
651 vectorized (LOOP), we use the base and misalignment information
652 relative to the outer-loop (LOOP). This is ok only if the misalignment
653 stays the same throughout the execution of the inner-loop, which is why
654 we have to check that the stride of the dataref in the inner-loop evenly
655 divides by the vector size. */
656 if (loop && nested_in_vect_loop_p (loop, stmt))
658 tree step = DR_STEP (dr);
660 if (tree_fits_shwi_p (step)
661 && tree_to_shwi (step) % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
663 if (dump_enabled_p ())
664 dump_printf_loc (MSG_NOTE, vect_location,
665 "inner step divides the vector-size.\n");
666 misalign = STMT_VINFO_DR_INIT (stmt_info);
667 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
668 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
670 else
672 if (dump_enabled_p ())
673 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
674 "inner step doesn't divide the vector-size.\n");
675 misalign = NULL_TREE;
679 /* Similarly we can only use base and misalignment information relative to
680 an innermost loop if the misalignment stays the same throughout the
681 execution of the loop. As above, this is the case if the stride of
682 the dataref evenly divides by the vector size. */
683 else
685 tree step = DR_STEP (dr);
686 unsigned vf = loop ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1;
688 if (tree_fits_shwi_p (step)
689 && ((tree_to_shwi (step) * vf)
690 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
692 if (dump_enabled_p ())
693 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
694 "step doesn't divide the vector-size.\n");
695 misalign = NULL_TREE;
699 alignment = TYPE_ALIGN_UNIT (vectype);
701 if ((compare_tree_int (aligned_to, alignment) < 0)
702 || !misalign)
704 if (dump_enabled_p ())
706 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
707 "Unknown alignment for access: ");
708 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
709 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
711 return true;
714 /* To look at alignment of the base we have to preserve an inner MEM_REF
715 as that carries alignment information of the actual access. */
716 base = ref;
717 while (handled_component_p (base))
718 base = TREE_OPERAND (base, 0);
719 if (TREE_CODE (base) == MEM_REF)
720 base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
721 build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
723 if (get_object_alignment (base) >= TYPE_ALIGN (vectype))
724 base_aligned = true;
725 else
726 base_aligned = false;
728 if (!base_aligned)
730 /* Strip an inner MEM_REF to a bare decl if possible. */
731 if (TREE_CODE (base) == MEM_REF
732 && integer_zerop (TREE_OPERAND (base, 1))
733 && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
734 base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
736 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
738 if (dump_enabled_p ())
740 dump_printf_loc (MSG_NOTE, vect_location,
741 "can't force alignment of ref: ");
742 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
743 dump_printf (MSG_NOTE, "\n");
745 return true;
748 /* Force the alignment of the decl.
749 NOTE: This is the only change to the code we make during
750 the analysis phase, before deciding to vectorize the loop. */
751 if (dump_enabled_p ())
753 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
754 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
755 dump_printf (MSG_NOTE, "\n");
758 ((dataref_aux *)dr->aux)->base_decl = base;
759 ((dataref_aux *)dr->aux)->base_misaligned = true;
762 /* If this is a backward running DR then first access in the larger
763 vectype actually is N-1 elements before the address in the DR.
764 Adjust misalign accordingly. */
765 if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
767 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
768 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
769 otherwise we wouldn't be here. */
770 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
771 /* PLUS because DR_STEP was negative. */
772 misalign = size_binop (PLUS_EXPR, misalign, offset);
775 SET_DR_MISALIGNMENT (dr,
776 wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
778 if (dump_enabled_p ())
780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
781 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
782 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
783 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
786 return true;
790 /* Function vect_compute_data_refs_alignment
792 Compute the misalignment of data references in the loop.
793 Return FALSE if a data reference is found that cannot be vectorized. */
795 static bool
796 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
797 bb_vec_info bb_vinfo)
799 vec<data_reference_p> datarefs;
800 struct data_reference *dr;
801 unsigned int i;
803 if (loop_vinfo)
804 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
805 else
806 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
808 FOR_EACH_VEC_ELT (datarefs, i, dr)
809 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
810 && !vect_compute_data_ref_alignment (dr))
812 if (bb_vinfo)
814 /* Mark unsupported statement as unvectorizable. */
815 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
816 continue;
818 else
819 return false;
822 return true;
826 /* Function vect_update_misalignment_for_peel
828 DR - the data reference whose misalignment is to be adjusted.
829 DR_PEEL - the data reference whose misalignment is being made
830 zero in the vector loop by the peel.
831 NPEEL - the number of iterations in the peel loop if the misalignment
832 of DR_PEEL is known at compile time. */
834 static void
835 vect_update_misalignment_for_peel (struct data_reference *dr,
836 struct data_reference *dr_peel, int npeel)
838 unsigned int i;
839 vec<dr_p> same_align_drs;
840 struct data_reference *current_dr;
841 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
842 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
843 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
844 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
846 /* For interleaved data accesses the step in the loop must be multiplied by
847 the size of the interleaving group. */
848 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
849 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
850 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
851 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
853 /* It can be assumed that the data refs with the same alignment as dr_peel
854 are aligned in the vector loop. */
855 same_align_drs
856 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
857 FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
859 if (current_dr != dr)
860 continue;
861 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
862 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
863 SET_DR_MISALIGNMENT (dr, 0);
864 return;
867 if (known_alignment_for_access_p (dr)
868 && known_alignment_for_access_p (dr_peel))
870 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
871 int misal = DR_MISALIGNMENT (dr);
872 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
873 misal += negative ? -npeel * dr_size : npeel * dr_size;
874 misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
875 SET_DR_MISALIGNMENT (dr, misal);
876 return;
879 if (dump_enabled_p ())
880 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
881 SET_DR_MISALIGNMENT (dr, -1);
885 /* Function vect_verify_datarefs_alignment
887 Return TRUE if all data references in the loop can be
888 handled with respect to alignment. */
890 bool
891 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
893 vec<data_reference_p> datarefs;
894 struct data_reference *dr;
895 enum dr_alignment_support supportable_dr_alignment;
896 unsigned int i;
898 if (loop_vinfo)
899 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
900 else
901 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
903 FOR_EACH_VEC_ELT (datarefs, i, dr)
905 gimple stmt = DR_STMT (dr);
906 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
908 if (!STMT_VINFO_RELEVANT_P (stmt_info))
909 continue;
911 /* For interleaving, only the alignment of the first access matters.
912 Skip statements marked as not vectorizable. */
913 if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
914 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
915 || !STMT_VINFO_VECTORIZABLE (stmt_info))
916 continue;
918 /* Strided accesses perform only component accesses, alignment is
919 irrelevant for them. */
920 if (STMT_VINFO_STRIDED_P (stmt_info)
921 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
922 continue;
924 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
925 if (!supportable_dr_alignment)
927 if (dump_enabled_p ())
929 if (DR_IS_READ (dr))
930 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
931 "not vectorized: unsupported unaligned load.");
932 else
933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
934 "not vectorized: unsupported unaligned "
935 "store.");
937 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
938 DR_REF (dr));
939 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
941 return false;
943 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
944 dump_printf_loc (MSG_NOTE, vect_location,
945 "Vectorizing an unaligned access.\n");
947 return true;
950 /* Given an memory reference EXP return whether its alignment is less
951 than its size. */
953 static bool
954 not_size_aligned (tree exp)
956 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
957 return true;
959 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
960 > get_object_alignment (exp));
963 /* Function vector_alignment_reachable_p
965 Return true if vector alignment for DR is reachable by peeling
966 a few loop iterations. Return false otherwise. */
968 static bool
969 vector_alignment_reachable_p (struct data_reference *dr)
971 gimple stmt = DR_STMT (dr);
972 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
973 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
975 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
977 /* For interleaved access we peel only if number of iterations in
978 the prolog loop ({VF - misalignment}), is a multiple of the
979 number of the interleaved accesses. */
980 int elem_size, mis_in_elements;
981 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
983 /* FORNOW: handle only known alignment. */
984 if (!known_alignment_for_access_p (dr))
985 return false;
987 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
988 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
990 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
991 return false;
994 /* If misalignment is known at the compile time then allow peeling
995 only if natural alignment is reachable through peeling. */
996 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
998 HOST_WIDE_INT elmsize =
999 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1000 if (dump_enabled_p ())
1002 dump_printf_loc (MSG_NOTE, vect_location,
1003 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1004 dump_printf (MSG_NOTE,
1005 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
1007 if (DR_MISALIGNMENT (dr) % elmsize)
1009 if (dump_enabled_p ())
1010 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1011 "data size does not divide the misalignment.\n");
1012 return false;
1016 if (!known_alignment_for_access_p (dr))
1018 tree type = TREE_TYPE (DR_REF (dr));
1019 bool is_packed = not_size_aligned (DR_REF (dr));
1020 if (dump_enabled_p ())
1021 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1022 "Unknown misalignment, is_packed = %d\n",is_packed);
1023 if ((TYPE_USER_ALIGN (type) && !is_packed)
1024 || targetm.vectorize.vector_alignment_reachable (type, is_packed))
1025 return true;
1026 else
1027 return false;
1030 return true;
1034 /* Calculate the cost of the memory access represented by DR. */
1036 static void
1037 vect_get_data_access_cost (struct data_reference *dr,
1038 unsigned int *inside_cost,
1039 unsigned int *outside_cost,
1040 stmt_vector_for_cost *body_cost_vec)
1042 gimple stmt = DR_STMT (dr);
1043 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1044 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1045 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1046 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1047 int ncopies = vf / nunits;
1049 if (DR_IS_READ (dr))
1050 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1051 NULL, body_cost_vec, false);
1052 else
1053 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1055 if (dump_enabled_p ())
1056 dump_printf_loc (MSG_NOTE, vect_location,
1057 "vect_get_data_access_cost: inside_cost = %d, "
1058 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1062 /* Insert DR into peeling hash table with NPEEL as key. */
1064 static void
1065 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1066 int npeel)
1068 struct _vect_peel_info elem, *slot;
1069 _vect_peel_info **new_slot;
1070 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1072 elem.npeel = npeel;
1073 slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
1074 if (slot)
1075 slot->count++;
1076 else
1078 slot = XNEW (struct _vect_peel_info);
1079 slot->npeel = npeel;
1080 slot->dr = dr;
1081 slot->count = 1;
1082 new_slot
1083 = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
1084 *new_slot = slot;
1087 if (!supportable_dr_alignment
1088 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1089 slot->count += VECT_MAX_COST;
1093 /* Traverse peeling hash table to find peeling option that aligns maximum
1094 number of data accesses. */
1097 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1098 _vect_peel_extended_info *max)
1100 vect_peel_info elem = *slot;
1102 if (elem->count > max->peel_info.count
1103 || (elem->count == max->peel_info.count
1104 && max->peel_info.npeel > elem->npeel))
1106 max->peel_info.npeel = elem->npeel;
1107 max->peel_info.count = elem->count;
1108 max->peel_info.dr = elem->dr;
1111 return 1;
1115 /* Traverse peeling hash table and calculate cost for each peeling option.
1116 Find the one with the lowest cost. */
1119 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1120 _vect_peel_extended_info *min)
1122 vect_peel_info elem = *slot;
1123 int save_misalignment, dummy;
1124 unsigned int inside_cost = 0, outside_cost = 0, i;
1125 gimple stmt = DR_STMT (elem->dr);
1126 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1127 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1128 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1129 struct data_reference *dr;
1130 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
1132 prologue_cost_vec.create (2);
1133 body_cost_vec.create (2);
1134 epilogue_cost_vec.create (2);
1136 FOR_EACH_VEC_ELT (datarefs, i, dr)
1138 stmt = DR_STMT (dr);
1139 stmt_info = vinfo_for_stmt (stmt);
1140 /* For interleaving, only the alignment of the first access
1141 matters. */
1142 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1143 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1144 continue;
1146 save_misalignment = DR_MISALIGNMENT (dr);
1147 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1148 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1149 &body_cost_vec);
1150 SET_DR_MISALIGNMENT (dr, save_misalignment);
1153 outside_cost += vect_get_known_peeling_cost
1154 (loop_vinfo, elem->npeel, &dummy,
1155 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1156 &prologue_cost_vec, &epilogue_cost_vec);
1158 /* Prologue and epilogue costs are added to the target model later.
1159 These costs depend only on the scalar iteration cost, the
1160 number of peeling iterations finally chosen, and the number of
1161 misaligned statements. So discard the information found here. */
1162 prologue_cost_vec.release ();
1163 epilogue_cost_vec.release ();
1165 if (inside_cost < min->inside_cost
1166 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1168 min->inside_cost = inside_cost;
1169 min->outside_cost = outside_cost;
1170 min->body_cost_vec.release ();
1171 min->body_cost_vec = body_cost_vec;
1172 min->peel_info.dr = elem->dr;
1173 min->peel_info.npeel = elem->npeel;
1175 else
1176 body_cost_vec.release ();
1178 return 1;
1182 /* Choose best peeling option by traversing peeling hash table and either
1183 choosing an option with the lowest cost (if cost model is enabled) or the
1184 option that aligns as many accesses as possible. */
1186 static struct data_reference *
1187 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1188 unsigned int *npeel,
1189 stmt_vector_for_cost *body_cost_vec)
1191 struct _vect_peel_extended_info res;
1193 res.peel_info.dr = NULL;
1194 res.body_cost_vec = stmt_vector_for_cost ();
1196 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1198 res.inside_cost = INT_MAX;
1199 res.outside_cost = INT_MAX;
1200 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1201 ->traverse <_vect_peel_extended_info *,
1202 vect_peeling_hash_get_lowest_cost> (&res);
1204 else
1206 res.peel_info.count = 0;
1207 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1208 ->traverse <_vect_peel_extended_info *,
1209 vect_peeling_hash_get_most_frequent> (&res);
1212 *npeel = res.peel_info.npeel;
1213 *body_cost_vec = res.body_cost_vec;
1214 return res.peel_info.dr;
1218 /* Function vect_enhance_data_refs_alignment
1220 This pass will use loop versioning and loop peeling in order to enhance
1221 the alignment of data references in the loop.
1223 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1224 original loop is to be vectorized. Any other loops that are created by
1225 the transformations performed in this pass - are not supposed to be
1226 vectorized. This restriction will be relaxed.
1228 This pass will require a cost model to guide it whether to apply peeling
1229 or versioning or a combination of the two. For example, the scheme that
1230 intel uses when given a loop with several memory accesses, is as follows:
1231 choose one memory access ('p') which alignment you want to force by doing
1232 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1233 other accesses are not necessarily aligned, or (2) use loop versioning to
1234 generate one loop in which all accesses are aligned, and another loop in
1235 which only 'p' is necessarily aligned.
1237 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1238 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
1239 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1241 Devising a cost model is the most critical aspect of this work. It will
1242 guide us on which access to peel for, whether to use loop versioning, how
1243 many versions to create, etc. The cost model will probably consist of
1244 generic considerations as well as target specific considerations (on
1245 powerpc for example, misaligned stores are more painful than misaligned
1246 loads).
1248 Here are the general steps involved in alignment enhancements:
1250 -- original loop, before alignment analysis:
1251 for (i=0; i<N; i++){
1252 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1253 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1256 -- After vect_compute_data_refs_alignment:
1257 for (i=0; i<N; i++){
1258 x = q[i]; # DR_MISALIGNMENT(q) = 3
1259 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1262 -- Possibility 1: we do loop versioning:
1263 if (p is aligned) {
1264 for (i=0; i<N; i++){ # loop 1A
1265 x = q[i]; # DR_MISALIGNMENT(q) = 3
1266 p[i] = y; # DR_MISALIGNMENT(p) = 0
1269 else {
1270 for (i=0; i<N; i++){ # loop 1B
1271 x = q[i]; # DR_MISALIGNMENT(q) = 3
1272 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1276 -- Possibility 2: we do loop peeling:
1277 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1278 x = q[i];
1279 p[i] = y;
1281 for (i = 3; i < N; i++){ # loop 2A
1282 x = q[i]; # DR_MISALIGNMENT(q) = 0
1283 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1286 -- Possibility 3: combination of loop peeling and versioning:
1287 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1288 x = q[i];
1289 p[i] = y;
1291 if (p is aligned) {
1292 for (i = 3; i<N; i++){ # loop 3A
1293 x = q[i]; # DR_MISALIGNMENT(q) = 0
1294 p[i] = y; # DR_MISALIGNMENT(p) = 0
1297 else {
1298 for (i = 3; i<N; i++){ # loop 3B
1299 x = q[i]; # DR_MISALIGNMENT(q) = 0
1300 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1304 These loops are later passed to loop_transform to be vectorized. The
1305 vectorizer will use the alignment information to guide the transformation
1306 (whether to generate regular loads/stores, or with special handling for
1307 misalignment). */
1309 bool
1310 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1312 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1313 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1314 enum dr_alignment_support supportable_dr_alignment;
1315 struct data_reference *dr0 = NULL, *first_store = NULL;
1316 struct data_reference *dr;
1317 unsigned int i, j;
1318 bool do_peeling = false;
1319 bool do_versioning = false;
1320 bool stat;
1321 gimple stmt;
1322 stmt_vec_info stmt_info;
1323 unsigned int npeel = 0;
1324 bool all_misalignments_unknown = true;
1325 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1326 unsigned possible_npeel_number = 1;
1327 tree vectype;
1328 unsigned int nelements, mis, same_align_drs_max = 0;
1329 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
1331 if (dump_enabled_p ())
1332 dump_printf_loc (MSG_NOTE, vect_location,
1333 "=== vect_enhance_data_refs_alignment ===\n");
1335 /* While cost model enhancements are expected in the future, the high level
1336 view of the code at this time is as follows:
1338 A) If there is a misaligned access then see if peeling to align
1339 this access can make all data references satisfy
1340 vect_supportable_dr_alignment. If so, update data structures
1341 as needed and return true.
1343 B) If peeling wasn't possible and there is a data reference with an
1344 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1345 then see if loop versioning checks can be used to make all data
1346 references satisfy vect_supportable_dr_alignment. If so, update
1347 data structures as needed and return true.
1349 C) If neither peeling nor versioning were successful then return false if
1350 any data reference does not satisfy vect_supportable_dr_alignment.
1352 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1354 Note, Possibility 3 above (which is peeling and versioning together) is not
1355 being done at this time. */
1357 /* (1) Peeling to force alignment. */
1359 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1360 Considerations:
1361 + How many accesses will become aligned due to the peeling
1362 - How many accesses will become unaligned due to the peeling,
1363 and the cost of misaligned accesses.
1364 - The cost of peeling (the extra runtime checks, the increase
1365 in code size). */
1367 FOR_EACH_VEC_ELT (datarefs, i, dr)
1369 stmt = DR_STMT (dr);
1370 stmt_info = vinfo_for_stmt (stmt);
1372 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1373 continue;
1375 /* For interleaving, only the alignment of the first access
1376 matters. */
1377 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1378 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1379 continue;
1381 /* For invariant accesses there is nothing to enhance. */
1382 if (integer_zerop (DR_STEP (dr)))
1383 continue;
1385 /* Strided accesses perform only component accesses, alignment is
1386 irrelevant for them. */
1387 if (STMT_VINFO_STRIDED_P (stmt_info)
1388 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1389 continue;
1391 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1392 do_peeling = vector_alignment_reachable_p (dr);
1393 if (do_peeling)
1395 if (known_alignment_for_access_p (dr))
1397 unsigned int npeel_tmp;
1398 bool negative = tree_int_cst_compare (DR_STEP (dr),
1399 size_zero_node) < 0;
1401 /* Save info about DR in the hash table. */
1402 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1403 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1404 = new hash_table<peel_info_hasher> (1);
1406 vectype = STMT_VINFO_VECTYPE (stmt_info);
1407 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1408 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1409 TREE_TYPE (DR_REF (dr))));
1410 npeel_tmp = (negative
1411 ? (mis - nelements) : (nelements - mis))
1412 & (nelements - 1);
1414 /* For multiple types, it is possible that the bigger type access
1415 will have more than one peeling option. E.g., a loop with two
1416 types: one of size (vector size / 4), and the other one of
1417 size (vector size / 8). Vectorization factor will 8. If both
1418 access are misaligned by 3, the first one needs one scalar
1419 iteration to be aligned, and the second one needs 5. But the
1420 the first one will be aligned also by peeling 5 scalar
1421 iterations, and in that case both accesses will be aligned.
1422 Hence, except for the immediate peeling amount, we also want
1423 to try to add full vector size, while we don't exceed
1424 vectorization factor.
1425 We do this automtically for cost model, since we calculate cost
1426 for every peeling option. */
1427 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1429 if (STMT_SLP_TYPE (stmt_info))
1430 possible_npeel_number
1431 = (vf * GROUP_SIZE (stmt_info)) / nelements;
1432 else
1433 possible_npeel_number = vf / nelements;
1436 /* Handle the aligned case. We may decide to align some other
1437 access, making DR unaligned. */
1438 if (DR_MISALIGNMENT (dr) == 0)
1440 npeel_tmp = 0;
1441 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1442 possible_npeel_number++;
1445 for (j = 0; j < possible_npeel_number; j++)
1447 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1448 npeel_tmp += nelements;
1451 all_misalignments_unknown = false;
1452 /* Data-ref that was chosen for the case that all the
1453 misalignments are unknown is not relevant anymore, since we
1454 have a data-ref with known alignment. */
1455 dr0 = NULL;
1457 else
1459 /* If we don't know any misalignment values, we prefer
1460 peeling for data-ref that has the maximum number of data-refs
1461 with the same alignment, unless the target prefers to align
1462 stores over load. */
1463 if (all_misalignments_unknown)
1465 unsigned same_align_drs
1466 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1467 if (!dr0
1468 || same_align_drs_max < same_align_drs)
1470 same_align_drs_max = same_align_drs;
1471 dr0 = dr;
1473 /* For data-refs with the same number of related
1474 accesses prefer the one where the misalign
1475 computation will be invariant in the outermost loop. */
1476 else if (same_align_drs_max == same_align_drs)
1478 struct loop *ivloop0, *ivloop;
1479 ivloop0 = outermost_invariant_loop_for_expr
1480 (loop, DR_BASE_ADDRESS (dr0));
1481 ivloop = outermost_invariant_loop_for_expr
1482 (loop, DR_BASE_ADDRESS (dr));
1483 if ((ivloop && !ivloop0)
1484 || (ivloop && ivloop0
1485 && flow_loop_nested_p (ivloop, ivloop0)))
1486 dr0 = dr;
1489 if (!first_store && DR_IS_WRITE (dr))
1490 first_store = dr;
1493 /* If there are both known and unknown misaligned accesses in the
1494 loop, we choose peeling amount according to the known
1495 accesses. */
1496 if (!supportable_dr_alignment)
1498 dr0 = dr;
1499 if (!first_store && DR_IS_WRITE (dr))
1500 first_store = dr;
1504 else
1506 if (!aligned_access_p (dr))
1508 if (dump_enabled_p ())
1509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1510 "vector alignment may not be reachable\n");
1511 break;
1516 /* Check if we can possibly peel the loop. */
1517 if (!vect_can_advance_ivs_p (loop_vinfo)
1518 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1519 do_peeling = false;
1521 if (do_peeling
1522 && all_misalignments_unknown
1523 && vect_supportable_dr_alignment (dr0, false))
1525 /* Check if the target requires to prefer stores over loads, i.e., if
1526 misaligned stores are more expensive than misaligned loads (taking
1527 drs with same alignment into account). */
1528 if (first_store && DR_IS_READ (dr0))
1530 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1531 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1532 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1533 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1534 stmt_vector_for_cost dummy;
1535 dummy.create (2);
1537 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1538 &dummy);
1539 vect_get_data_access_cost (first_store, &store_inside_cost,
1540 &store_outside_cost, &dummy);
1542 dummy.release ();
1544 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1545 aligning the load DR0). */
1546 load_inside_penalty = store_inside_cost;
1547 load_outside_penalty = store_outside_cost;
1548 for (i = 0;
1549 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1550 DR_STMT (first_store))).iterate (i, &dr);
1551 i++)
1552 if (DR_IS_READ (dr))
1554 load_inside_penalty += load_inside_cost;
1555 load_outside_penalty += load_outside_cost;
1557 else
1559 load_inside_penalty += store_inside_cost;
1560 load_outside_penalty += store_outside_cost;
1563 /* Calculate the penalty for leaving DR0 unaligned (by
1564 aligning the FIRST_STORE). */
1565 store_inside_penalty = load_inside_cost;
1566 store_outside_penalty = load_outside_cost;
1567 for (i = 0;
1568 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1569 DR_STMT (dr0))).iterate (i, &dr);
1570 i++)
1571 if (DR_IS_READ (dr))
1573 store_inside_penalty += load_inside_cost;
1574 store_outside_penalty += load_outside_cost;
1576 else
1578 store_inside_penalty += store_inside_cost;
1579 store_outside_penalty += store_outside_cost;
1582 if (load_inside_penalty > store_inside_penalty
1583 || (load_inside_penalty == store_inside_penalty
1584 && load_outside_penalty > store_outside_penalty))
1585 dr0 = first_store;
1588 /* In case there are only loads with different unknown misalignments, use
1589 peeling only if it may help to align other accesses in the loop or
1590 if it may help improving load bandwith when we'd end up using
1591 unaligned loads. */
1592 tree dr0_vt = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr0)));
1593 if (!first_store
1594 && !STMT_VINFO_SAME_ALIGN_REFS (
1595 vinfo_for_stmt (DR_STMT (dr0))).length ()
1596 && (vect_supportable_dr_alignment (dr0, false)
1597 != dr_unaligned_supported
1598 || (builtin_vectorization_cost (vector_load, dr0_vt, 0)
1599 == builtin_vectorization_cost (unaligned_load, dr0_vt, -1))))
1600 do_peeling = false;
1603 if (do_peeling && !dr0)
1605 /* Peeling is possible, but there is no data access that is not supported
1606 unless aligned. So we try to choose the best possible peeling. */
1608 /* We should get here only if there are drs with known misalignment. */
1609 gcc_assert (!all_misalignments_unknown);
1611 /* Choose the best peeling from the hash table. */
1612 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
1613 &body_cost_vec);
1614 if (!dr0 || !npeel)
1615 do_peeling = false;
1618 if (do_peeling)
1620 stmt = DR_STMT (dr0);
1621 stmt_info = vinfo_for_stmt (stmt);
1622 vectype = STMT_VINFO_VECTYPE (stmt_info);
1623 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1625 if (known_alignment_for_access_p (dr0))
1627 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1628 size_zero_node) < 0;
1629 if (!npeel)
1631 /* Since it's known at compile time, compute the number of
1632 iterations in the peeled loop (the peeling factor) for use in
1633 updating DR_MISALIGNMENT values. The peeling factor is the
1634 vectorization factor minus the misalignment as an element
1635 count. */
1636 mis = DR_MISALIGNMENT (dr0);
1637 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1638 npeel = ((negative ? mis - nelements : nelements - mis)
1639 & (nelements - 1));
1642 /* For interleaved data access every iteration accesses all the
1643 members of the group, therefore we divide the number of iterations
1644 by the group size. */
1645 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1646 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1647 npeel /= GROUP_SIZE (stmt_info);
1649 if (dump_enabled_p ())
1650 dump_printf_loc (MSG_NOTE, vect_location,
1651 "Try peeling by %d\n", npeel);
1654 /* Ensure that all data refs can be vectorized after the peel. */
1655 FOR_EACH_VEC_ELT (datarefs, i, dr)
1657 int save_misalignment;
1659 if (dr == dr0)
1660 continue;
1662 stmt = DR_STMT (dr);
1663 stmt_info = vinfo_for_stmt (stmt);
1664 /* For interleaving, only the alignment of the first access
1665 matters. */
1666 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1667 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1668 continue;
1670 /* Strided accesses perform only component accesses, alignment is
1671 irrelevant for them. */
1672 if (STMT_VINFO_STRIDED_P (stmt_info)
1673 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1674 continue;
1676 save_misalignment = DR_MISALIGNMENT (dr);
1677 vect_update_misalignment_for_peel (dr, dr0, npeel);
1678 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1679 SET_DR_MISALIGNMENT (dr, save_misalignment);
1681 if (!supportable_dr_alignment)
1683 do_peeling = false;
1684 break;
1688 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1690 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1691 if (!stat)
1692 do_peeling = false;
1693 else
1695 body_cost_vec.release ();
1696 return stat;
1700 /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
1701 if (do_peeling)
1703 unsigned max_allowed_peel
1704 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1705 if (max_allowed_peel != (unsigned)-1)
1707 unsigned max_peel = npeel;
1708 if (max_peel == 0)
1710 gimple dr_stmt = DR_STMT (dr0);
1711 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1712 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1713 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1715 if (max_peel > max_allowed_peel)
1717 do_peeling = false;
1718 if (dump_enabled_p ())
1719 dump_printf_loc (MSG_NOTE, vect_location,
1720 "Disable peeling, max peels reached: %d\n", max_peel);
1725 /* Cost model #2 - if peeling may result in a remaining loop not
1726 iterating enough to be vectorized then do not peel. */
1727 if (do_peeling
1728 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1730 unsigned max_peel
1731 = npeel == 0 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 : npeel;
1732 if (LOOP_VINFO_INT_NITERS (loop_vinfo)
1733 < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + max_peel)
1734 do_peeling = false;
1737 if (do_peeling)
1739 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1740 If the misalignment of DR_i is identical to that of dr0 then set
1741 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1742 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1743 by the peeling factor times the element size of DR_i (MOD the
1744 vectorization factor times the size). Otherwise, the
1745 misalignment of DR_i must be set to unknown. */
1746 FOR_EACH_VEC_ELT (datarefs, i, dr)
1747 if (dr != dr0)
1748 vect_update_misalignment_for_peel (dr, dr0, npeel);
1750 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1751 if (npeel)
1752 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1753 else
1754 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1755 = DR_MISALIGNMENT (dr0);
1756 SET_DR_MISALIGNMENT (dr0, 0);
1757 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_NOTE, vect_location,
1760 "Alignment of access forced using peeling.\n");
1761 dump_printf_loc (MSG_NOTE, vect_location,
1762 "Peeling for alignment will be applied.\n");
1764 /* The inside-loop cost will be accounted for in vectorizable_load
1765 and vectorizable_store correctly with adjusted alignments.
1766 Drop the body_cst_vec on the floor here. */
1767 body_cost_vec.release ();
1769 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1770 gcc_assert (stat);
1771 return stat;
1775 body_cost_vec.release ();
1777 /* (2) Versioning to force alignment. */
1779 /* Try versioning if:
1780 1) optimize loop for speed
1781 2) there is at least one unsupported misaligned data ref with an unknown
1782 misalignment, and
1783 3) all misaligned data refs with a known misalignment are supported, and
1784 4) the number of runtime alignment checks is within reason. */
1786 do_versioning =
1787 optimize_loop_nest_for_speed_p (loop)
1788 && (!loop->inner); /* FORNOW */
1790 if (do_versioning)
1792 FOR_EACH_VEC_ELT (datarefs, i, dr)
1794 stmt = DR_STMT (dr);
1795 stmt_info = vinfo_for_stmt (stmt);
1797 /* For interleaving, only the alignment of the first access
1798 matters. */
1799 if (aligned_access_p (dr)
1800 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1801 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1802 continue;
1804 if (STMT_VINFO_STRIDED_P (stmt_info))
1806 /* Strided loads perform only component accesses, alignment is
1807 irrelevant for them. */
1808 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1809 continue;
1810 do_versioning = false;
1811 break;
1814 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1816 if (!supportable_dr_alignment)
1818 gimple stmt;
1819 int mask;
1820 tree vectype;
1822 if (known_alignment_for_access_p (dr)
1823 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1824 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1826 do_versioning = false;
1827 break;
1830 stmt = DR_STMT (dr);
1831 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1832 gcc_assert (vectype);
1834 /* The rightmost bits of an aligned address must be zeros.
1835 Construct the mask needed for this test. For example,
1836 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1837 mask must be 15 = 0xf. */
1838 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1840 /* FORNOW: use the same mask to test all potentially unaligned
1841 references in the loop. The vectorizer currently supports
1842 a single vector size, see the reference to
1843 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1844 vectorization factor is computed. */
1845 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1846 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1847 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1848 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
1849 DR_STMT (dr));
1853 /* Versioning requires at least one misaligned data reference. */
1854 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1855 do_versioning = false;
1856 else if (!do_versioning)
1857 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1860 if (do_versioning)
1862 vec<gimple> may_misalign_stmts
1863 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1864 gimple stmt;
1866 /* It can now be assumed that the data references in the statements
1867 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1868 of the loop being vectorized. */
1869 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1871 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1872 dr = STMT_VINFO_DATA_REF (stmt_info);
1873 SET_DR_MISALIGNMENT (dr, 0);
1874 if (dump_enabled_p ())
1875 dump_printf_loc (MSG_NOTE, vect_location,
1876 "Alignment of access forced using versioning.\n");
1879 if (dump_enabled_p ())
1880 dump_printf_loc (MSG_NOTE, vect_location,
1881 "Versioning for alignment will be applied.\n");
1883 /* Peeling and versioning can't be done together at this time. */
1884 gcc_assert (! (do_peeling && do_versioning));
1886 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1887 gcc_assert (stat);
1888 return stat;
1891 /* This point is reached if neither peeling nor versioning is being done. */
1892 gcc_assert (! (do_peeling || do_versioning));
1894 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1895 return stat;
1899 /* Function vect_find_same_alignment_drs.
1901 Update group and alignment relations according to the chosen
1902 vectorization factor. */
1904 static void
1905 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1906 loop_vec_info loop_vinfo)
1908 unsigned int i;
1909 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1910 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1911 struct data_reference *dra = DDR_A (ddr);
1912 struct data_reference *drb = DDR_B (ddr);
1913 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1914 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1915 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1916 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1917 lambda_vector dist_v;
1918 unsigned int loop_depth;
1920 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1921 return;
1923 if (dra == drb)
1924 return;
1926 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1927 return;
1929 /* Loop-based vectorization and known data dependence. */
1930 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1931 return;
1933 /* Data-dependence analysis reports a distance vector of zero
1934 for data-references that overlap only in the first iteration
1935 but have different sign step (see PR45764).
1936 So as a sanity check require equal DR_STEP. */
1937 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1938 return;
1940 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1941 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1943 int dist = dist_v[loop_depth];
1945 if (dump_enabled_p ())
1946 dump_printf_loc (MSG_NOTE, vect_location,
1947 "dependence distance = %d.\n", dist);
1949 /* Same loop iteration. */
1950 if (dist == 0
1951 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1953 /* Two references with distance zero have the same alignment. */
1954 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1955 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
1956 if (dump_enabled_p ())
1958 dump_printf_loc (MSG_NOTE, vect_location,
1959 "accesses have the same alignment.\n");
1960 dump_printf (MSG_NOTE,
1961 "dependence distance modulo vf == 0 between ");
1962 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1963 dump_printf (MSG_NOTE, " and ");
1964 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1965 dump_printf (MSG_NOTE, "\n");
1972 /* Function vect_analyze_data_refs_alignment
1974 Analyze the alignment of the data-references in the loop.
1975 Return FALSE if a data reference is found that cannot be vectorized. */
1977 bool
1978 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
1979 bb_vec_info bb_vinfo)
1981 if (dump_enabled_p ())
1982 dump_printf_loc (MSG_NOTE, vect_location,
1983 "=== vect_analyze_data_refs_alignment ===\n");
1985 /* Mark groups of data references with same alignment using
1986 data dependence information. */
1987 if (loop_vinfo)
1989 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
1990 struct data_dependence_relation *ddr;
1991 unsigned int i;
1993 FOR_EACH_VEC_ELT (ddrs, i, ddr)
1994 vect_find_same_alignment_drs (ddr, loop_vinfo);
1997 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
1999 if (dump_enabled_p ())
2000 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2001 "not vectorized: can't calculate alignment "
2002 "for data ref.\n");
2003 return false;
2006 return true;
2010 /* Analyze groups of accesses: check that DR belongs to a group of
2011 accesses of legal size, step, etc. Detect gaps, single element
2012 interleaving, and other special cases. Set grouped access info.
2013 Collect groups of strided stores for further use in SLP analysis. */
2015 static bool
2016 vect_analyze_group_access (struct data_reference *dr)
2018 tree step = DR_STEP (dr);
2019 tree scalar_type = TREE_TYPE (DR_REF (dr));
2020 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2021 gimple stmt = DR_STMT (dr);
2022 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2023 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2024 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2025 HOST_WIDE_INT dr_step = -1;
2026 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2027 bool slp_impossible = false;
2028 struct loop *loop = NULL;
2030 if (loop_vinfo)
2031 loop = LOOP_VINFO_LOOP (loop_vinfo);
2033 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2034 size of the interleaving group (including gaps). */
2035 if (tree_fits_shwi_p (step))
2037 dr_step = tree_to_shwi (step);
2038 groupsize = absu_hwi (dr_step) / type_size;
2040 else
2041 groupsize = 0;
2043 /* Not consecutive access is possible only if it is a part of interleaving. */
2044 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2046 /* Check if it this DR is a part of interleaving, and is a single
2047 element of the group that is accessed in the loop. */
2049 /* Gaps are supported only for loads. STEP must be a multiple of the type
2050 size. The size of the group must be a power of 2. */
2051 if (DR_IS_READ (dr)
2052 && (dr_step % type_size) == 0
2053 && groupsize > 0
2054 && exact_log2 (groupsize) != -1)
2056 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2057 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2058 if (dump_enabled_p ())
2060 dump_printf_loc (MSG_NOTE, vect_location,
2061 "Detected single element interleaving ");
2062 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2063 dump_printf (MSG_NOTE, " step ");
2064 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2065 dump_printf (MSG_NOTE, "\n");
2068 if (loop_vinfo)
2070 if (dump_enabled_p ())
2071 dump_printf_loc (MSG_NOTE, vect_location,
2072 "Data access with gaps requires scalar "
2073 "epilogue loop\n");
2074 if (loop->inner)
2076 if (dump_enabled_p ())
2077 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2078 "Peeling for outer loop is not"
2079 " supported\n");
2080 return false;
2083 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2086 return true;
2089 if (dump_enabled_p ())
2091 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2092 "not consecutive access ");
2093 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2094 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2097 if (bb_vinfo)
2099 /* Mark the statement as unvectorizable. */
2100 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2101 return true;
2104 return false;
2107 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2109 /* First stmt in the interleaving chain. Check the chain. */
2110 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2111 struct data_reference *data_ref = dr;
2112 unsigned int count = 1;
2113 tree prev_init = DR_INIT (data_ref);
2114 gimple prev = stmt;
2115 HOST_WIDE_INT diff, gaps = 0;
2117 while (next)
2119 /* Skip same data-refs. In case that two or more stmts share
2120 data-ref (supported only for loads), we vectorize only the first
2121 stmt, and the rest get their vectorized loads from the first
2122 one. */
2123 if (!tree_int_cst_compare (DR_INIT (data_ref),
2124 DR_INIT (STMT_VINFO_DATA_REF (
2125 vinfo_for_stmt (next)))))
2127 if (DR_IS_WRITE (data_ref))
2129 if (dump_enabled_p ())
2130 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2131 "Two store stmts share the same dr.\n");
2132 return false;
2135 /* For load use the same data-ref load. */
2136 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2138 prev = next;
2139 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2140 continue;
2143 prev = next;
2144 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2146 /* All group members have the same STEP by construction. */
2147 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2149 /* Check that the distance between two accesses is equal to the type
2150 size. Otherwise, we have gaps. */
2151 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2152 - TREE_INT_CST_LOW (prev_init)) / type_size;
2153 if (diff != 1)
2155 /* FORNOW: SLP of accesses with gaps is not supported. */
2156 slp_impossible = true;
2157 if (DR_IS_WRITE (data_ref))
2159 if (dump_enabled_p ())
2160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2161 "interleaved store with gaps\n");
2162 return false;
2165 gaps += diff - 1;
2168 last_accessed_element += diff;
2170 /* Store the gap from the previous member of the group. If there is no
2171 gap in the access, GROUP_GAP is always 1. */
2172 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2174 prev_init = DR_INIT (data_ref);
2175 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2176 /* Count the number of data-refs in the chain. */
2177 count++;
2180 if (groupsize == 0)
2181 groupsize = count + gaps;
2183 /* Check that the size of the interleaving is equal to count for stores,
2184 i.e., that there are no gaps. */
2185 if (groupsize != count
2186 && !DR_IS_READ (dr))
2188 if (dump_enabled_p ())
2189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2190 "interleaved store with gaps\n");
2191 return false;
2194 /* If there is a gap after the last load in the group it is the
2195 difference between the groupsize and the last accessed
2196 element.
2197 When there is no gap, this difference should be 0. */
2198 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
2200 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2201 if (dump_enabled_p ())
2203 dump_printf_loc (MSG_NOTE, vect_location,
2204 "Detected interleaving of size %d starting with ",
2205 (int)groupsize);
2206 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2207 if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
2208 dump_printf_loc (MSG_NOTE, vect_location,
2209 "There is a gap of %d elements after the group\n",
2210 (int)GROUP_GAP (vinfo_for_stmt (stmt)));
2213 /* SLP: create an SLP data structure for every interleaving group of
2214 stores for further analysis in vect_analyse_slp. */
2215 if (DR_IS_WRITE (dr) && !slp_impossible)
2217 if (loop_vinfo)
2218 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2219 if (bb_vinfo)
2220 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2223 /* If there is a gap in the end of the group or the group size cannot
2224 be made a multiple of the vector element count then we access excess
2225 elements in the last iteration and thus need to peel that off. */
2226 if (loop_vinfo
2227 && (groupsize - last_accessed_element > 0
2228 || exact_log2 (groupsize) == -1))
2231 if (dump_enabled_p ())
2232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2233 "Data access with gaps requires scalar "
2234 "epilogue loop\n");
2235 if (loop->inner)
2237 if (dump_enabled_p ())
2238 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2239 "Peeling for outer loop is not supported\n");
2240 return false;
2243 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2247 return true;
2251 /* Analyze the access pattern of the data-reference DR.
2252 In case of non-consecutive accesses call vect_analyze_group_access() to
2253 analyze groups of accesses. */
2255 static bool
2256 vect_analyze_data_ref_access (struct data_reference *dr)
2258 tree step = DR_STEP (dr);
2259 tree scalar_type = TREE_TYPE (DR_REF (dr));
2260 gimple stmt = DR_STMT (dr);
2261 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2262 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2263 struct loop *loop = NULL;
2265 if (loop_vinfo)
2266 loop = LOOP_VINFO_LOOP (loop_vinfo);
2268 if (loop_vinfo && !step)
2270 if (dump_enabled_p ())
2271 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2272 "bad data-ref access in loop\n");
2273 return false;
2276 /* Allow loads with zero step in inner-loop vectorization. */
2277 if (loop_vinfo && integer_zerop (step))
2279 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2280 if (!nested_in_vect_loop_p (loop, stmt))
2281 return DR_IS_READ (dr);
2282 /* Allow references with zero step for outer loops marked
2283 with pragma omp simd only - it guarantees absence of
2284 loop-carried dependencies between inner loop iterations. */
2285 if (!loop->force_vectorize)
2287 if (dump_enabled_p ())
2288 dump_printf_loc (MSG_NOTE, vect_location,
2289 "zero step in inner loop of nest\n");
2290 return false;
2294 if (loop && nested_in_vect_loop_p (loop, stmt))
2296 /* Interleaved accesses are not yet supported within outer-loop
2297 vectorization for references in the inner-loop. */
2298 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2300 /* For the rest of the analysis we use the outer-loop step. */
2301 step = STMT_VINFO_DR_STEP (stmt_info);
2302 if (integer_zerop (step))
2304 if (dump_enabled_p ())
2305 dump_printf_loc (MSG_NOTE, vect_location,
2306 "zero step in outer loop.\n");
2307 if (DR_IS_READ (dr))
2308 return true;
2309 else
2310 return false;
2314 /* Consecutive? */
2315 if (TREE_CODE (step) == INTEGER_CST)
2317 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2318 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2319 || (dr_step < 0
2320 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2322 /* Mark that it is not interleaving. */
2323 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2324 return true;
2328 if (loop && nested_in_vect_loop_p (loop, stmt))
2330 if (dump_enabled_p ())
2331 dump_printf_loc (MSG_NOTE, vect_location,
2332 "grouped access in outer loop.\n");
2333 return false;
2337 /* Assume this is a DR handled by non-constant strided load case. */
2338 if (TREE_CODE (step) != INTEGER_CST)
2339 return (STMT_VINFO_STRIDED_P (stmt_info)
2340 && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
2341 || vect_analyze_group_access (dr)));
2343 /* Not consecutive access - check if it's a part of interleaving group. */
2344 return vect_analyze_group_access (dr);
2349 /* A helper function used in the comparator function to sort data
2350 references. T1 and T2 are two data references to be compared.
2351 The function returns -1, 0, or 1. */
2353 static int
2354 compare_tree (tree t1, tree t2)
2356 int i, cmp;
2357 enum tree_code code;
2358 char tclass;
2360 if (t1 == t2)
2361 return 0;
2362 if (t1 == NULL)
2363 return -1;
2364 if (t2 == NULL)
2365 return 1;
2368 if (TREE_CODE (t1) != TREE_CODE (t2))
2369 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2371 code = TREE_CODE (t1);
2372 switch (code)
2374 /* For const values, we can just use hash values for comparisons. */
2375 case INTEGER_CST:
2376 case REAL_CST:
2377 case FIXED_CST:
2378 case STRING_CST:
2379 case COMPLEX_CST:
2380 case VECTOR_CST:
2382 hashval_t h1 = iterative_hash_expr (t1, 0);
2383 hashval_t h2 = iterative_hash_expr (t2, 0);
2384 if (h1 != h2)
2385 return h1 < h2 ? -1 : 1;
2386 break;
2389 case SSA_NAME:
2390 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2391 if (cmp != 0)
2392 return cmp;
2394 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2395 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2396 break;
2398 default:
2399 tclass = TREE_CODE_CLASS (code);
2401 /* For var-decl, we could compare their UIDs. */
2402 if (tclass == tcc_declaration)
2404 if (DECL_UID (t1) != DECL_UID (t2))
2405 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2406 break;
2409 /* For expressions with operands, compare their operands recursively. */
2410 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2412 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2413 if (cmp != 0)
2414 return cmp;
2418 return 0;
2422 /* Compare two data-references DRA and DRB to group them into chunks
2423 suitable for grouping. */
2425 static int
2426 dr_group_sort_cmp (const void *dra_, const void *drb_)
2428 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2429 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2430 int cmp;
2432 /* Stabilize sort. */
2433 if (dra == drb)
2434 return 0;
2436 /* Ordering of DRs according to base. */
2437 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2439 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2440 if (cmp != 0)
2441 return cmp;
2444 /* And according to DR_OFFSET. */
2445 if (!dr_equal_offsets_p (dra, drb))
2447 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2448 if (cmp != 0)
2449 return cmp;
2452 /* Put reads before writes. */
2453 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2454 return DR_IS_READ (dra) ? -1 : 1;
2456 /* Then sort after access size. */
2457 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2458 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2460 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2461 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2462 if (cmp != 0)
2463 return cmp;
2466 /* And after step. */
2467 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2469 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2470 if (cmp != 0)
2471 return cmp;
2474 /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2475 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2476 if (cmp == 0)
2477 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2478 return cmp;
2481 /* Function vect_analyze_data_ref_accesses.
2483 Analyze the access pattern of all the data references in the loop.
2485 FORNOW: the only access pattern that is considered vectorizable is a
2486 simple step 1 (consecutive) access.
2488 FORNOW: handle only arrays and pointer accesses. */
2490 bool
2491 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2493 unsigned int i;
2494 vec<data_reference_p> datarefs;
2495 struct data_reference *dr;
2497 if (dump_enabled_p ())
2498 dump_printf_loc (MSG_NOTE, vect_location,
2499 "=== vect_analyze_data_ref_accesses ===\n");
2501 if (loop_vinfo)
2502 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2503 else
2504 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2506 if (datarefs.is_empty ())
2507 return true;
2509 /* Sort the array of datarefs to make building the interleaving chains
2510 linear. Don't modify the original vector's order, it is needed for
2511 determining what dependencies are reversed. */
2512 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2513 datarefs_copy.qsort (dr_group_sort_cmp);
2515 /* Build the interleaving chains. */
2516 for (i = 0; i < datarefs_copy.length () - 1;)
2518 data_reference_p dra = datarefs_copy[i];
2519 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2520 stmt_vec_info lastinfo = NULL;
2521 for (i = i + 1; i < datarefs_copy.length (); ++i)
2523 data_reference_p drb = datarefs_copy[i];
2524 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2526 /* ??? Imperfect sorting (non-compatible types, non-modulo
2527 accesses, same accesses) can lead to a group to be artificially
2528 split here as we don't just skip over those. If it really
2529 matters we can push those to a worklist and re-iterate
2530 over them. The we can just skip ahead to the next DR here. */
2532 /* Check that the data-refs have same first location (except init)
2533 and they are both either store or load (not load and store,
2534 not masked loads or stores). */
2535 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2536 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2537 DR_BASE_ADDRESS (drb), 0)
2538 || !dr_equal_offsets_p (dra, drb)
2539 || !gimple_assign_single_p (DR_STMT (dra))
2540 || !gimple_assign_single_p (DR_STMT (drb)))
2541 break;
2543 /* Check that the data-refs have the same constant size. */
2544 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2545 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2546 if (!tree_fits_uhwi_p (sza)
2547 || !tree_fits_uhwi_p (szb)
2548 || !tree_int_cst_equal (sza, szb))
2549 break;
2551 /* Check that the data-refs have the same step. */
2552 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2553 break;
2555 /* Do not place the same access in the interleaving chain twice. */
2556 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2557 break;
2559 /* Check the types are compatible.
2560 ??? We don't distinguish this during sorting. */
2561 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2562 TREE_TYPE (DR_REF (drb))))
2563 break;
2565 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2566 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2567 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2568 gcc_assert (init_a < init_b);
2570 /* If init_b == init_a + the size of the type * k, we have an
2571 interleaving, and DRA is accessed before DRB. */
2572 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2573 if ((init_b - init_a) % type_size_a != 0)
2574 break;
2576 /* If we have a store, the accesses are adjacent. This splits
2577 groups into chunks we support (we don't support vectorization
2578 of stores with gaps). */
2579 if (!DR_IS_READ (dra)
2580 && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
2581 (DR_INIT (datarefs_copy[i-1]))
2582 != type_size_a))
2583 break;
2585 /* If the step (if not zero or non-constant) is greater than the
2586 difference between data-refs' inits this splits groups into
2587 suitable sizes. */
2588 if (tree_fits_shwi_p (DR_STEP (dra)))
2590 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2591 if (step != 0 && step <= (init_b - init_a))
2592 break;
2595 if (dump_enabled_p ())
2597 dump_printf_loc (MSG_NOTE, vect_location,
2598 "Detected interleaving ");
2599 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2600 dump_printf (MSG_NOTE, " and ");
2601 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2602 dump_printf (MSG_NOTE, "\n");
2605 /* Link the found element into the group list. */
2606 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2608 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2609 lastinfo = stmtinfo_a;
2611 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2612 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2613 lastinfo = stmtinfo_b;
2617 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2618 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2619 && !vect_analyze_data_ref_access (dr))
2621 if (dump_enabled_p ())
2622 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2623 "not vectorized: complicated access pattern.\n");
2625 if (bb_vinfo)
2627 /* Mark the statement as not vectorizable. */
2628 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2629 continue;
2631 else
2633 datarefs_copy.release ();
2634 return false;
2638 datarefs_copy.release ();
2639 return true;
2643 /* Operator == between two dr_with_seg_len objects.
2645 This equality operator is used to make sure two data refs
2646 are the same one so that we will consider to combine the
2647 aliasing checks of those two pairs of data dependent data
2648 refs. */
2650 static bool
2651 operator == (const dr_with_seg_len& d1,
2652 const dr_with_seg_len& d2)
2654 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2655 DR_BASE_ADDRESS (d2.dr), 0)
2656 && compare_tree (d1.offset, d2.offset) == 0
2657 && compare_tree (d1.seg_len, d2.seg_len) == 0;
2660 /* Function comp_dr_with_seg_len_pair.
2662 Comparison function for sorting objects of dr_with_seg_len_pair_t
2663 so that we can combine aliasing checks in one scan. */
2665 static int
2666 comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
2668 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2669 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2671 const dr_with_seg_len &p11 = p1->first,
2672 &p12 = p1->second,
2673 &p21 = p2->first,
2674 &p22 = p2->second;
2676 /* For DR pairs (a, b) and (c, d), we only consider to merge the alias checks
2677 if a and c have the same basic address snd step, and b and d have the same
2678 address and step. Therefore, if any a&c or b&d don't have the same address
2679 and step, we don't care the order of those two pairs after sorting. */
2680 int comp_res;
2682 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2683 DR_BASE_ADDRESS (p21.dr))) != 0)
2684 return comp_res;
2685 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2686 DR_BASE_ADDRESS (p22.dr))) != 0)
2687 return comp_res;
2688 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2689 return comp_res;
2690 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2691 return comp_res;
2692 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2693 return comp_res;
2694 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
2695 return comp_res;
2697 return 0;
2700 /* Function vect_vfa_segment_size.
2702 Create an expression that computes the size of segment
2703 that will be accessed for a data reference. The functions takes into
2704 account that realignment loads may access one more vector.
2706 Input:
2707 DR: The data reference.
2708 LENGTH_FACTOR: segment length to consider.
2710 Return an expression whose value is the size of segment which will be
2711 accessed by DR. */
2713 static tree
2714 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2716 tree segment_length;
2718 if (integer_zerop (DR_STEP (dr)))
2719 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2720 else
2721 segment_length = size_binop (MULT_EXPR,
2722 fold_convert (sizetype, DR_STEP (dr)),
2723 fold_convert (sizetype, length_factor));
2725 if (vect_supportable_dr_alignment (dr, false)
2726 == dr_explicit_realign_optimized)
2728 tree vector_size = TYPE_SIZE_UNIT
2729 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2731 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2733 return segment_length;
2736 /* Function vect_prune_runtime_alias_test_list.
2738 Prune a list of ddrs to be tested at run-time by versioning for alias.
2739 Merge several alias checks into one if possible.
2740 Return FALSE if resulting list of ddrs is longer then allowed by
2741 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2743 bool
2744 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2746 vec<ddr_p> may_alias_ddrs =
2747 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2748 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
2749 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2750 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2751 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2753 ddr_p ddr;
2754 unsigned int i;
2755 tree length_factor;
2757 if (dump_enabled_p ())
2758 dump_printf_loc (MSG_NOTE, vect_location,
2759 "=== vect_prune_runtime_alias_test_list ===\n");
2761 if (may_alias_ddrs.is_empty ())
2762 return true;
2764 /* Basically, for each pair of dependent data refs store_ptr_0
2765 and load_ptr_0, we create an expression:
2767 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2768 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2770 for aliasing checks. However, in some cases we can decrease
2771 the number of checks by combining two checks into one. For
2772 example, suppose we have another pair of data refs store_ptr_0
2773 and load_ptr_1, and if the following condition is satisfied:
2775 load_ptr_0 < load_ptr_1 &&
2776 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2778 (this condition means, in each iteration of vectorized loop,
2779 the accessed memory of store_ptr_0 cannot be between the memory
2780 of load_ptr_0 and load_ptr_1.)
2782 we then can use only the following expression to finish the
2783 alising checks between store_ptr_0 & load_ptr_0 and
2784 store_ptr_0 & load_ptr_1:
2786 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2787 || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))
2789 Note that we only consider that load_ptr_0 and load_ptr_1 have the
2790 same basic address. */
2792 comp_alias_ddrs.create (may_alias_ddrs.length ());
2794 /* First, we collect all data ref pairs for aliasing checks. */
2795 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2797 struct data_reference *dr_a, *dr_b;
2798 gimple dr_group_first_a, dr_group_first_b;
2799 tree segment_length_a, segment_length_b;
2800 gimple stmt_a, stmt_b;
2802 dr_a = DDR_A (ddr);
2803 stmt_a = DR_STMT (DDR_A (ddr));
2804 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2805 if (dr_group_first_a)
2807 stmt_a = dr_group_first_a;
2808 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2811 dr_b = DDR_B (ddr);
2812 stmt_b = DR_STMT (DDR_B (ddr));
2813 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2814 if (dr_group_first_b)
2816 stmt_b = dr_group_first_b;
2817 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2820 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2821 length_factor = scalar_loop_iters;
2822 else
2823 length_factor = size_int (vect_factor);
2824 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2825 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2827 dr_with_seg_len_pair_t dr_with_seg_len_pair
2828 (dr_with_seg_len (dr_a, segment_length_a),
2829 dr_with_seg_len (dr_b, segment_length_b));
2831 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
2832 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2834 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2837 /* Second, we sort the collected data ref pairs so that we can scan
2838 them once to combine all possible aliasing checks. */
2839 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
2841 /* Third, we scan the sorted dr pairs and check if we can combine
2842 alias checks of two neighbouring dr pairs. */
2843 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2845 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2846 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2847 *dr_b1 = &comp_alias_ddrs[i-1].second,
2848 *dr_a2 = &comp_alias_ddrs[i].first,
2849 *dr_b2 = &comp_alias_ddrs[i].second;
2851 /* Remove duplicate data ref pairs. */
2852 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2854 if (dump_enabled_p ())
2856 dump_printf_loc (MSG_NOTE, vect_location,
2857 "found equal ranges ");
2858 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2859 DR_REF (dr_a1->dr));
2860 dump_printf (MSG_NOTE, ", ");
2861 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2862 DR_REF (dr_b1->dr));
2863 dump_printf (MSG_NOTE, " and ");
2864 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2865 DR_REF (dr_a2->dr));
2866 dump_printf (MSG_NOTE, ", ");
2867 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2868 DR_REF (dr_b2->dr));
2869 dump_printf (MSG_NOTE, "\n");
2872 comp_alias_ddrs.ordered_remove (i--);
2873 continue;
2876 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2878 /* We consider the case that DR_B1 and DR_B2 are same memrefs,
2879 and DR_A1 and DR_A2 are two consecutive memrefs. */
2880 if (*dr_a1 == *dr_a2)
2882 std::swap (dr_a1, dr_b1);
2883 std::swap (dr_a2, dr_b2);
2886 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2887 DR_BASE_ADDRESS (dr_a2->dr),
2889 || !tree_fits_shwi_p (dr_a1->offset)
2890 || !tree_fits_shwi_p (dr_a2->offset))
2891 continue;
2893 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2894 - tree_to_shwi (dr_a1->offset));
2897 /* Now we check if the following condition is satisfied:
2899 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2901 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2902 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2903 have to make a best estimation. We can get the minimum value
2904 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2905 then either of the following two conditions can guarantee the
2906 one above:
2908 1: DIFF <= MIN_SEG_LEN_B
2909 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
2913 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2914 ? tree_to_shwi (dr_b1->seg_len)
2915 : vect_factor);
2917 if (diff <= min_seg_len_b
2918 || (tree_fits_shwi_p (dr_a1->seg_len)
2919 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
2921 if (dump_enabled_p ())
2923 dump_printf_loc (MSG_NOTE, vect_location,
2924 "merging ranges for ");
2925 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2926 DR_REF (dr_a1->dr));
2927 dump_printf (MSG_NOTE, ", ");
2928 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2929 DR_REF (dr_b1->dr));
2930 dump_printf (MSG_NOTE, " and ");
2931 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2932 DR_REF (dr_a2->dr));
2933 dump_printf (MSG_NOTE, ", ");
2934 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2935 DR_REF (dr_b2->dr));
2936 dump_printf (MSG_NOTE, "\n");
2939 dr_a1->seg_len = size_binop (PLUS_EXPR,
2940 dr_a2->seg_len, size_int (diff));
2941 comp_alias_ddrs.ordered_remove (i--);
2946 dump_printf_loc (MSG_NOTE, vect_location,
2947 "improved number of alias checks from %d to %d\n",
2948 may_alias_ddrs.length (), comp_alias_ddrs.length ());
2949 if ((int) comp_alias_ddrs.length () >
2950 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2951 return false;
2953 return true;
2956 /* Check whether a non-affine read in stmt is suitable for gather load
2957 and if so, return a builtin decl for that operation. */
2959 tree
2960 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2961 tree *offp, int *scalep)
2963 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2964 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2965 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2966 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2967 tree offtype = NULL_TREE;
2968 tree decl, base, off;
2969 machine_mode pmode;
2970 int punsignedp, pvolatilep;
2972 base = DR_REF (dr);
2973 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
2974 see if we can use the def stmt of the address. */
2975 if (is_gimple_call (stmt)
2976 && gimple_call_internal_p (stmt)
2977 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2978 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2979 && TREE_CODE (base) == MEM_REF
2980 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
2981 && integer_zerop (TREE_OPERAND (base, 1))
2982 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
2984 gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
2985 if (is_gimple_assign (def_stmt)
2986 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
2987 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
2990 /* The gather builtins need address of the form
2991 loop_invariant + vector * {1, 2, 4, 8}
2993 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
2994 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
2995 of loop invariants/SSA_NAMEs defined in the loop, with casts,
2996 multiplications and additions in it. To get a vector, we need
2997 a single SSA_NAME that will be defined in the loop and will
2998 contain everything that is not loop invariant and that can be
2999 vectorized. The following code attempts to find such a preexistng
3000 SSA_NAME OFF and put the loop invariants into a tree BASE
3001 that can be gimplified before the loop. */
3002 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
3003 &pmode, &punsignedp, &pvolatilep, false);
3004 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
3006 if (TREE_CODE (base) == MEM_REF)
3008 if (!integer_zerop (TREE_OPERAND (base, 1)))
3010 if (off == NULL_TREE)
3012 offset_int moff = mem_ref_offset (base);
3013 off = wide_int_to_tree (sizetype, moff);
3015 else
3016 off = size_binop (PLUS_EXPR, off,
3017 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3019 base = TREE_OPERAND (base, 0);
3021 else
3022 base = build_fold_addr_expr (base);
3024 if (off == NULL_TREE)
3025 off = size_zero_node;
3027 /* If base is not loop invariant, either off is 0, then we start with just
3028 the constant offset in the loop invariant BASE and continue with base
3029 as OFF, otherwise give up.
3030 We could handle that case by gimplifying the addition of base + off
3031 into some SSA_NAME and use that as off, but for now punt. */
3032 if (!expr_invariant_in_loop_p (loop, base))
3034 if (!integer_zerop (off))
3035 return NULL_TREE;
3036 off = base;
3037 base = size_int (pbitpos / BITS_PER_UNIT);
3039 /* Otherwise put base + constant offset into the loop invariant BASE
3040 and continue with OFF. */
3041 else
3043 base = fold_convert (sizetype, base);
3044 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3047 /* OFF at this point may be either a SSA_NAME or some tree expression
3048 from get_inner_reference. Try to peel off loop invariants from it
3049 into BASE as long as possible. */
3050 STRIP_NOPS (off);
3051 while (offtype == NULL_TREE)
3053 enum tree_code code;
3054 tree op0, op1, add = NULL_TREE;
3056 if (TREE_CODE (off) == SSA_NAME)
3058 gimple def_stmt = SSA_NAME_DEF_STMT (off);
3060 if (expr_invariant_in_loop_p (loop, off))
3061 return NULL_TREE;
3063 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3064 break;
3066 op0 = gimple_assign_rhs1 (def_stmt);
3067 code = gimple_assign_rhs_code (def_stmt);
3068 op1 = gimple_assign_rhs2 (def_stmt);
3070 else
3072 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3073 return NULL_TREE;
3074 code = TREE_CODE (off);
3075 extract_ops_from_tree (off, &code, &op0, &op1);
3077 switch (code)
3079 case POINTER_PLUS_EXPR:
3080 case PLUS_EXPR:
3081 if (expr_invariant_in_loop_p (loop, op0))
3083 add = op0;
3084 off = op1;
3085 do_add:
3086 add = fold_convert (sizetype, add);
3087 if (scale != 1)
3088 add = size_binop (MULT_EXPR, add, size_int (scale));
3089 base = size_binop (PLUS_EXPR, base, add);
3090 continue;
3092 if (expr_invariant_in_loop_p (loop, op1))
3094 add = op1;
3095 off = op0;
3096 goto do_add;
3098 break;
3099 case MINUS_EXPR:
3100 if (expr_invariant_in_loop_p (loop, op1))
3102 add = fold_convert (sizetype, op1);
3103 add = size_binop (MINUS_EXPR, size_zero_node, add);
3104 off = op0;
3105 goto do_add;
3107 break;
3108 case MULT_EXPR:
3109 if (scale == 1 && tree_fits_shwi_p (op1))
3111 scale = tree_to_shwi (op1);
3112 off = op0;
3113 continue;
3115 break;
3116 case SSA_NAME:
3117 off = op0;
3118 continue;
3119 CASE_CONVERT:
3120 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3121 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3122 break;
3123 if (TYPE_PRECISION (TREE_TYPE (op0))
3124 == TYPE_PRECISION (TREE_TYPE (off)))
3126 off = op0;
3127 continue;
3129 if (TYPE_PRECISION (TREE_TYPE (op0))
3130 < TYPE_PRECISION (TREE_TYPE (off)))
3132 off = op0;
3133 offtype = TREE_TYPE (off);
3134 STRIP_NOPS (off);
3135 continue;
3137 break;
3138 default:
3139 break;
3141 break;
3144 /* If at the end OFF still isn't a SSA_NAME or isn't
3145 defined in the loop, punt. */
3146 if (TREE_CODE (off) != SSA_NAME
3147 || expr_invariant_in_loop_p (loop, off))
3148 return NULL_TREE;
3150 if (offtype == NULL_TREE)
3151 offtype = TREE_TYPE (off);
3153 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3154 offtype, scale);
3155 if (decl == NULL_TREE)
3156 return NULL_TREE;
3158 if (basep)
3159 *basep = base;
3160 if (offp)
3161 *offp = off;
3162 if (scalep)
3163 *scalep = scale;
3164 return decl;
3167 /* Function vect_analyze_data_refs.
3169 Find all the data references in the loop or basic block.
3171 The general structure of the analysis of data refs in the vectorizer is as
3172 follows:
3173 1- vect_analyze_data_refs(loop/bb): call
3174 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3175 in the loop/bb and their dependences.
3176 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3177 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3178 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3182 bool
3183 vect_analyze_data_refs (loop_vec_info loop_vinfo,
3184 bb_vec_info bb_vinfo,
3185 int *min_vf, unsigned *n_stmts)
3187 struct loop *loop = NULL;
3188 basic_block bb = NULL;
3189 unsigned int i;
3190 vec<data_reference_p> datarefs;
3191 struct data_reference *dr;
3192 tree scalar_type;
3194 if (dump_enabled_p ())
3195 dump_printf_loc (MSG_NOTE, vect_location,
3196 "=== vect_analyze_data_refs ===\n");
3198 if (loop_vinfo)
3200 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3202 loop = LOOP_VINFO_LOOP (loop_vinfo);
3203 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3204 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
3206 if (dump_enabled_p ())
3207 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3208 "not vectorized: loop contains function calls"
3209 " or data references that cannot be analyzed\n");
3210 return false;
3213 for (i = 0; i < loop->num_nodes; i++)
3215 gimple_stmt_iterator gsi;
3217 for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3219 gimple stmt = gsi_stmt (gsi);
3220 if (is_gimple_debug (stmt))
3221 continue;
3222 ++*n_stmts;
3223 if (!find_data_references_in_stmt (loop, stmt, &datarefs))
3225 if (is_gimple_call (stmt) && loop->safelen)
3227 tree fndecl = gimple_call_fndecl (stmt), op;
3228 if (fndecl != NULL_TREE)
3230 struct cgraph_node *node = cgraph_node::get (fndecl);
3231 if (node != NULL && node->simd_clones != NULL)
3233 unsigned int j, n = gimple_call_num_args (stmt);
3234 for (j = 0; j < n; j++)
3236 op = gimple_call_arg (stmt, j);
3237 if (DECL_P (op)
3238 || (REFERENCE_CLASS_P (op)
3239 && get_base_address (op)))
3240 break;
3242 op = gimple_call_lhs (stmt);
3243 /* Ignore #pragma omp declare simd functions
3244 if they don't have data references in the
3245 call stmt itself. */
3246 if (j == n
3247 && !(op
3248 && (DECL_P (op)
3249 || (REFERENCE_CLASS_P (op)
3250 && get_base_address (op)))))
3251 continue;
3255 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3256 if (dump_enabled_p ())
3257 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3258 "not vectorized: loop contains function "
3259 "calls or data references that cannot "
3260 "be analyzed\n");
3261 return false;
3266 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3268 else
3270 gimple_stmt_iterator gsi;
3272 bb = BB_VINFO_BB (bb_vinfo);
3273 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3275 gimple stmt = gsi_stmt (gsi);
3276 if (is_gimple_debug (stmt))
3277 continue;
3278 ++*n_stmts;
3279 if (!find_data_references_in_stmt (NULL, stmt,
3280 &BB_VINFO_DATAREFS (bb_vinfo)))
3282 /* Mark the rest of the basic-block as unvectorizable. */
3283 for (; !gsi_end_p (gsi); gsi_next (&gsi))
3285 stmt = gsi_stmt (gsi);
3286 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
3288 break;
3292 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
3295 /* Go through the data-refs, check that the analysis succeeded. Update
3296 pointer from stmt_vec_info struct to DR and vectype. */
3298 FOR_EACH_VEC_ELT (datarefs, i, dr)
3300 gimple stmt;
3301 stmt_vec_info stmt_info;
3302 tree base, offset, init;
3303 bool gather = false;
3304 bool simd_lane_access = false;
3305 int vf;
3307 again:
3308 if (!dr || !DR_REF (dr))
3310 if (dump_enabled_p ())
3311 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3312 "not vectorized: unhandled data-ref\n");
3313 return false;
3316 stmt = DR_STMT (dr);
3317 stmt_info = vinfo_for_stmt (stmt);
3319 /* Discard clobbers from the dataref vector. We will remove
3320 clobber stmts during vectorization. */
3321 if (gimple_clobber_p (stmt))
3323 free_data_ref (dr);
3324 if (i == datarefs.length () - 1)
3326 datarefs.pop ();
3327 break;
3329 datarefs.ordered_remove (i);
3330 dr = datarefs[i];
3331 goto again;
3334 /* Check that analysis of the data-ref succeeded. */
3335 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3336 || !DR_STEP (dr))
3338 bool maybe_gather
3339 = DR_IS_READ (dr)
3340 && !TREE_THIS_VOLATILE (DR_REF (dr))
3341 && targetm.vectorize.builtin_gather != NULL;
3342 bool maybe_simd_lane_access
3343 = loop_vinfo && loop->simduid;
3345 /* If target supports vector gather loads, or if this might be
3346 a SIMD lane access, see if they can't be used. */
3347 if (loop_vinfo
3348 && (maybe_gather || maybe_simd_lane_access)
3349 && !nested_in_vect_loop_p (loop, stmt))
3351 struct data_reference *newdr
3352 = create_data_ref (NULL, loop_containing_stmt (stmt),
3353 DR_REF (dr), stmt, true);
3354 gcc_assert (newdr != NULL && DR_REF (newdr));
3355 if (DR_BASE_ADDRESS (newdr)
3356 && DR_OFFSET (newdr)
3357 && DR_INIT (newdr)
3358 && DR_STEP (newdr)
3359 && integer_zerop (DR_STEP (newdr)))
3361 if (maybe_simd_lane_access)
3363 tree off = DR_OFFSET (newdr);
3364 STRIP_NOPS (off);
3365 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3366 && TREE_CODE (off) == MULT_EXPR
3367 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3369 tree step = TREE_OPERAND (off, 1);
3370 off = TREE_OPERAND (off, 0);
3371 STRIP_NOPS (off);
3372 if (CONVERT_EXPR_P (off)
3373 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3374 0)))
3375 < TYPE_PRECISION (TREE_TYPE (off)))
3376 off = TREE_OPERAND (off, 0);
3377 if (TREE_CODE (off) == SSA_NAME)
3379 gimple def = SSA_NAME_DEF_STMT (off);
3380 tree reft = TREE_TYPE (DR_REF (newdr));
3381 if (is_gimple_call (def)
3382 && gimple_call_internal_p (def)
3383 && (gimple_call_internal_fn (def)
3384 == IFN_GOMP_SIMD_LANE))
3386 tree arg = gimple_call_arg (def, 0);
3387 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3388 arg = SSA_NAME_VAR (arg);
3389 if (arg == loop->simduid
3390 /* For now. */
3391 && tree_int_cst_equal
3392 (TYPE_SIZE_UNIT (reft),
3393 step))
3395 DR_OFFSET (newdr) = ssize_int (0);
3396 DR_STEP (newdr) = step;
3397 DR_ALIGNED_TO (newdr)
3398 = size_int (BIGGEST_ALIGNMENT);
3399 dr = newdr;
3400 simd_lane_access = true;
3406 if (!simd_lane_access && maybe_gather)
3408 dr = newdr;
3409 gather = true;
3412 if (!gather && !simd_lane_access)
3413 free_data_ref (newdr);
3416 if (!gather && !simd_lane_access)
3418 if (dump_enabled_p ())
3420 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3421 "not vectorized: data ref analysis "
3422 "failed ");
3423 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3424 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3427 if (bb_vinfo)
3428 break;
3430 return false;
3434 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3436 if (dump_enabled_p ())
3437 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3438 "not vectorized: base addr of dr is a "
3439 "constant\n");
3441 if (bb_vinfo)
3442 break;
3444 if (gather || simd_lane_access)
3445 free_data_ref (dr);
3446 return false;
3449 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3451 if (dump_enabled_p ())
3453 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3454 "not vectorized: volatile type ");
3455 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3456 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3459 if (bb_vinfo)
3460 break;
3462 return false;
3465 if (stmt_can_throw_internal (stmt))
3467 if (dump_enabled_p ())
3469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3470 "not vectorized: statement can throw an "
3471 "exception ");
3472 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3473 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3476 if (bb_vinfo)
3477 break;
3479 if (gather || simd_lane_access)
3480 free_data_ref (dr);
3481 return false;
3484 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3485 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3487 if (dump_enabled_p ())
3489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3490 "not vectorized: statement is bitfield "
3491 "access ");
3492 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3493 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3496 if (bb_vinfo)
3497 break;
3499 if (gather || simd_lane_access)
3500 free_data_ref (dr);
3501 return false;
3504 base = unshare_expr (DR_BASE_ADDRESS (dr));
3505 offset = unshare_expr (DR_OFFSET (dr));
3506 init = unshare_expr (DR_INIT (dr));
3508 if (is_gimple_call (stmt)
3509 && (!gimple_call_internal_p (stmt)
3510 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3511 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3513 if (dump_enabled_p ())
3515 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3516 "not vectorized: dr in a call ");
3517 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3518 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3521 if (bb_vinfo)
3522 break;
3524 if (gather || simd_lane_access)
3525 free_data_ref (dr);
3526 return false;
3529 /* Update DR field in stmt_vec_info struct. */
3531 /* If the dataref is in an inner-loop of the loop that is considered for
3532 for vectorization, we also want to analyze the access relative to
3533 the outer-loop (DR contains information only relative to the
3534 inner-most enclosing loop). We do that by building a reference to the
3535 first location accessed by the inner-loop, and analyze it relative to
3536 the outer-loop. */
3537 if (loop && nested_in_vect_loop_p (loop, stmt))
3539 tree outer_step, outer_base, outer_init;
3540 HOST_WIDE_INT pbitsize, pbitpos;
3541 tree poffset;
3542 machine_mode pmode;
3543 int punsignedp, pvolatilep;
3544 affine_iv base_iv, offset_iv;
3545 tree dinit;
3547 /* Build a reference to the first location accessed by the
3548 inner-loop: *(BASE+INIT). (The first location is actually
3549 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3550 tree inner_base = build_fold_indirect_ref
3551 (fold_build_pointer_plus (base, init));
3553 if (dump_enabled_p ())
3555 dump_printf_loc (MSG_NOTE, vect_location,
3556 "analyze in outer-loop: ");
3557 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3558 dump_printf (MSG_NOTE, "\n");
3561 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3562 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3563 gcc_assert (outer_base != NULL_TREE);
3565 if (pbitpos % BITS_PER_UNIT != 0)
3567 if (dump_enabled_p ())
3568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3569 "failed: bit offset alignment.\n");
3570 return false;
3573 outer_base = build_fold_addr_expr (outer_base);
3574 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3575 &base_iv, false))
3577 if (dump_enabled_p ())
3578 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3579 "failed: evolution of base is not affine.\n");
3580 return false;
3583 if (offset)
3585 if (poffset)
3586 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3587 poffset);
3588 else
3589 poffset = offset;
3592 if (!poffset)
3594 offset_iv.base = ssize_int (0);
3595 offset_iv.step = ssize_int (0);
3597 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3598 &offset_iv, false))
3600 if (dump_enabled_p ())
3601 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3602 "evolution of offset is not affine.\n");
3603 return false;
3606 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3607 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3608 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3609 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3610 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3612 outer_step = size_binop (PLUS_EXPR,
3613 fold_convert (ssizetype, base_iv.step),
3614 fold_convert (ssizetype, offset_iv.step));
3616 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3617 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3618 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3619 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3620 STMT_VINFO_DR_OFFSET (stmt_info) =
3621 fold_convert (ssizetype, offset_iv.base);
3622 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3623 size_int (highest_pow2_factor (offset_iv.base));
3625 if (dump_enabled_p ())
3627 dump_printf_loc (MSG_NOTE, vect_location,
3628 "\touter base_address: ");
3629 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3630 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3631 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3632 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3633 STMT_VINFO_DR_OFFSET (stmt_info));
3634 dump_printf (MSG_NOTE,
3635 "\n\touter constant offset from base address: ");
3636 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3637 STMT_VINFO_DR_INIT (stmt_info));
3638 dump_printf (MSG_NOTE, "\n\touter step: ");
3639 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3640 STMT_VINFO_DR_STEP (stmt_info));
3641 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3642 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3643 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3644 dump_printf (MSG_NOTE, "\n");
3648 if (STMT_VINFO_DATA_REF (stmt_info))
3650 if (dump_enabled_p ())
3652 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3653 "not vectorized: more than one data ref "
3654 "in stmt: ");
3655 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3656 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3659 if (bb_vinfo)
3660 break;
3662 if (gather || simd_lane_access)
3663 free_data_ref (dr);
3664 return false;
3667 STMT_VINFO_DATA_REF (stmt_info) = dr;
3668 if (simd_lane_access)
3670 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3671 free_data_ref (datarefs[i]);
3672 datarefs[i] = dr;
3675 /* Set vectype for STMT. */
3676 scalar_type = TREE_TYPE (DR_REF (dr));
3677 STMT_VINFO_VECTYPE (stmt_info)
3678 = get_vectype_for_scalar_type (scalar_type);
3679 if (!STMT_VINFO_VECTYPE (stmt_info))
3681 if (dump_enabled_p ())
3683 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3684 "not vectorized: no vectype for stmt: ");
3685 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3686 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3687 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3688 scalar_type);
3689 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3692 if (bb_vinfo)
3693 break;
3695 if (gather || simd_lane_access)
3697 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3698 if (gather)
3699 free_data_ref (dr);
3701 return false;
3703 else
3705 if (dump_enabled_p ())
3707 dump_printf_loc (MSG_NOTE, vect_location,
3708 "got vectype for stmt: ");
3709 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3710 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3711 STMT_VINFO_VECTYPE (stmt_info));
3712 dump_printf (MSG_NOTE, "\n");
3716 /* Adjust the minimal vectorization factor according to the
3717 vector type. */
3718 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3719 if (vf > *min_vf)
3720 *min_vf = vf;
3722 if (gather)
3724 tree off;
3726 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3727 if (gather
3728 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3729 gather = false;
3730 if (!gather)
3732 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3733 free_data_ref (dr);
3734 if (dump_enabled_p ())
3736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3737 "not vectorized: not suitable for gather "
3738 "load ");
3739 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3740 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3742 return false;
3745 datarefs[i] = dr;
3746 STMT_VINFO_GATHER_P (stmt_info) = true;
3748 else if (loop_vinfo
3749 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3751 if (nested_in_vect_loop_p (loop, stmt))
3753 if (dump_enabled_p ())
3755 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3756 "not vectorized: not suitable for strided "
3757 "load ");
3758 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3759 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3761 return false;
3763 STMT_VINFO_STRIDED_P (stmt_info) = true;
3767 /* If we stopped analysis at the first dataref we could not analyze
3768 when trying to vectorize a basic-block mark the rest of the datarefs
3769 as not vectorizable and truncate the vector of datarefs. That
3770 avoids spending useless time in analyzing their dependence. */
3771 if (i != datarefs.length ())
3773 gcc_assert (bb_vinfo != NULL);
3774 for (unsigned j = i; j < datarefs.length (); ++j)
3776 data_reference_p dr = datarefs[j];
3777 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3778 free_data_ref (dr);
3780 datarefs.truncate (i);
3783 return true;
3787 /* Function vect_get_new_vect_var.
3789 Returns a name for a new variable. The current naming scheme appends the
3790 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
3791 the name of vectorizer generated variables, and appends that to NAME if
3792 provided. */
3794 tree
3795 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3797 const char *prefix;
3798 tree new_vect_var;
3800 switch (var_kind)
3802 case vect_simple_var:
3803 prefix = "vect";
3804 break;
3805 case vect_scalar_var:
3806 prefix = "stmp";
3807 break;
3808 case vect_pointer_var:
3809 prefix = "vectp";
3810 break;
3811 default:
3812 gcc_unreachable ();
3815 if (name)
3817 char* tmp = concat (prefix, "_", name, NULL);
3818 new_vect_var = create_tmp_reg (type, tmp);
3819 free (tmp);
3821 else
3822 new_vect_var = create_tmp_reg (type, prefix);
3824 return new_vect_var;
3827 /* Duplicate ptr info and set alignment/misaligment on NAME from DR. */
3829 static void
3830 vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
3831 stmt_vec_info stmt_info)
3833 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
3834 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3835 int misalign = DR_MISALIGNMENT (dr);
3836 if (misalign == -1)
3837 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
3838 else
3839 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
3842 /* Function vect_create_addr_base_for_vector_ref.
3844 Create an expression that computes the address of the first memory location
3845 that will be accessed for a data reference.
3847 Input:
3848 STMT: The statement containing the data reference.
3849 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3850 OFFSET: Optional. If supplied, it is be added to the initial address.
3851 LOOP: Specify relative to which loop-nest should the address be computed.
3852 For example, when the dataref is in an inner-loop nested in an
3853 outer-loop that is now being vectorized, LOOP can be either the
3854 outer-loop, or the inner-loop. The first memory location accessed
3855 by the following dataref ('in' points to short):
3857 for (i=0; i<N; i++)
3858 for (j=0; j<M; j++)
3859 s += in[i+j]
3861 is as follows:
3862 if LOOP=i_loop: &in (relative to i_loop)
3863 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3864 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
3865 initial address. Unlike OFFSET, which is number of elements to
3866 be added, BYTE_OFFSET is measured in bytes.
3868 Output:
3869 1. Return an SSA_NAME whose value is the address of the memory location of
3870 the first vector of the data reference.
3871 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3872 these statement(s) which define the returned SSA_NAME.
3874 FORNOW: We are only handling array accesses with step 1. */
3876 tree
3877 vect_create_addr_base_for_vector_ref (gimple stmt,
3878 gimple_seq *new_stmt_list,
3879 tree offset,
3880 struct loop *loop,
3881 tree byte_offset)
3883 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3884 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3885 tree data_ref_base;
3886 const char *base_name;
3887 tree addr_base;
3888 tree dest;
3889 gimple_seq seq = NULL;
3890 tree base_offset;
3891 tree init;
3892 tree vect_ptr_type;
3893 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3894 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3896 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3898 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3900 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3902 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3903 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3904 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3906 else
3908 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3909 base_offset = unshare_expr (DR_OFFSET (dr));
3910 init = unshare_expr (DR_INIT (dr));
3913 if (loop_vinfo)
3914 base_name = get_name (data_ref_base);
3915 else
3917 base_offset = ssize_int (0);
3918 init = ssize_int (0);
3919 base_name = get_name (DR_REF (dr));
3922 /* Create base_offset */
3923 base_offset = size_binop (PLUS_EXPR,
3924 fold_convert (sizetype, base_offset),
3925 fold_convert (sizetype, init));
3927 if (offset)
3929 offset = fold_build2 (MULT_EXPR, sizetype,
3930 fold_convert (sizetype, offset), step);
3931 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3932 base_offset, offset);
3934 if (byte_offset)
3936 byte_offset = fold_convert (sizetype, byte_offset);
3937 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3938 base_offset, byte_offset);
3941 /* base + base_offset */
3942 if (loop_vinfo)
3943 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3944 else
3946 addr_base = build1 (ADDR_EXPR,
3947 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3948 unshare_expr (DR_REF (dr)));
3951 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3952 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3953 addr_base = force_gimple_operand (addr_base, &seq, true, dest);
3954 gimple_seq_add_seq (new_stmt_list, seq);
3956 if (DR_PTR_INFO (dr)
3957 && TREE_CODE (addr_base) == SSA_NAME
3958 && !SSA_NAME_PTR_INFO (addr_base))
3960 vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
3961 if (offset || byte_offset)
3962 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3965 if (dump_enabled_p ())
3967 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3968 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3969 dump_printf (MSG_NOTE, "\n");
3972 return addr_base;
3976 /* Function vect_create_data_ref_ptr.
3978 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3979 location accessed in the loop by STMT, along with the def-use update
3980 chain to appropriately advance the pointer through the loop iterations.
3981 Also set aliasing information for the pointer. This pointer is used by
3982 the callers to this function to create a memory reference expression for
3983 vector load/store access.
3985 Input:
3986 1. STMT: a stmt that references memory. Expected to be of the form
3987 GIMPLE_ASSIGN <name, data-ref> or
3988 GIMPLE_ASSIGN <data-ref, name>.
3989 2. AGGR_TYPE: the type of the reference, which should be either a vector
3990 or an array.
3991 3. AT_LOOP: the loop where the vector memref is to be created.
3992 4. OFFSET (optional): an offset to be added to the initial address accessed
3993 by the data-ref in STMT.
3994 5. BSI: location where the new stmts are to be placed if there is no loop
3995 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
3996 pointing to the initial address.
3997 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
3998 to the initial address accessed by the data-ref in STMT. This is
3999 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4000 in bytes.
4002 Output:
4003 1. Declare a new ptr to vector_type, and have it point to the base of the
4004 data reference (initial addressed accessed by the data reference).
4005 For example, for vector of type V8HI, the following code is generated:
4007 v8hi *ap;
4008 ap = (v8hi *)initial_address;
4010 if OFFSET is not supplied:
4011 initial_address = &a[init];
4012 if OFFSET is supplied:
4013 initial_address = &a[init + OFFSET];
4014 if BYTE_OFFSET is supplied:
4015 initial_address = &a[init] + BYTE_OFFSET;
4017 Return the initial_address in INITIAL_ADDRESS.
4019 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4020 update the pointer in each iteration of the loop.
4022 Return the increment stmt that updates the pointer in PTR_INCR.
4024 3. Set INV_P to true if the access pattern of the data reference in the
4025 vectorized loop is invariant. Set it to false otherwise.
4027 4. Return the pointer. */
4029 tree
4030 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
4031 tree offset, tree *initial_address,
4032 gimple_stmt_iterator *gsi, gimple *ptr_incr,
4033 bool only_init, bool *inv_p, tree byte_offset)
4035 const char *base_name;
4036 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4037 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4038 struct loop *loop = NULL;
4039 bool nested_in_vect_loop = false;
4040 struct loop *containing_loop = NULL;
4041 tree aggr_ptr_type;
4042 tree aggr_ptr;
4043 tree new_temp;
4044 gimple_seq new_stmt_list = NULL;
4045 edge pe = NULL;
4046 basic_block new_bb;
4047 tree aggr_ptr_init;
4048 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4049 tree aptr;
4050 gimple_stmt_iterator incr_gsi;
4051 bool insert_after;
4052 tree indx_before_incr, indx_after_incr;
4053 gimple incr;
4054 tree step;
4055 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4057 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4058 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4060 if (loop_vinfo)
4062 loop = LOOP_VINFO_LOOP (loop_vinfo);
4063 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4064 containing_loop = (gimple_bb (stmt))->loop_father;
4065 pe = loop_preheader_edge (loop);
4067 else
4069 gcc_assert (bb_vinfo);
4070 only_init = true;
4071 *ptr_incr = NULL;
4074 /* Check the step (evolution) of the load in LOOP, and record
4075 whether it's invariant. */
4076 if (nested_in_vect_loop)
4077 step = STMT_VINFO_DR_STEP (stmt_info);
4078 else
4079 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
4081 if (integer_zerop (step))
4082 *inv_p = true;
4083 else
4084 *inv_p = false;
4086 /* Create an expression for the first address accessed by this load
4087 in LOOP. */
4088 base_name = get_name (DR_BASE_ADDRESS (dr));
4090 if (dump_enabled_p ())
4092 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4093 dump_printf_loc (MSG_NOTE, vect_location,
4094 "create %s-pointer variable to type: ",
4095 get_tree_code_name (TREE_CODE (aggr_type)));
4096 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4097 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4098 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4099 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4100 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4101 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4102 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4103 else
4104 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4105 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4106 dump_printf (MSG_NOTE, "\n");
4109 /* (1) Create the new aggregate-pointer variable.
4110 Vector and array types inherit the alias set of their component
4111 type by default so we need to use a ref-all pointer if the data
4112 reference does not conflict with the created aggregated data
4113 reference because it is not addressable. */
4114 bool need_ref_all = false;
4115 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4116 get_alias_set (DR_REF (dr))))
4117 need_ref_all = true;
4118 /* Likewise for any of the data references in the stmt group. */
4119 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4121 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4124 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4125 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4126 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4127 get_alias_set (DR_REF (sdr))))
4129 need_ref_all = true;
4130 break;
4132 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4134 while (orig_stmt);
4136 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4137 need_ref_all);
4138 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4141 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4142 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4143 def-use update cycles for the pointer: one relative to the outer-loop
4144 (LOOP), which is what steps (3) and (4) below do. The other is relative
4145 to the inner-loop (which is the inner-most loop containing the dataref),
4146 and this is done be step (5) below.
4148 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4149 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4150 redundant. Steps (3),(4) create the following:
4152 vp0 = &base_addr;
4153 LOOP: vp1 = phi(vp0,vp2)
4156 vp2 = vp1 + step
4157 goto LOOP
4159 If there is an inner-loop nested in loop, then step (5) will also be
4160 applied, and an additional update in the inner-loop will be created:
4162 vp0 = &base_addr;
4163 LOOP: vp1 = phi(vp0,vp2)
4165 inner: vp3 = phi(vp1,vp4)
4166 vp4 = vp3 + inner_step
4167 if () goto inner
4169 vp2 = vp1 + step
4170 if () goto LOOP */
4172 /* (2) Calculate the initial address of the aggregate-pointer, and set
4173 the aggregate-pointer to point to it before the loop. */
4175 /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader. */
4177 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4178 offset, loop, byte_offset);
4179 if (new_stmt_list)
4181 if (pe)
4183 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4184 gcc_assert (!new_bb);
4186 else
4187 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4190 *initial_address = new_temp;
4191 aggr_ptr_init = new_temp;
4193 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4194 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4195 inner-loop nested in LOOP (during outer-loop vectorization). */
4197 /* No update in loop is required. */
4198 if (only_init && (!loop_vinfo || at_loop == loop))
4199 aptr = aggr_ptr_init;
4200 else
4202 /* The step of the aggregate pointer is the type size. */
4203 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4204 /* One exception to the above is when the scalar step of the load in
4205 LOOP is zero. In this case the step here is also zero. */
4206 if (*inv_p)
4207 iv_step = size_zero_node;
4208 else if (tree_int_cst_sgn (step) == -1)
4209 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4211 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4213 create_iv (aggr_ptr_init,
4214 fold_convert (aggr_ptr_type, iv_step),
4215 aggr_ptr, loop, &incr_gsi, insert_after,
4216 &indx_before_incr, &indx_after_incr);
4217 incr = gsi_stmt (incr_gsi);
4218 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4220 /* Copy the points-to information if it exists. */
4221 if (DR_PTR_INFO (dr))
4223 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4224 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4226 if (ptr_incr)
4227 *ptr_incr = incr;
4229 aptr = indx_before_incr;
4232 if (!nested_in_vect_loop || only_init)
4233 return aptr;
4236 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4237 nested in LOOP, if exists. */
4239 gcc_assert (nested_in_vect_loop);
4240 if (!only_init)
4242 standard_iv_increment_position (containing_loop, &incr_gsi,
4243 &insert_after);
4244 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4245 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4246 &indx_after_incr);
4247 incr = gsi_stmt (incr_gsi);
4248 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4250 /* Copy the points-to information if it exists. */
4251 if (DR_PTR_INFO (dr))
4253 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4254 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4256 if (ptr_incr)
4257 *ptr_incr = incr;
4259 return indx_before_incr;
4261 else
4262 gcc_unreachable ();
4266 /* Function bump_vector_ptr
4268 Increment a pointer (to a vector type) by vector-size. If requested,
4269 i.e. if PTR-INCR is given, then also connect the new increment stmt
4270 to the existing def-use update-chain of the pointer, by modifying
4271 the PTR_INCR as illustrated below:
4273 The pointer def-use update-chain before this function:
4274 DATAREF_PTR = phi (p_0, p_2)
4275 ....
4276 PTR_INCR: p_2 = DATAREF_PTR + step
4278 The pointer def-use update-chain after this function:
4279 DATAREF_PTR = phi (p_0, p_2)
4280 ....
4281 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4282 ....
4283 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4285 Input:
4286 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4287 in the loop.
4288 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4289 the loop. The increment amount across iterations is expected
4290 to be vector_size.
4291 BSI - location where the new update stmt is to be placed.
4292 STMT - the original scalar memory-access stmt that is being vectorized.
4293 BUMP - optional. The offset by which to bump the pointer. If not given,
4294 the offset is assumed to be vector_size.
4296 Output: Return NEW_DATAREF_PTR as illustrated above.
4300 tree
4301 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
4302 gimple stmt, tree bump)
4304 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4305 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4306 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4307 tree update = TYPE_SIZE_UNIT (vectype);
4308 gassign *incr_stmt;
4309 ssa_op_iter iter;
4310 use_operand_p use_p;
4311 tree new_dataref_ptr;
4313 if (bump)
4314 update = bump;
4316 if (TREE_CODE (dataref_ptr) == SSA_NAME)
4317 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4318 else
4319 new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
4320 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4321 dataref_ptr, update);
4322 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4324 /* Copy the points-to information if it exists. */
4325 if (DR_PTR_INFO (dr))
4327 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4328 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4331 if (!ptr_incr)
4332 return new_dataref_ptr;
4334 /* Update the vector-pointer's cross-iteration increment. */
4335 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4337 tree use = USE_FROM_PTR (use_p);
4339 if (use == dataref_ptr)
4340 SET_USE (use_p, new_dataref_ptr);
4341 else
4342 gcc_assert (tree_int_cst_compare (use, update) == 0);
4345 return new_dataref_ptr;
4349 /* Function vect_create_destination_var.
4351 Create a new temporary of type VECTYPE. */
4353 tree
4354 vect_create_destination_var (tree scalar_dest, tree vectype)
4356 tree vec_dest;
4357 const char *name;
4358 char *new_name;
4359 tree type;
4360 enum vect_var_kind kind;
4362 kind = vectype ? vect_simple_var : vect_scalar_var;
4363 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4365 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4367 name = get_name (scalar_dest);
4368 if (name)
4369 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4370 else
4371 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4372 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4373 free (new_name);
4375 return vec_dest;
4378 /* Function vect_grouped_store_supported.
4380 Returns TRUE if interleave high and interleave low permutations
4381 are supported, and FALSE otherwise. */
4383 bool
4384 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4386 machine_mode mode = TYPE_MODE (vectype);
4388 /* vect_permute_store_chain requires the group size to be equal to 3 or
4389 be a power of two. */
4390 if (count != 3 && exact_log2 (count) == -1)
4392 if (dump_enabled_p ())
4393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4394 "the size of the group of accesses"
4395 " is not a power of 2 or not eqaul to 3\n");
4396 return false;
4399 /* Check that the permutation is supported. */
4400 if (VECTOR_MODE_P (mode))
4402 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4403 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4405 if (count == 3)
4407 unsigned int j0 = 0, j1 = 0, j2 = 0;
4408 unsigned int i, j;
4410 for (j = 0; j < 3; j++)
4412 int nelt0 = ((3 - j) * nelt) % 3;
4413 int nelt1 = ((3 - j) * nelt + 1) % 3;
4414 int nelt2 = ((3 - j) * nelt + 2) % 3;
4415 for (i = 0; i < nelt; i++)
4417 if (3 * i + nelt0 < nelt)
4418 sel[3 * i + nelt0] = j0++;
4419 if (3 * i + nelt1 < nelt)
4420 sel[3 * i + nelt1] = nelt + j1++;
4421 if (3 * i + nelt2 < nelt)
4422 sel[3 * i + nelt2] = 0;
4424 if (!can_vec_perm_p (mode, false, sel))
4426 if (dump_enabled_p ())
4427 dump_printf (MSG_MISSED_OPTIMIZATION,
4428 "permutaion op not supported by target.\n");
4429 return false;
4432 for (i = 0; i < nelt; i++)
4434 if (3 * i + nelt0 < nelt)
4435 sel[3 * i + nelt0] = 3 * i + nelt0;
4436 if (3 * i + nelt1 < nelt)
4437 sel[3 * i + nelt1] = 3 * i + nelt1;
4438 if (3 * i + nelt2 < nelt)
4439 sel[3 * i + nelt2] = nelt + j2++;
4441 if (!can_vec_perm_p (mode, false, sel))
4443 if (dump_enabled_p ())
4444 dump_printf (MSG_MISSED_OPTIMIZATION,
4445 "permutaion op not supported by target.\n");
4446 return false;
4449 return true;
4451 else
4453 /* If length is not equal to 3 then only power of 2 is supported. */
4454 gcc_assert (exact_log2 (count) != -1);
4456 for (i = 0; i < nelt / 2; i++)
4458 sel[i * 2] = i;
4459 sel[i * 2 + 1] = i + nelt;
4461 if (can_vec_perm_p (mode, false, sel))
4463 for (i = 0; i < nelt; i++)
4464 sel[i] += nelt / 2;
4465 if (can_vec_perm_p (mode, false, sel))
4466 return true;
4471 if (dump_enabled_p ())
4472 dump_printf (MSG_MISSED_OPTIMIZATION,
4473 "permutaion op not supported by target.\n");
4474 return false;
4478 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4479 type VECTYPE. */
4481 bool
4482 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4484 return vect_lanes_optab_supported_p ("vec_store_lanes",
4485 vec_store_lanes_optab,
4486 vectype, count);
4490 /* Function vect_permute_store_chain.
4492 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4493 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4494 the data correctly for the stores. Return the final references for stores
4495 in RESULT_CHAIN.
4497 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4498 The input is 4 vectors each containing 8 elements. We assign a number to
4499 each element, the input sequence is:
4501 1st vec: 0 1 2 3 4 5 6 7
4502 2nd vec: 8 9 10 11 12 13 14 15
4503 3rd vec: 16 17 18 19 20 21 22 23
4504 4th vec: 24 25 26 27 28 29 30 31
4506 The output sequence should be:
4508 1st vec: 0 8 16 24 1 9 17 25
4509 2nd vec: 2 10 18 26 3 11 19 27
4510 3rd vec: 4 12 20 28 5 13 21 30
4511 4th vec: 6 14 22 30 7 15 23 31
4513 i.e., we interleave the contents of the four vectors in their order.
4515 We use interleave_high/low instructions to create such output. The input of
4516 each interleave_high/low operation is two vectors:
4517 1st vec 2nd vec
4518 0 1 2 3 4 5 6 7
4519 the even elements of the result vector are obtained left-to-right from the
4520 high/low elements of the first vector. The odd elements of the result are
4521 obtained left-to-right from the high/low elements of the second vector.
4522 The output of interleave_high will be: 0 4 1 5
4523 and of interleave_low: 2 6 3 7
4526 The permutation is done in log LENGTH stages. In each stage interleave_high
4527 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4528 where the first argument is taken from the first half of DR_CHAIN and the
4529 second argument from it's second half.
4530 In our example,
4532 I1: interleave_high (1st vec, 3rd vec)
4533 I2: interleave_low (1st vec, 3rd vec)
4534 I3: interleave_high (2nd vec, 4th vec)
4535 I4: interleave_low (2nd vec, 4th vec)
4537 The output for the first stage is:
4539 I1: 0 16 1 17 2 18 3 19
4540 I2: 4 20 5 21 6 22 7 23
4541 I3: 8 24 9 25 10 26 11 27
4542 I4: 12 28 13 29 14 30 15 31
4544 The output of the second stage, i.e. the final result is:
4546 I1: 0 8 16 24 1 9 17 25
4547 I2: 2 10 18 26 3 11 19 27
4548 I3: 4 12 20 28 5 13 21 30
4549 I4: 6 14 22 30 7 15 23 31. */
4551 void
4552 vect_permute_store_chain (vec<tree> dr_chain,
4553 unsigned int length,
4554 gimple stmt,
4555 gimple_stmt_iterator *gsi,
4556 vec<tree> *result_chain)
4558 tree vect1, vect2, high, low;
4559 gimple perm_stmt;
4560 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4561 tree perm_mask_low, perm_mask_high;
4562 tree data_ref;
4563 tree perm3_mask_low, perm3_mask_high;
4564 unsigned int i, n, log_length = exact_log2 (length);
4565 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4566 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4568 result_chain->quick_grow (length);
4569 memcpy (result_chain->address (), dr_chain.address (),
4570 length * sizeof (tree));
4572 if (length == 3)
4574 unsigned int j0 = 0, j1 = 0, j2 = 0;
4576 for (j = 0; j < 3; j++)
4578 int nelt0 = ((3 - j) * nelt) % 3;
4579 int nelt1 = ((3 - j) * nelt + 1) % 3;
4580 int nelt2 = ((3 - j) * nelt + 2) % 3;
4582 for (i = 0; i < nelt; i++)
4584 if (3 * i + nelt0 < nelt)
4585 sel[3 * i + nelt0] = j0++;
4586 if (3 * i + nelt1 < nelt)
4587 sel[3 * i + nelt1] = nelt + j1++;
4588 if (3 * i + nelt2 < nelt)
4589 sel[3 * i + nelt2] = 0;
4591 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4593 for (i = 0; i < nelt; i++)
4595 if (3 * i + nelt0 < nelt)
4596 sel[3 * i + nelt0] = 3 * i + nelt0;
4597 if (3 * i + nelt1 < nelt)
4598 sel[3 * i + nelt1] = 3 * i + nelt1;
4599 if (3 * i + nelt2 < nelt)
4600 sel[3 * i + nelt2] = nelt + j2++;
4602 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4604 vect1 = dr_chain[0];
4605 vect2 = dr_chain[1];
4607 /* Create interleaving stmt:
4608 low = VEC_PERM_EXPR <vect1, vect2,
4609 {j, nelt, *, j + 1, nelt + j + 1, *,
4610 j + 2, nelt + j + 2, *, ...}> */
4611 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4612 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4613 vect2, perm3_mask_low);
4614 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4616 vect1 = data_ref;
4617 vect2 = dr_chain[2];
4618 /* Create interleaving stmt:
4619 low = VEC_PERM_EXPR <vect1, vect2,
4620 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4621 6, 7, nelt + j + 2, ...}> */
4622 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4623 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4624 vect2, perm3_mask_high);
4625 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4626 (*result_chain)[j] = data_ref;
4629 else
4631 /* If length is not equal to 3 then only power of 2 is supported. */
4632 gcc_assert (exact_log2 (length) != -1);
4634 for (i = 0, n = nelt / 2; i < n; i++)
4636 sel[i * 2] = i;
4637 sel[i * 2 + 1] = i + nelt;
4639 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4641 for (i = 0; i < nelt; i++)
4642 sel[i] += nelt / 2;
4643 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4645 for (i = 0, n = log_length; i < n; i++)
4647 for (j = 0; j < length/2; j++)
4649 vect1 = dr_chain[j];
4650 vect2 = dr_chain[j+length/2];
4652 /* Create interleaving stmt:
4653 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4654 ...}> */
4655 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4656 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4657 vect2, perm_mask_high);
4658 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4659 (*result_chain)[2*j] = high;
4661 /* Create interleaving stmt:
4662 low = VEC_PERM_EXPR <vect1, vect2,
4663 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4664 ...}> */
4665 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4666 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4667 vect2, perm_mask_low);
4668 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4669 (*result_chain)[2*j+1] = low;
4671 memcpy (dr_chain.address (), result_chain->address (),
4672 length * sizeof (tree));
4677 /* Function vect_setup_realignment
4679 This function is called when vectorizing an unaligned load using
4680 the dr_explicit_realign[_optimized] scheme.
4681 This function generates the following code at the loop prolog:
4683 p = initial_addr;
4684 x msq_init = *(floor(p)); # prolog load
4685 realignment_token = call target_builtin;
4686 loop:
4687 x msq = phi (msq_init, ---)
4689 The stmts marked with x are generated only for the case of
4690 dr_explicit_realign_optimized.
4692 The code above sets up a new (vector) pointer, pointing to the first
4693 location accessed by STMT, and a "floor-aligned" load using that pointer.
4694 It also generates code to compute the "realignment-token" (if the relevant
4695 target hook was defined), and creates a phi-node at the loop-header bb
4696 whose arguments are the result of the prolog-load (created by this
4697 function) and the result of a load that takes place in the loop (to be
4698 created by the caller to this function).
4700 For the case of dr_explicit_realign_optimized:
4701 The caller to this function uses the phi-result (msq) to create the
4702 realignment code inside the loop, and sets up the missing phi argument,
4703 as follows:
4704 loop:
4705 msq = phi (msq_init, lsq)
4706 lsq = *(floor(p')); # load in loop
4707 result = realign_load (msq, lsq, realignment_token);
4709 For the case of dr_explicit_realign:
4710 loop:
4711 msq = *(floor(p)); # load in loop
4712 p' = p + (VS-1);
4713 lsq = *(floor(p')); # load in loop
4714 result = realign_load (msq, lsq, realignment_token);
4716 Input:
4717 STMT - (scalar) load stmt to be vectorized. This load accesses
4718 a memory location that may be unaligned.
4719 BSI - place where new code is to be inserted.
4720 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4721 is used.
4723 Output:
4724 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4725 target hook, if defined.
4726 Return value - the result of the loop-header phi node. */
4728 tree
4729 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4730 tree *realignment_token,
4731 enum dr_alignment_support alignment_support_scheme,
4732 tree init_addr,
4733 struct loop **at_loop)
4735 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4736 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4737 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4738 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4739 struct loop *loop = NULL;
4740 edge pe = NULL;
4741 tree scalar_dest = gimple_assign_lhs (stmt);
4742 tree vec_dest;
4743 gimple inc;
4744 tree ptr;
4745 tree data_ref;
4746 basic_block new_bb;
4747 tree msq_init = NULL_TREE;
4748 tree new_temp;
4749 gphi *phi_stmt;
4750 tree msq = NULL_TREE;
4751 gimple_seq stmts = NULL;
4752 bool inv_p;
4753 bool compute_in_loop = false;
4754 bool nested_in_vect_loop = false;
4755 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4756 struct loop *loop_for_initial_load = NULL;
4758 if (loop_vinfo)
4760 loop = LOOP_VINFO_LOOP (loop_vinfo);
4761 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4764 gcc_assert (alignment_support_scheme == dr_explicit_realign
4765 || alignment_support_scheme == dr_explicit_realign_optimized);
4767 /* We need to generate three things:
4768 1. the misalignment computation
4769 2. the extra vector load (for the optimized realignment scheme).
4770 3. the phi node for the two vectors from which the realignment is
4771 done (for the optimized realignment scheme). */
4773 /* 1. Determine where to generate the misalignment computation.
4775 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4776 calculation will be generated by this function, outside the loop (in the
4777 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4778 caller, inside the loop.
4780 Background: If the misalignment remains fixed throughout the iterations of
4781 the loop, then both realignment schemes are applicable, and also the
4782 misalignment computation can be done outside LOOP. This is because we are
4783 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4784 are a multiple of VS (the Vector Size), and therefore the misalignment in
4785 different vectorized LOOP iterations is always the same.
4786 The problem arises only if the memory access is in an inner-loop nested
4787 inside LOOP, which is now being vectorized using outer-loop vectorization.
4788 This is the only case when the misalignment of the memory access may not
4789 remain fixed throughout the iterations of the inner-loop (as explained in
4790 detail in vect_supportable_dr_alignment). In this case, not only is the
4791 optimized realignment scheme not applicable, but also the misalignment
4792 computation (and generation of the realignment token that is passed to
4793 REALIGN_LOAD) have to be done inside the loop.
4795 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4796 or not, which in turn determines if the misalignment is computed inside
4797 the inner-loop, or outside LOOP. */
4799 if (init_addr != NULL_TREE || !loop_vinfo)
4801 compute_in_loop = true;
4802 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4806 /* 2. Determine where to generate the extra vector load.
4808 For the optimized realignment scheme, instead of generating two vector
4809 loads in each iteration, we generate a single extra vector load in the
4810 preheader of the loop, and in each iteration reuse the result of the
4811 vector load from the previous iteration. In case the memory access is in
4812 an inner-loop nested inside LOOP, which is now being vectorized using
4813 outer-loop vectorization, we need to determine whether this initial vector
4814 load should be generated at the preheader of the inner-loop, or can be
4815 generated at the preheader of LOOP. If the memory access has no evolution
4816 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4817 to be generated inside LOOP (in the preheader of the inner-loop). */
4819 if (nested_in_vect_loop)
4821 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4822 bool invariant_in_outerloop =
4823 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4824 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4826 else
4827 loop_for_initial_load = loop;
4828 if (at_loop)
4829 *at_loop = loop_for_initial_load;
4831 if (loop_for_initial_load)
4832 pe = loop_preheader_edge (loop_for_initial_load);
4834 /* 3. For the case of the optimized realignment, create the first vector
4835 load at the loop preheader. */
4837 if (alignment_support_scheme == dr_explicit_realign_optimized)
4839 /* Create msq_init = *(floor(p1)) in the loop preheader */
4840 gassign *new_stmt;
4842 gcc_assert (!compute_in_loop);
4843 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4844 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4845 NULL_TREE, &init_addr, NULL, &inc,
4846 true, &inv_p);
4847 if (TREE_CODE (ptr) == SSA_NAME)
4848 new_temp = copy_ssa_name (ptr);
4849 else
4850 new_temp = make_ssa_name (TREE_TYPE (ptr));
4851 new_stmt = gimple_build_assign
4852 (new_temp, BIT_AND_EXPR, ptr,
4853 build_int_cst (TREE_TYPE (ptr),
4854 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4855 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4856 gcc_assert (!new_bb);
4857 data_ref
4858 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4859 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4860 new_stmt = gimple_build_assign (vec_dest, data_ref);
4861 new_temp = make_ssa_name (vec_dest, new_stmt);
4862 gimple_assign_set_lhs (new_stmt, new_temp);
4863 if (pe)
4865 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4866 gcc_assert (!new_bb);
4868 else
4869 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4871 msq_init = gimple_assign_lhs (new_stmt);
4874 /* 4. Create realignment token using a target builtin, if available.
4875 It is done either inside the containing loop, or before LOOP (as
4876 determined above). */
4878 if (targetm.vectorize.builtin_mask_for_load)
4880 gcall *new_stmt;
4881 tree builtin_decl;
4883 /* Compute INIT_ADDR - the initial addressed accessed by this memref. */
4884 if (!init_addr)
4886 /* Generate the INIT_ADDR computation outside LOOP. */
4887 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4888 NULL_TREE, loop);
4889 if (loop)
4891 pe = loop_preheader_edge (loop);
4892 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4893 gcc_assert (!new_bb);
4895 else
4896 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4899 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4900 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4901 vec_dest =
4902 vect_create_destination_var (scalar_dest,
4903 gimple_call_return_type (new_stmt));
4904 new_temp = make_ssa_name (vec_dest, new_stmt);
4905 gimple_call_set_lhs (new_stmt, new_temp);
4907 if (compute_in_loop)
4908 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4909 else
4911 /* Generate the misalignment computation outside LOOP. */
4912 pe = loop_preheader_edge (loop);
4913 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4914 gcc_assert (!new_bb);
4917 *realignment_token = gimple_call_lhs (new_stmt);
4919 /* The result of the CALL_EXPR to this builtin is determined from
4920 the value of the parameter and no global variables are touched
4921 which makes the builtin a "const" function. Requiring the
4922 builtin to have the "const" attribute makes it unnecessary
4923 to call mark_call_clobbered. */
4924 gcc_assert (TREE_READONLY (builtin_decl));
4927 if (alignment_support_scheme == dr_explicit_realign)
4928 return msq;
4930 gcc_assert (!compute_in_loop);
4931 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4934 /* 5. Create msq = phi <msq_init, lsq> in loop */
4936 pe = loop_preheader_edge (containing_loop);
4937 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4938 msq = make_ssa_name (vec_dest);
4939 phi_stmt = create_phi_node (msq, containing_loop->header);
4940 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4942 return msq;
4946 /* Function vect_grouped_load_supported.
4948 Returns TRUE if even and odd permutations are supported,
4949 and FALSE otherwise. */
4951 bool
4952 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4954 machine_mode mode = TYPE_MODE (vectype);
4956 /* vect_permute_load_chain requires the group size to be equal to 3 or
4957 be a power of two. */
4958 if (count != 3 && exact_log2 (count) == -1)
4960 if (dump_enabled_p ())
4961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4962 "the size of the group of accesses"
4963 " is not a power of 2 or not equal to 3\n");
4964 return false;
4967 /* Check that the permutation is supported. */
4968 if (VECTOR_MODE_P (mode))
4970 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
4971 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4973 if (count == 3)
4975 unsigned int k;
4976 for (k = 0; k < 3; k++)
4978 for (i = 0; i < nelt; i++)
4979 if (3 * i + k < 2 * nelt)
4980 sel[i] = 3 * i + k;
4981 else
4982 sel[i] = 0;
4983 if (!can_vec_perm_p (mode, false, sel))
4985 if (dump_enabled_p ())
4986 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4987 "shuffle of 3 loads is not supported by"
4988 " target\n");
4989 return false;
4991 for (i = 0, j = 0; i < nelt; i++)
4992 if (3 * i + k < 2 * nelt)
4993 sel[i] = i;
4994 else
4995 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
4996 if (!can_vec_perm_p (mode, false, sel))
4998 if (dump_enabled_p ())
4999 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5000 "shuffle of 3 loads is not supported by"
5001 " target\n");
5002 return false;
5005 return true;
5007 else
5009 /* If length is not equal to 3 then only power of 2 is supported. */
5010 gcc_assert (exact_log2 (count) != -1);
5011 for (i = 0; i < nelt; i++)
5012 sel[i] = i * 2;
5013 if (can_vec_perm_p (mode, false, sel))
5015 for (i = 0; i < nelt; i++)
5016 sel[i] = i * 2 + 1;
5017 if (can_vec_perm_p (mode, false, sel))
5018 return true;
5023 if (dump_enabled_p ())
5024 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5025 "extract even/odd not supported by target\n");
5026 return false;
5029 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5030 type VECTYPE. */
5032 bool
5033 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5035 return vect_lanes_optab_supported_p ("vec_load_lanes",
5036 vec_load_lanes_optab,
5037 vectype, count);
5040 /* Function vect_permute_load_chain.
5042 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5043 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5044 the input data correctly. Return the final references for loads in
5045 RESULT_CHAIN.
5047 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5048 The input is 4 vectors each containing 8 elements. We assign a number to each
5049 element, the input sequence is:
5051 1st vec: 0 1 2 3 4 5 6 7
5052 2nd vec: 8 9 10 11 12 13 14 15
5053 3rd vec: 16 17 18 19 20 21 22 23
5054 4th vec: 24 25 26 27 28 29 30 31
5056 The output sequence should be:
5058 1st vec: 0 4 8 12 16 20 24 28
5059 2nd vec: 1 5 9 13 17 21 25 29
5060 3rd vec: 2 6 10 14 18 22 26 30
5061 4th vec: 3 7 11 15 19 23 27 31
5063 i.e., the first output vector should contain the first elements of each
5064 interleaving group, etc.
5066 We use extract_even/odd instructions to create such output. The input of
5067 each extract_even/odd operation is two vectors
5068 1st vec 2nd vec
5069 0 1 2 3 4 5 6 7
5071 and the output is the vector of extracted even/odd elements. The output of
5072 extract_even will be: 0 2 4 6
5073 and of extract_odd: 1 3 5 7
5076 The permutation is done in log LENGTH stages. In each stage extract_even
5077 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5078 their order. In our example,
5080 E1: extract_even (1st vec, 2nd vec)
5081 E2: extract_odd (1st vec, 2nd vec)
5082 E3: extract_even (3rd vec, 4th vec)
5083 E4: extract_odd (3rd vec, 4th vec)
5085 The output for the first stage will be:
5087 E1: 0 2 4 6 8 10 12 14
5088 E2: 1 3 5 7 9 11 13 15
5089 E3: 16 18 20 22 24 26 28 30
5090 E4: 17 19 21 23 25 27 29 31
5092 In order to proceed and create the correct sequence for the next stage (or
5093 for the correct output, if the second stage is the last one, as in our
5094 example), we first put the output of extract_even operation and then the
5095 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5096 The input for the second stage is:
5098 1st vec (E1): 0 2 4 6 8 10 12 14
5099 2nd vec (E3): 16 18 20 22 24 26 28 30
5100 3rd vec (E2): 1 3 5 7 9 11 13 15
5101 4th vec (E4): 17 19 21 23 25 27 29 31
5103 The output of the second stage:
5105 E1: 0 4 8 12 16 20 24 28
5106 E2: 2 6 10 14 18 22 26 30
5107 E3: 1 5 9 13 17 21 25 29
5108 E4: 3 7 11 15 19 23 27 31
5110 And RESULT_CHAIN after reordering:
5112 1st vec (E1): 0 4 8 12 16 20 24 28
5113 2nd vec (E3): 1 5 9 13 17 21 25 29
5114 3rd vec (E2): 2 6 10 14 18 22 26 30
5115 4th vec (E4): 3 7 11 15 19 23 27 31. */
5117 static void
5118 vect_permute_load_chain (vec<tree> dr_chain,
5119 unsigned int length,
5120 gimple stmt,
5121 gimple_stmt_iterator *gsi,
5122 vec<tree> *result_chain)
5124 tree data_ref, first_vect, second_vect;
5125 tree perm_mask_even, perm_mask_odd;
5126 tree perm3_mask_low, perm3_mask_high;
5127 gimple perm_stmt;
5128 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5129 unsigned int i, j, log_length = exact_log2 (length);
5130 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5131 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5133 result_chain->quick_grow (length);
5134 memcpy (result_chain->address (), dr_chain.address (),
5135 length * sizeof (tree));
5137 if (length == 3)
5139 unsigned int k;
5141 for (k = 0; k < 3; k++)
5143 for (i = 0; i < nelt; i++)
5144 if (3 * i + k < 2 * nelt)
5145 sel[i] = 3 * i + k;
5146 else
5147 sel[i] = 0;
5148 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
5150 for (i = 0, j = 0; i < nelt; i++)
5151 if (3 * i + k < 2 * nelt)
5152 sel[i] = i;
5153 else
5154 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5156 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
5158 first_vect = dr_chain[0];
5159 second_vect = dr_chain[1];
5161 /* Create interleaving stmt (low part of):
5162 low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5163 ...}> */
5164 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5165 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5166 second_vect, perm3_mask_low);
5167 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5169 /* Create interleaving stmt (high part of):
5170 high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5171 ...}> */
5172 first_vect = data_ref;
5173 second_vect = dr_chain[2];
5174 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5175 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5176 second_vect, perm3_mask_high);
5177 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5178 (*result_chain)[k] = data_ref;
5181 else
5183 /* If length is not equal to 3 then only power of 2 is supported. */
5184 gcc_assert (exact_log2 (length) != -1);
5186 for (i = 0; i < nelt; ++i)
5187 sel[i] = i * 2;
5188 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
5190 for (i = 0; i < nelt; ++i)
5191 sel[i] = i * 2 + 1;
5192 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
5194 for (i = 0; i < log_length; i++)
5196 for (j = 0; j < length; j += 2)
5198 first_vect = dr_chain[j];
5199 second_vect = dr_chain[j+1];
5201 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5202 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5203 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5204 first_vect, second_vect,
5205 perm_mask_even);
5206 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5207 (*result_chain)[j/2] = data_ref;
5209 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5210 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5211 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5212 first_vect, second_vect,
5213 perm_mask_odd);
5214 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5215 (*result_chain)[j/2+length/2] = data_ref;
5217 memcpy (dr_chain.address (), result_chain->address (),
5218 length * sizeof (tree));
5223 /* Function vect_shift_permute_load_chain.
5225 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
5226 sequence of stmts to reorder the input data accordingly.
5227 Return the final references for loads in RESULT_CHAIN.
5228 Return true if successed, false otherwise.
5230 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5231 The input is 3 vectors each containing 8 elements. We assign a
5232 number to each element, the input sequence is:
5234 1st vec: 0 1 2 3 4 5 6 7
5235 2nd vec: 8 9 10 11 12 13 14 15
5236 3rd vec: 16 17 18 19 20 21 22 23
5238 The output sequence should be:
5240 1st vec: 0 3 6 9 12 15 18 21
5241 2nd vec: 1 4 7 10 13 16 19 22
5242 3rd vec: 2 5 8 11 14 17 20 23
5244 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5246 First we shuffle all 3 vectors to get correct elements order:
5248 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5249 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5250 3rd vec: (16 19 22) (17 20 23) (18 21)
5252 Next we unite and shift vector 3 times:
5254 1st step:
5255 shift right by 6 the concatenation of:
5256 "1st vec" and "2nd vec"
5257 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5258 "2nd vec" and "3rd vec"
5259 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5260 "3rd vec" and "1st vec"
5261 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5262 | New vectors |
5264 So that now new vectors are:
5266 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5267 2nd vec: (10 13) (16 19 22) (17 20 23)
5268 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5270 2nd step:
5271 shift right by 5 the concatenation of:
5272 "1st vec" and "3rd vec"
5273 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5274 "2nd vec" and "1st vec"
5275 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5276 "3rd vec" and "2nd vec"
5277 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5278 | New vectors |
5280 So that now new vectors are:
5282 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5283 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5284 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5286 3rd step:
5287 shift right by 5 the concatenation of:
5288 "1st vec" and "1st vec"
5289 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5290 shift right by 3 the concatenation of:
5291 "2nd vec" and "2nd vec"
5292 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5293 | New vectors |
5295 So that now all vectors are READY:
5296 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5297 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5298 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5300 This algorithm is faster than one in vect_permute_load_chain if:
5301 1. "shift of a concatination" is faster than general permutation.
5302 This is usually so.
5303 2. The TARGET machine can't execute vector instructions in parallel.
5304 This is because each step of the algorithm depends on previous.
5305 The algorithm in vect_permute_load_chain is much more parallel.
5307 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
5310 static bool
5311 vect_shift_permute_load_chain (vec<tree> dr_chain,
5312 unsigned int length,
5313 gimple stmt,
5314 gimple_stmt_iterator *gsi,
5315 vec<tree> *result_chain)
5317 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5318 tree perm2_mask1, perm2_mask2, perm3_mask;
5319 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5320 gimple perm_stmt;
5322 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5323 unsigned int i;
5324 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5325 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5326 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5327 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5329 result_chain->quick_grow (length);
5330 memcpy (result_chain->address (), dr_chain.address (),
5331 length * sizeof (tree));
5333 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
5335 unsigned int j, log_length = exact_log2 (length);
5336 for (i = 0; i < nelt / 2; ++i)
5337 sel[i] = i * 2;
5338 for (i = 0; i < nelt / 2; ++i)
5339 sel[nelt / 2 + i] = i * 2 + 1;
5340 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5342 if (dump_enabled_p ())
5343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5344 "shuffle of 2 fields structure is not \
5345 supported by target\n");
5346 return false;
5348 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
5350 for (i = 0; i < nelt / 2; ++i)
5351 sel[i] = i * 2 + 1;
5352 for (i = 0; i < nelt / 2; ++i)
5353 sel[nelt / 2 + i] = i * 2;
5354 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5356 if (dump_enabled_p ())
5357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5358 "shuffle of 2 fields structure is not \
5359 supported by target\n");
5360 return false;
5362 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
5364 /* Generating permutation constant to shift all elements.
5365 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5366 for (i = 0; i < nelt; i++)
5367 sel[i] = nelt / 2 + i;
5368 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5370 if (dump_enabled_p ())
5371 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5372 "shift permutation is not supported by target\n");
5373 return false;
5375 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5377 /* Generating permutation constant to select vector from 2.
5378 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5379 for (i = 0; i < nelt / 2; i++)
5380 sel[i] = i;
5381 for (i = nelt / 2; i < nelt; i++)
5382 sel[i] = nelt + i;
5383 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5385 if (dump_enabled_p ())
5386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5387 "select is not supported by target\n");
5388 return false;
5390 select_mask = vect_gen_perm_mask_checked (vectype, sel);
5392 for (i = 0; i < log_length; i++)
5394 for (j = 0; j < length; j += 2)
5396 first_vect = dr_chain[j];
5397 second_vect = dr_chain[j + 1];
5399 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5400 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5401 first_vect, first_vect,
5402 perm2_mask1);
5403 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5404 vect[0] = data_ref;
5406 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5407 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5408 second_vect, second_vect,
5409 perm2_mask2);
5410 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5411 vect[1] = data_ref;
5413 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5414 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5415 vect[0], vect[1], shift1_mask);
5416 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5417 (*result_chain)[j/2 + length/2] = data_ref;
5419 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5420 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5421 vect[0], vect[1], select_mask);
5422 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5423 (*result_chain)[j/2] = data_ref;
5425 memcpy (dr_chain.address (), result_chain->address (),
5426 length * sizeof (tree));
5428 return true;
5430 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5432 unsigned int k = 0, l = 0;
5434 /* Generating permutation constant to get all elements in rigth order.
5435 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5436 for (i = 0; i < nelt; i++)
5438 if (3 * k + (l % 3) >= nelt)
5440 k = 0;
5441 l += (3 - (nelt % 3));
5443 sel[i] = 3 * k + (l % 3);
5444 k++;
5446 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5448 if (dump_enabled_p ())
5449 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5450 "shuffle of 3 fields structure is not \
5451 supported by target\n");
5452 return false;
5454 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
5456 /* Generating permutation constant to shift all elements.
5457 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5458 for (i = 0; i < nelt; i++)
5459 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5460 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5462 if (dump_enabled_p ())
5463 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5464 "shift permutation is not supported by target\n");
5465 return false;
5467 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5469 /* Generating permutation constant to shift all elements.
5470 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5471 for (i = 0; i < nelt; i++)
5472 sel[i] = 2 * (nelt / 3) + 1 + i;
5473 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5475 if (dump_enabled_p ())
5476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5477 "shift permutation is not supported by target\n");
5478 return false;
5480 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
5482 /* Generating permutation constant to shift all elements.
5483 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5484 for (i = 0; i < nelt; i++)
5485 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5486 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5488 if (dump_enabled_p ())
5489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5490 "shift permutation is not supported by target\n");
5491 return false;
5493 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
5495 /* Generating permutation constant to shift all elements.
5496 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5497 for (i = 0; i < nelt; i++)
5498 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5499 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5501 if (dump_enabled_p ())
5502 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5503 "shift permutation is not supported by target\n");
5504 return false;
5506 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
5508 for (k = 0; k < 3; k++)
5510 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5511 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5512 dr_chain[k], dr_chain[k],
5513 perm3_mask);
5514 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5515 vect[k] = data_ref;
5518 for (k = 0; k < 3; k++)
5520 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5521 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5522 vect[k % 3], vect[(k + 1) % 3],
5523 shift1_mask);
5524 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5525 vect_shift[k] = data_ref;
5528 for (k = 0; k < 3; k++)
5530 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5531 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5532 vect_shift[(4 - k) % 3],
5533 vect_shift[(3 - k) % 3],
5534 shift2_mask);
5535 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5536 vect[k] = data_ref;
5539 (*result_chain)[3 - (nelt % 3)] = vect[2];
5541 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5542 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5543 vect[0], shift3_mask);
5544 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5545 (*result_chain)[nelt % 3] = data_ref;
5547 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5548 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5549 vect[1], shift4_mask);
5550 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5551 (*result_chain)[0] = data_ref;
5552 return true;
5554 return false;
5557 /* Function vect_transform_grouped_load.
5559 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5560 to perform their permutation and ascribe the result vectorized statements to
5561 the scalar statements.
5564 void
5565 vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
5566 gimple_stmt_iterator *gsi)
5568 machine_mode mode;
5569 vec<tree> result_chain = vNULL;
5571 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5572 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
5573 vectors, that are ready for vector computation. */
5574 result_chain.create (size);
5576 /* If reassociation width for vector type is 2 or greater target machine can
5577 execute 2 or more vector instructions in parallel. Otherwise try to
5578 get chain for loads group using vect_shift_permute_load_chain. */
5579 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5580 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5581 || exact_log2 (size) != -1
5582 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5583 gsi, &result_chain))
5584 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5585 vect_record_grouped_load_vectors (stmt, result_chain);
5586 result_chain.release ();
5589 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5590 generated as part of the vectorization of STMT. Assign the statement
5591 for each vector to the associated scalar statement. */
5593 void
5594 vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
5596 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5597 gimple next_stmt, new_stmt;
5598 unsigned int i, gap_count;
5599 tree tmp_data_ref;
5601 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5602 Since we scan the chain starting from it's first node, their order
5603 corresponds the order of data-refs in RESULT_CHAIN. */
5604 next_stmt = first_stmt;
5605 gap_count = 1;
5606 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5608 if (!next_stmt)
5609 break;
5611 /* Skip the gaps. Loads created for the gaps will be removed by dead
5612 code elimination pass later. No need to check for the first stmt in
5613 the group, since it always exists.
5614 GROUP_GAP is the number of steps in elements from the previous
5615 access (if there is no gap GROUP_GAP is 1). We skip loads that
5616 correspond to the gaps. */
5617 if (next_stmt != first_stmt
5618 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5620 gap_count++;
5621 continue;
5624 while (next_stmt)
5626 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5627 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5628 copies, and we put the new vector statement in the first available
5629 RELATED_STMT. */
5630 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5631 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5632 else
5634 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5636 gimple prev_stmt =
5637 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5638 gimple rel_stmt =
5639 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5640 while (rel_stmt)
5642 prev_stmt = rel_stmt;
5643 rel_stmt =
5644 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5647 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5648 new_stmt;
5652 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5653 gap_count = 1;
5654 /* If NEXT_STMT accesses the same DR as the previous statement,
5655 put the same TMP_DATA_REF as its vectorized statement; otherwise
5656 get the next data-ref from RESULT_CHAIN. */
5657 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5658 break;
5663 /* Function vect_force_dr_alignment_p.
5665 Returns whether the alignment of a DECL can be forced to be aligned
5666 on ALIGNMENT bit boundary. */
5668 bool
5669 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5671 if (TREE_CODE (decl) != VAR_DECL)
5672 return false;
5674 if (decl_in_symtab_p (decl)
5675 && !symtab_node::get (decl)->can_increase_alignment_p ())
5676 return false;
5678 if (TREE_STATIC (decl))
5679 return (alignment <= MAX_OFILE_ALIGNMENT);
5680 else
5681 return (alignment <= MAX_STACK_ALIGNMENT);
5685 /* Return whether the data reference DR is supported with respect to its
5686 alignment.
5687 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5688 it is aligned, i.e., check if it is possible to vectorize it with different
5689 alignment. */
5691 enum dr_alignment_support
5692 vect_supportable_dr_alignment (struct data_reference *dr,
5693 bool check_aligned_accesses)
5695 gimple stmt = DR_STMT (dr);
5696 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5697 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5698 machine_mode mode = TYPE_MODE (vectype);
5699 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5700 struct loop *vect_loop = NULL;
5701 bool nested_in_vect_loop = false;
5703 if (aligned_access_p (dr) && !check_aligned_accesses)
5704 return dr_aligned;
5706 /* For now assume all conditional loads/stores support unaligned
5707 access without any special code. */
5708 if (is_gimple_call (stmt)
5709 && gimple_call_internal_p (stmt)
5710 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5711 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5712 return dr_unaligned_supported;
5714 if (loop_vinfo)
5716 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5717 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5720 /* Possibly unaligned access. */
5722 /* We can choose between using the implicit realignment scheme (generating
5723 a misaligned_move stmt) and the explicit realignment scheme (generating
5724 aligned loads with a REALIGN_LOAD). There are two variants to the
5725 explicit realignment scheme: optimized, and unoptimized.
5726 We can optimize the realignment only if the step between consecutive
5727 vector loads is equal to the vector size. Since the vector memory
5728 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5729 is guaranteed that the misalignment amount remains the same throughout the
5730 execution of the vectorized loop. Therefore, we can create the
5731 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5732 at the loop preheader.
5734 However, in the case of outer-loop vectorization, when vectorizing a
5735 memory access in the inner-loop nested within the LOOP that is now being
5736 vectorized, while it is guaranteed that the misalignment of the
5737 vectorized memory access will remain the same in different outer-loop
5738 iterations, it is *not* guaranteed that is will remain the same throughout
5739 the execution of the inner-loop. This is because the inner-loop advances
5740 with the original scalar step (and not in steps of VS). If the inner-loop
5741 step happens to be a multiple of VS, then the misalignment remains fixed
5742 and we can use the optimized realignment scheme. For example:
5744 for (i=0; i<N; i++)
5745 for (j=0; j<M; j++)
5746 s += a[i+j];
5748 When vectorizing the i-loop in the above example, the step between
5749 consecutive vector loads is 1, and so the misalignment does not remain
5750 fixed across the execution of the inner-loop, and the realignment cannot
5751 be optimized (as illustrated in the following pseudo vectorized loop):
5753 for (i=0; i<N; i+=4)
5754 for (j=0; j<M; j++){
5755 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5756 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5757 // (assuming that we start from an aligned address).
5760 We therefore have to use the unoptimized realignment scheme:
5762 for (i=0; i<N; i+=4)
5763 for (j=k; j<M; j+=4)
5764 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5765 // that the misalignment of the initial address is
5766 // 0).
5768 The loop can then be vectorized as follows:
5770 for (k=0; k<4; k++){
5771 rt = get_realignment_token (&vp[k]);
5772 for (i=0; i<N; i+=4){
5773 v1 = vp[i+k];
5774 for (j=k; j<M; j+=4){
5775 v2 = vp[i+j+VS-1];
5776 va = REALIGN_LOAD <v1,v2,rt>;
5777 vs += va;
5778 v1 = v2;
5781 } */
5783 if (DR_IS_READ (dr))
5785 bool is_packed = false;
5786 tree type = (TREE_TYPE (DR_REF (dr)));
5788 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
5789 && (!targetm.vectorize.builtin_mask_for_load
5790 || targetm.vectorize.builtin_mask_for_load ()))
5792 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5793 if ((nested_in_vect_loop
5794 && (TREE_INT_CST_LOW (DR_STEP (dr))
5795 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5796 || !loop_vinfo)
5797 return dr_explicit_realign;
5798 else
5799 return dr_explicit_realign_optimized;
5801 if (!known_alignment_for_access_p (dr))
5802 is_packed = not_size_aligned (DR_REF (dr));
5804 if ((TYPE_USER_ALIGN (type) && !is_packed)
5805 || targetm.vectorize.
5806 support_vector_misalignment (mode, type,
5807 DR_MISALIGNMENT (dr), is_packed))
5808 /* Can't software pipeline the loads, but can at least do them. */
5809 return dr_unaligned_supported;
5811 else
5813 bool is_packed = false;
5814 tree type = (TREE_TYPE (DR_REF (dr)));
5816 if (!known_alignment_for_access_p (dr))
5817 is_packed = not_size_aligned (DR_REF (dr));
5819 if ((TYPE_USER_ALIGN (type) && !is_packed)
5820 || targetm.vectorize.
5821 support_vector_misalignment (mode, type,
5822 DR_MISALIGNMENT (dr), is_packed))
5823 return dr_unaligned_supported;
5826 /* Unsupported. */
5827 return dr_unaligned_unsupported;