1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "target.h"
31 #include "basic-block.h"
32 #include "tree-pretty-print.h"
33 #include "gimple-pretty-print.h"
34 #include "tree-flow.h"
35 #include "tree-dump.h"
36 #include "cfgloop.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
39 #include "tree-vectorizer.h"
40 #include "diagnostic-core.h"
42 /* Need to include rtl.h, expr.h, etc. for optabs. */
43 #include "expr.h"
44 #include "optabs.h"
46 /* Return true if load- or store-lanes optab OPTAB is implemented for
47 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
49 static bool
50 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
51 tree vectype, unsigned HOST_WIDE_INT count)
53 enum machine_mode mode, array_mode;
54 bool limit_p;
56 mode = TYPE_MODE (vectype);
57 limit_p = !targetm.array_mode_supported_p (mode, count);
58 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
59 MODE_INT, limit_p);
61 if (array_mode == BLKmode)
63 if (vect_print_dump_info (REPORT_DETAILS))
64 fprintf (vect_dump, "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
65 GET_MODE_NAME (mode), count);
66 return false;
69 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
71 if (vect_print_dump_info (REPORT_DETAILS))
72 fprintf (vect_dump, "cannot use %s<%s><%s>",
73 name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
74 return false;
77 if (vect_print_dump_info (REPORT_DETAILS))
78 fprintf (vect_dump, "can use %s<%s><%s>",
79 name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
81 return true;
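/* Illustrative example (assumed target and values, not part of the original
   source): with a VECTYPE of mode V4SImode (128 bits) and COUNT == 2, the
   function asks for a 256-bit integer mode to hold the whole array (OImode on
   targets that provide one) and then for a vec_load_lanes/vec_store_lanes
   handler converting between that array mode and V4SImode.  If either the
   array mode or the optab handler is missing, the function returns false.  */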
85 /* Return the smallest scalar part of STMT.
86 This is used to determine the vectype of the stmt. We generally set the
87 vectype according to the type of the result (lhs). For stmts whose
88 result-type is different than the type of the arguments (e.g., demotion,
89 promotion), vectype will be reset appropriately (later). Note that we have
90 to visit the smallest datatype in this function, because that determines the
91 VF. If the smallest datatype in the loop is present only as the rhs of a
92 promotion operation - we'd miss it.
93 Such a case, where a variable of this datatype does not appear in the lhs
94 anywhere in the loop, can only occur if it's an invariant: e.g.:
95 'int_x = (int) short_inv', which we'd expect to have been optimized away by
96 invariant motion. However, we cannot rely on invariant motion to always
97 take invariants out of the loop, and so in the case of promotion we also
98 have to check the rhs.
99 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
100 types. */
102 tree
103 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
104 HOST_WIDE_INT *rhs_size_unit)
106 tree scalar_type = gimple_expr_type (stmt);
107 HOST_WIDE_INT lhs, rhs;
109 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
111 if (is_gimple_assign (stmt)
112 && (gimple_assign_cast_p (stmt)
113 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
114 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
116 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
118 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
119 if (rhs < lhs)
120 scalar_type = rhs_type;
123 *lhs_size_unit = lhs;
124 *rhs_size_unit = rhs;
125 return scalar_type;
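/* Illustrative example (assumed 2-byte short and 4-byte int, not part of the
   original source): for the promotion statement  int_x = (int) short_y  the
   result type is int, but the rhs type short is smaller, so the short type is
   returned with *LHS_SIZE_UNIT == 4 and *RHS_SIZE_UNIT == 2; the
   vectorization factor is then derived from the smaller (short) elements.  */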
129 /* Find the place of the data-ref in STMT in the interleaving chain that starts
130 from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
133 vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
135 gimple next_stmt = first_stmt;
136 int result = 0;
138 if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
139 return -1;
141 while (next_stmt && next_stmt != stmt)
143 result++;
144 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
147 if (next_stmt)
148 return result;
149 else
150 return -1;
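/* Illustrative example (assumed access pattern): for an interleaving chain
   built from the stores to a[3*i], a[3*i+1] and a[3*i+2], with FIRST_STMT
   being the store to a[3*i], the store to a[3*i+2] is at place 2; a statement
   that is not linked into this chain yields -1.  */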
154 /* Function vect_insert_into_interleaving_chain.
156 Insert DRA into the interleaving chain of DRB according to DRA's INIT. */
158 static void
159 vect_insert_into_interleaving_chain (struct data_reference *dra,
160 struct data_reference *drb)
162 gimple prev, next;
163 tree next_init;
164 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
165 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
167 prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
168 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
169 while (next)
171 next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
172 if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
174 /* Insert here. */
175 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
176 GROUP_NEXT_ELEMENT (stmtinfo_a) = next;
177 return;
179 prev = next;
180 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
183 /* We got to the end of the list. Insert here. */
184 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
185 GROUP_NEXT_ELEMENT (stmtinfo_a) = NULL;
189 /* Function vect_update_interleaving_chain.
191 For two data-refs DRA and DRB that are part of a chain of interleaved data
192 accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
194 There are four possible cases:
195 1. New stmts - both DRA and DRB are not a part of any chain:
196 FIRST_DR = DRB
197 NEXT_DR (DRB) = DRA
198 2. DRB is a part of a chain and DRA is not:
199 no need to update FIRST_DR
200 no need to insert DRB
201 insert DRA according to init
202 3. DRA is a part of a chain and DRB is not:
203 if (init of FIRST_DR > init of DRB)
204 FIRST_DR = DRB
205 NEXT(FIRST_DR) = previous FIRST_DR
206 else
207 insert DRB according to its init
208 4. both DRA and DRB are in some interleaving chains:
209 choose the chain with the smallest init of FIRST_DR
210 insert the nodes of the second chain into the first one. */
212 static void
213 vect_update_interleaving_chain (struct data_reference *drb,
214 struct data_reference *dra)
216 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
217 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
218 tree next_init, init_dra_chain, init_drb_chain;
219 gimple first_a, first_b;
220 tree node_init;
221 gimple node, prev, next, first_stmt;
223 /* 1. New stmts - both DRA and DRB are not a part of any chain. */
224 if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
226 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (drb);
227 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
228 GROUP_NEXT_ELEMENT (stmtinfo_b) = DR_STMT (dra);
229 return;
232 /* 2. DRB is a part of a chain and DRA is not. */
233 if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && GROUP_FIRST_ELEMENT (stmtinfo_b))
235 GROUP_FIRST_ELEMENT (stmtinfo_a) = GROUP_FIRST_ELEMENT (stmtinfo_b);
236 /* Insert DRA into the chain of DRB. */
237 vect_insert_into_interleaving_chain (dra, drb);
238 return;
241 /* 3. DRA is a part of a chain and DRB is not. */
242 if (GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
244 gimple old_first_stmt = GROUP_FIRST_ELEMENT (stmtinfo_a);
245 tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
246 old_first_stmt)));
247 gimple tmp;
249 if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
251 /* DRB's init is smaller than the init of the stmt previously marked
252 as the first stmt of the interleaving chain of DRA. Therefore, we
253 update FIRST_STMT and put DRB in the head of the list. */
254 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
255 GROUP_NEXT_ELEMENT (stmtinfo_b) = old_first_stmt;
257 /* Update all the stmts in the list to point to the new FIRST_STMT. */
258 tmp = old_first_stmt;
259 while (tmp)
261 GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) = DR_STMT (drb);
262 tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (tmp));
265 else
267 /* Insert DRB in the list of DRA. */
268 vect_insert_into_interleaving_chain (drb, dra);
269 GROUP_FIRST_ELEMENT (stmtinfo_b) = GROUP_FIRST_ELEMENT (stmtinfo_a);
271 return;
274 /* 4. both DRA and DRB are in some interleaving chains. */
275 first_a = GROUP_FIRST_ELEMENT (stmtinfo_a);
276 first_b = GROUP_FIRST_ELEMENT (stmtinfo_b);
277 if (first_a == first_b)
278 return;
279 init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
280 init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b)));
282 if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
284 /* Insert the nodes of DRA chain into the DRB chain.
285 After inserting a node, continue from this node of the DRB chain (don't
286 start from the beginning). */
287 node = GROUP_FIRST_ELEMENT (stmtinfo_a);
288 prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
289 first_stmt = first_b;
291 else
293 /* Insert the nodes of DRB chain into the DRA chain.
294 After inserting a node, continue from this node of the DRA chain (don't
295 start from the beginning). */
296 node = GROUP_FIRST_ELEMENT (stmtinfo_b);
297 prev = GROUP_FIRST_ELEMENT (stmtinfo_a);
298 first_stmt = first_a;
301 while (node)
303 node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
304 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
305 while (next)
307 next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
308 if (tree_int_cst_compare (next_init, node_init) > 0)
310 /* Insert here. */
311 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
312 GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = next;
313 prev = node;
314 break;
316 prev = next;
317 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
319 if (!next)
321 /* We got to the end of the list. Insert here. */
322 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
323 GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = NULL;
324 prev = node;
326 GROUP_FIRST_ELEMENT (vinfo_for_stmt (node)) = first_stmt;
327 node = GROUP_NEXT_ELEMENT (vinfo_for_stmt (node));
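/* Illustrative example for case 4 (assumed DR_INIT values): merging a chain
   with inits {0, 8} and a chain with inits {4, 12}.  The first chain has the
   smaller leading init, so the nodes of the second chain are inserted into it
   in init order, giving a single chain with inits {0, 4, 8, 12}, all of whose
   members now point to the first chain's FIRST_STMT.  */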
331 /* Check dependence between DRA and DRB for basic block vectorization.
332 If the accesses share the same bases and offsets, we can compare their initial
333 constant offsets to decide whether they differ or not. In case of a read-
334 write dependence we check that the load is before the store to ensure that
335 vectorization will not change the order of the accesses. */
337 static bool
338 vect_drs_dependent_in_basic_block (struct data_reference *dra,
339 struct data_reference *drb)
341 HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
342 gimple earlier_stmt;
344 /* We only call this function for pairs of loads and stores, but we verify
345 it here. */
346 if (DR_IS_READ (dra) == DR_IS_READ (drb))
348 if (DR_IS_READ (dra))
349 return false;
350 else
351 return true;
354 /* Check that the data-refs have same bases and offsets. If not, we can't
355 determine if they are dependent. */
356 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
357 || !dr_equal_offsets_p (dra, drb))
358 return true;
360 /* Check the types. */
361 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
362 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
364 if (type_size_a != type_size_b
365 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
366 TREE_TYPE (DR_REF (drb))))
367 return true;
369 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
370 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
372 /* Two different locations - no dependence. */
373 if (init_a != init_b)
374 return false;
376 /* We have a read-write dependence. Check that the load is before the store.
377 When we vectorize basic blocks, vector load can be only before
378 corresponding scalar load, and vector store can be only after its
379 corresponding scalar store. So the order of the accesses is preserved in
380 case the load is before the store. */
381 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
382 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
383 return false;
385 return true;
389 /* Function vect_check_interleaving.
391 Check if DRA and DRB are a part of interleaving. In case they are, insert
392 DRA and DRB in an interleaving chain. */
394 static bool
395 vect_check_interleaving (struct data_reference *dra,
396 struct data_reference *drb)
398 HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b;
400 /* Check that the data-refs have the same first location (except init) and
401 that they are both either stores or loads (not a load and a store). */
402 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
403 || !dr_equal_offsets_p (dra, drb)
404 || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
405 || DR_IS_READ (dra) != DR_IS_READ (drb))
406 return false;
408 /* Check:
409 1. data-refs are of the same type
410 2. their steps are equal
411 3. the step (if greater than zero) is at least the difference between the
412 data-refs' inits. */
413 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
414 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
416 if (type_size_a != type_size_b
417 || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
418 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
419 TREE_TYPE (DR_REF (drb))))
420 return false;
422 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
423 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
424 step = TREE_INT_CST_LOW (DR_STEP (dra));
426 if (init_a > init_b)
428 /* If init_a == init_b + the size of the type * k, we have an interleaving,
429 and DRB is accessed before DRA. */
430 diff_mod_size = (init_a - init_b) % type_size_a;
432 if (step && (init_a - init_b) > step)
433 return false;
435 if (diff_mod_size == 0)
437 vect_update_interleaving_chain (drb, dra);
438 if (vect_print_dump_info (REPORT_DR_DETAILS))
440 fprintf (vect_dump, "Detected interleaving ");
441 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
442 fprintf (vect_dump, " and ");
443 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
445 return true;
448 else
450 /* If init_b == init_a + the size of the type * k, we have an
451 interleaving, and DRA is accessed before DRB. */
452 diff_mod_size = (init_b - init_a) % type_size_a;
454 if (step && (init_b - init_a) > step)
455 return false;
457 if (diff_mod_size == 0)
459 vect_update_interleaving_chain (dra, drb);
460 if (vect_print_dump_info (REPORT_DR_DETAILS))
462 fprintf (vect_dump, "Detected interleaving ");
463 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
464 fprintf (vect_dump, " and ");
465 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
467 return true;
471 return false;
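/* Illustrative example (assumed values): two stores with the same base and
   offset, 4-byte elements, DR_STEP 8 and inits 0 and 4, e.g. a[2*i] and
   a[2*i+1].  The difference 4 does not exceed the step and is a multiple of
   the element size, so the accesses form an interleaving group and the chain
   is updated.  With inits 0 and 6 instead, 6 % 4 != 0 and no interleaving is
   detected.  */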
474 /* Check if the data references pointed to by DR_I and DR_J are the same or
475 belong to the same interleaving group. Return FALSE if the drs are
476 different, otherwise return TRUE. */
478 static bool
479 vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
481 gimple stmt_i = DR_STMT (dr_i);
482 gimple stmt_j = DR_STMT (dr_j);
484 if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
485 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
486 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))
487 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
488 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)))))
489 return true;
490 else
491 return false;
494 /* If address ranges represented by DDR_I and DDR_J are equal,
495 return TRUE, otherwise return FALSE. */
497 static bool
498 vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
500 if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
501 && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
502 || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
503 && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
504 return true;
505 else
506 return false;
509 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
510 tested at run-time. Return TRUE if DDR was successfully inserted.
511 Return false if versioning is not supported. */
513 static bool
514 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
516 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
518 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
519 return false;
521 if (vect_print_dump_info (REPORT_DR_DETAILS))
523 fprintf (vect_dump, "mark for run-time aliasing test between ");
524 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr)), TDF_SLIM);
525 fprintf (vect_dump, " and ");
526 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr)), TDF_SLIM);
529 if (optimize_loop_nest_for_size_p (loop))
531 if (vect_print_dump_info (REPORT_DR_DETAILS))
532 fprintf (vect_dump, "versioning not supported when optimizing for size.");
533 return false;
536 /* FORNOW: We don't support versioning with outer-loop vectorization. */
537 if (loop->inner)
539 if (vect_print_dump_info (REPORT_DR_DETAILS))
540 fprintf (vect_dump, "versioning not yet supported for outer-loops.");
541 return false;
544 VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
545 return true;
549 /* Function vect_analyze_data_ref_dependence.
551 Return TRUE if there (might) exist a dependence between a memory-reference
552 DRA and a memory-reference DRB. When versioning for alias can handle the
553 dependence with a run-time check, return FALSE. Adjust *MAX_VF according to
554 the data dependence. */
556 static bool
557 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
558 loop_vec_info loop_vinfo, int *max_vf)
560 unsigned int i;
561 struct loop *loop = NULL;
562 struct data_reference *dra = DDR_A (ddr);
563 struct data_reference *drb = DDR_B (ddr);
564 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
565 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
566 lambda_vector dist_v;
567 unsigned int loop_depth;
569 /* Don't bother to analyze statements marked as unvectorizable. */
570 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
571 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
572 return false;
574 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
576 /* Independent data accesses. */
577 vect_check_interleaving (dra, drb);
578 return false;
581 if (loop_vinfo)
582 loop = LOOP_VINFO_LOOP (loop_vinfo);
584 if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
585 return false;
587 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
589 gimple earlier_stmt;
591 if (loop_vinfo)
593 if (vect_print_dump_info (REPORT_DR_DETAILS))
595 fprintf (vect_dump, "versioning for alias required: "
596 "can't determine dependence between ");
597 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
598 fprintf (vect_dump, " and ");
599 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
602 /* Add to list of ddrs that need to be tested at run-time. */
603 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
606 /* When vectorizing a basic block, an unknown dependence can still mean a
607 strided access. */
608 if (vect_check_interleaving (dra, drb))
609 return false;
611 /* Read-read is OK (we need this check here, after checking for
612 interleaving). */
613 if (DR_IS_READ (dra) && DR_IS_READ (drb))
614 return false;
616 if (vect_print_dump_info (REPORT_DR_DETAILS))
618 fprintf (vect_dump, "can't determine dependence between ");
619 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
620 fprintf (vect_dump, " and ");
621 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
624 /* We do not vectorize basic blocks with write-write dependencies. */
625 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
626 return true;
628 /* Check that it's not a load-after-store dependence. */
629 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
630 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
631 return true;
633 return false;
636 /* Versioning for alias is not yet supported for basic block SLP, and
637 dependence distance is inapplicable, hence, in case of known data
638 dependence, basic block vectorization is impossible for now. */
639 if (!loop_vinfo)
641 if (dra != drb && vect_check_interleaving (dra, drb))
642 return false;
644 if (vect_print_dump_info (REPORT_DR_DETAILS))
646 fprintf (vect_dump, "determined dependence between ");
647 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
648 fprintf (vect_dump, " and ");
649 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
652 /* Do not vectorize basic blocks with write-write dependences. */
653 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
654 return true;
656 /* Check if this dependence is allowed in basic block vectorization. */
657 return vect_drs_dependent_in_basic_block (dra, drb);
660 /* Loop-based vectorization and known data dependence. */
661 if (DDR_NUM_DIST_VECTS (ddr) == 0)
663 if (vect_print_dump_info (REPORT_DR_DETAILS))
665 fprintf (vect_dump, "versioning for alias required: bad dist vector for ");
666 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
667 fprintf (vect_dump, " and ");
668 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
670 /* Add to list of ddrs that need to be tested at run-time. */
671 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
674 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
675 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
677 int dist = dist_v[loop_depth];
679 if (vect_print_dump_info (REPORT_DR_DETAILS))
680 fprintf (vect_dump, "dependence distance = %d.", dist);
682 if (dist == 0)
684 if (vect_print_dump_info (REPORT_DR_DETAILS))
686 fprintf (vect_dump, "dependence distance == 0 between ");
687 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
688 fprintf (vect_dump, " and ");
689 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
692 /* For interleaving, mark that there is a read-write dependency if
693 necessary. We have checked before that one of the data-refs is a store. */
694 if (DR_IS_READ (dra))
695 GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
696 else
698 if (DR_IS_READ (drb))
699 GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
702 continue;
705 if (dist > 0 && DDR_REVERSED_P (ddr))
707 /* If DDR_REVERSED_P the order of the data-refs in DDR was
708 reversed (to make distance vector positive), and the actual
709 distance is negative. */
710 if (vect_print_dump_info (REPORT_DR_DETAILS))
711 fprintf (vect_dump, "dependence distance negative.");
712 continue;
715 if (abs (dist) >= 2
716 && abs (dist) < *max_vf)
718 /* The dependence distance requires reduction of the maximal
719 vectorization factor. */
720 *max_vf = abs (dist);
721 if (vect_print_dump_info (REPORT_DR_DETAILS))
722 fprintf (vect_dump, "adjusting maximal vectorization factor to %i",
723 *max_vf);
726 if (abs (dist) >= *max_vf)
728 /* Dependence distance does not create dependence, as far as
729 vectorization is concerned, in this case. */
730 if (vect_print_dump_info (REPORT_DR_DETAILS))
731 fprintf (vect_dump, "dependence distance >= VF.");
732 continue;
735 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
737 fprintf (vect_dump, "not vectorized, possible dependence "
738 "between data-refs ");
739 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
740 fprintf (vect_dump, " and ");
741 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
744 return true;
747 return false;
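/* Illustrative example (assumed loop, not from this file): for
     for (i = 0; i < n; i++)
       a[i + 3] = a[i] + x;
   the dependence distance is 3.  With an initial *MAX_VF of 8 the distance is
   >= 2 and smaller than *MAX_VF, so *MAX_VF is reduced to 3; the distance is
   then >= the vectorization factor and the dependence no longer prevents
   vectorization.  */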
750 /* Function vect_analyze_data_ref_dependences.
752 Examine all the data references in the loop, and make sure there do not
753 exist any data dependences between them. Set *MAX_VF according to
754 the maximum vectorization factor the data dependences allow. */
756 bool
757 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
758 bb_vec_info bb_vinfo, int *max_vf)
760 unsigned int i;
761 VEC (ddr_p, heap) *ddrs = NULL;
762 struct data_dependence_relation *ddr;
764 if (vect_print_dump_info (REPORT_DETAILS))
765 fprintf (vect_dump, "=== vect_analyze_dependences ===");
767 if (loop_vinfo)
768 ddrs = LOOP_VINFO_DDRS (loop_vinfo);
769 else
770 ddrs = BB_VINFO_DDRS (bb_vinfo);
772 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
773 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
774 return false;
776 return true;
780 /* Function vect_compute_data_ref_alignment
782 Compute the misalignment of the data reference DR.
784 Output:
785 1. If during the misalignment computation it is found that the data reference
786 cannot be vectorized then false is returned.
787 2. DR_MISALIGNMENT (DR) is defined.
789 FOR NOW: No analysis is actually performed. Misalignment is calculated
790 only for trivial cases. TODO. */
792 static bool
793 vect_compute_data_ref_alignment (struct data_reference *dr)
795 gimple stmt = DR_STMT (dr);
796 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
797 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
798 struct loop *loop = NULL;
799 tree ref = DR_REF (dr);
800 tree vectype;
801 tree base, base_addr;
802 bool base_aligned;
803 tree misalign;
804 tree aligned_to, alignment;
806 if (vect_print_dump_info (REPORT_DETAILS))
807 fprintf (vect_dump, "vect_compute_data_ref_alignment:");
809 if (loop_vinfo)
810 loop = LOOP_VINFO_LOOP (loop_vinfo);
812 /* Initialize misalignment to unknown. */
813 SET_DR_MISALIGNMENT (dr, -1);
815 misalign = DR_INIT (dr);
816 aligned_to = DR_ALIGNED_TO (dr);
817 base_addr = DR_BASE_ADDRESS (dr);
818 vectype = STMT_VINFO_VECTYPE (stmt_info);
820 /* In case the dataref is in an inner-loop of the loop that is being
821 vectorized (LOOP), we use the base and misalignment information
822 relative to the outer-loop (LOOP). This is ok only if the misalignment
823 stays the same throughout the execution of the inner-loop, which is why
824 we have to check that the stride of the dataref in the inner-loop is
825 evenly divisible by the vector size. */
826 if (loop && nested_in_vect_loop_p (loop, stmt))
828 tree step = DR_STEP (dr);
829 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
831 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
833 if (vect_print_dump_info (REPORT_ALIGNMENT))
834 fprintf (vect_dump, "inner step divides the vector-size.");
835 misalign = STMT_VINFO_DR_INIT (stmt_info);
836 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
837 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
839 else
841 if (vect_print_dump_info (REPORT_ALIGNMENT))
842 fprintf (vect_dump, "inner step doesn't divide the vector-size.");
843 misalign = NULL_TREE;
847 base = build_fold_indirect_ref (base_addr);
848 alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);
850 if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
851 || !misalign)
853 if (vect_print_dump_info (REPORT_ALIGNMENT))
855 fprintf (vect_dump, "Unknown alignment for access: ");
856 print_generic_expr (vect_dump, base, TDF_SLIM);
858 return true;
861 if ((DECL_P (base)
862 && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
863 alignment) >= 0)
864 || (TREE_CODE (base_addr) == SSA_NAME
865 && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
866 TREE_TYPE (base_addr)))),
867 alignment) >= 0)
868 || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
869 base_aligned = true;
870 else
871 base_aligned = false;
873 if (!base_aligned)
875 /* Do not change the alignment of global variables if
876 flag_section_anchors is enabled. */
877 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
878 || (TREE_STATIC (base) && flag_section_anchors))
880 if (vect_print_dump_info (REPORT_DETAILS))
882 fprintf (vect_dump, "can't force alignment of ref: ");
883 print_generic_expr (vect_dump, ref, TDF_SLIM);
885 return true;
888 /* Force the alignment of the decl.
889 NOTE: This is the only change to the code we make during
890 the analysis phase, before deciding to vectorize the loop. */
891 if (vect_print_dump_info (REPORT_DETAILS))
893 fprintf (vect_dump, "force alignment of ");
894 print_generic_expr (vect_dump, ref, TDF_SLIM);
897 DECL_ALIGN (base) = TYPE_ALIGN (vectype);
898 DECL_USER_ALIGN (base) = 1;
901 /* At this point we assume that the base is aligned. */
902 gcc_assert (base_aligned
903 || (TREE_CODE (base) == VAR_DECL
904 && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
906 /* If this is a backward running DR then first access in the larger
907 vectype actually is N-1 elements before the address in the DR.
908 Adjust misalign accordingly. */
909 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
911 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
912 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
913 otherwise we wouldn't be here. */
914 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
915 /* PLUS because DR_STEP was negative. */
916 misalign = size_binop (PLUS_EXPR, misalign, offset);
919 /* Modulo alignment. */
920 misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);
922 if (!host_integerp (misalign, 1))
924 /* Negative or overflowed misalignment value. */
925 if (vect_print_dump_info (REPORT_DETAILS))
926 fprintf (vect_dump, "unexpected misalign value");
927 return false;
930 SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
932 if (vect_print_dump_info (REPORT_DETAILS))
934 fprintf (vect_dump, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
935 print_generic_expr (vect_dump, ref, TDF_SLIM);
938 return true;
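/* Illustrative example (assumed values, not part of the original source): for
   a V4SF vectype the required alignment is 16 bytes.  If the base decl is
   known to be at least 16-byte aligned and DR_INIT is 4, the recorded
   misalignment is 4 % 16 == 4 bytes.  For a backward-running access with
   4-byte elements (negative DR_STEP), the first vector element lies 3
   elements before DR_INIT, so 3 * -4 == -12 is added to the misalignment
   before the modulo.  */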
942 /* Function vect_compute_data_refs_alignment
944 Compute the misalignment of data references in the loop.
945 Return FALSE if a data reference is found that cannot be vectorized. */
947 static bool
948 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
949 bb_vec_info bb_vinfo)
951 VEC (data_reference_p, heap) *datarefs;
952 struct data_reference *dr;
953 unsigned int i;
955 if (loop_vinfo)
956 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
957 else
958 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
960 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
961 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
962 && !vect_compute_data_ref_alignment (dr))
964 if (bb_vinfo)
966 /* Mark unsupported statement as unvectorizable. */
967 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
968 continue;
970 else
971 return false;
974 return true;
978 /* Function vect_update_misalignment_for_peel
980 DR - the data reference whose misalignment is to be adjusted.
981 DR_PEEL - the data reference whose misalignment is being made
982 zero in the vector loop by the peel.
983 NPEEL - the number of iterations in the peel loop if the misalignment
984 of DR_PEEL is known at compile time. */
986 static void
987 vect_update_misalignment_for_peel (struct data_reference *dr,
988 struct data_reference *dr_peel, int npeel)
990 unsigned int i;
991 VEC(dr_p,heap) *same_align_drs;
992 struct data_reference *current_dr;
993 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
994 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
995 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
996 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
998 /* For interleaved data accesses the step in the loop must be multiplied by
999 the size of the interleaving group. */
1000 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1001 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
1002 if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
1003 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
1005 /* It can be assumed that the data refs with the same alignment as dr_peel
1006 are aligned in the vector loop. */
1007 same_align_drs
1008 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
1009 FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
1011 if (current_dr != dr)
1012 continue;
1013 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
1014 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
1015 SET_DR_MISALIGNMENT (dr, 0);
1016 return;
1019 if (known_alignment_for_access_p (dr)
1020 && known_alignment_for_access_p (dr_peel))
1022 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
1023 int misal = DR_MISALIGNMENT (dr);
1024 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1025 misal += negative ? -npeel * dr_size : npeel * dr_size;
1026 misal &= GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1027 SET_DR_MISALIGNMENT (dr, misal);
1028 return;
1031 if (vect_print_dump_info (REPORT_DETAILS))
1032 fprintf (vect_dump, "Setting misalignment to -1.");
1033 SET_DR_MISALIGNMENT (dr, -1);
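/* Illustrative example (assumed values): peeling NPEEL == 3 iterations to
   align DR_PEEL, while DR has 4-byte elements, a known misalignment of 8 and
   a 16-byte vectype.  The updated misalignment of DR is (8 + 3*4) & 15 == 4.
   If either misalignment were unknown, DR's misalignment would simply be
   reset to -1 (unknown).  */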
1037 /* Function vect_verify_datarefs_alignment
1039 Return TRUE if all data references in the loop can be
1040 handled with respect to alignment. */
1042 bool
1043 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
1045 VEC (data_reference_p, heap) *datarefs;
1046 struct data_reference *dr;
1047 enum dr_alignment_support supportable_dr_alignment;
1048 unsigned int i;
1050 if (loop_vinfo)
1051 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1052 else
1053 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
1055 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1057 gimple stmt = DR_STMT (dr);
1058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1060 /* For interleaving, only the alignment of the first access matters.
1061 Skip statements marked as not vectorizable. */
1062 if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
1063 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1064 || !STMT_VINFO_VECTORIZABLE (stmt_info))
1065 continue;
1067 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1068 if (!supportable_dr_alignment)
1070 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1072 if (DR_IS_READ (dr))
1073 fprintf (vect_dump,
1074 "not vectorized: unsupported unaligned load.");
1075 else
1076 fprintf (vect_dump,
1077 "not vectorized: unsupported unaligned store.");
1079 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
1081 return false;
1083 if (supportable_dr_alignment != dr_aligned
1084 && vect_print_dump_info (REPORT_ALIGNMENT))
1085 fprintf (vect_dump, "Vectorizing an unaligned access.");
1087 return true;
1091 /* Function vector_alignment_reachable_p
1093 Return true if vector alignment for DR is reachable by peeling
1094 a few loop iterations. Return false otherwise. */
1096 static bool
1097 vector_alignment_reachable_p (struct data_reference *dr)
1099 gimple stmt = DR_STMT (dr);
1100 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1101 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1103 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1105 /* For interleaved accesses we peel only if the number of iterations in
1106 the prolog loop (VF - misalignment) is a multiple of the
1107 number of interleaved accesses (the group size). */
1108 int elem_size, mis_in_elements;
1109 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1111 /* FORNOW: handle only known alignment. */
1112 if (!known_alignment_for_access_p (dr))
1113 return false;
1115 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1116 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1118 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
1119 return false;
1122 /* If the misalignment is known at compile time then allow peeling
1123 only if natural alignment is reachable through peeling. */
1124 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1126 HOST_WIDE_INT elmsize =
1127 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1128 if (vect_print_dump_info (REPORT_DETAILS))
1130 fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1131 fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr));
1133 if (DR_MISALIGNMENT (dr) % elmsize)
1135 if (vect_print_dump_info (REPORT_DETAILS))
1136 fprintf (vect_dump, "data size does not divide the misalignment.\n");
1137 return false;
1141 if (!known_alignment_for_access_p (dr))
1143 tree type = (TREE_TYPE (DR_REF (dr)));
1144 tree ba = DR_BASE_OBJECT (dr);
1145 bool is_packed = false;
1147 if (ba)
1148 is_packed = contains_packed_reference (ba);
1150 if (compare_tree_int (TYPE_SIZE (type), TYPE_ALIGN (type)) > 0)
1151 is_packed = true;
1153 if (vect_print_dump_info (REPORT_DETAILS))
1154 fprintf (vect_dump, "Unknown misalignment, is_packed = %d",is_packed);
1155 if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
1156 return true;
1157 else
1158 return false;
1161 return true;
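/* Illustrative example (assumed values): an interleaved access with
   GROUP_SIZE 3, a V4SI vectype (4 elements) and a misalignment of 8 bytes
   (2 elements) would need 4 - 2 == 2 prologue iterations, which is not a
   multiple of the group size, so peeling cannot reach vector alignment and
   the function returns false.  */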
1165 /* Calculate the cost of the memory access represented by DR. */
1167 static void
1168 vect_get_data_access_cost (struct data_reference *dr,
1169 unsigned int *inside_cost,
1170 unsigned int *outside_cost)
1172 gimple stmt = DR_STMT (dr);
1173 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1174 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1175 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1176 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1177 int ncopies = vf / nunits;
1178 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1180 if (!supportable_dr_alignment)
1181 *inside_cost = VECT_MAX_COST;
1182 else
1184 if (DR_IS_READ (dr))
1185 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost);
1186 else
1187 vect_get_store_cost (dr, ncopies, inside_cost);
1190 if (vect_print_dump_info (REPORT_COST))
1191 fprintf (vect_dump, "vect_get_data_access_cost: inside_cost = %d, "
1192 "outside_cost = %d.", *inside_cost, *outside_cost);
1196 static hashval_t
1197 vect_peeling_hash (const void *elem)
1199 const struct _vect_peel_info *peel_info;
1201 peel_info = (const struct _vect_peel_info *) elem;
1202 return (hashval_t) peel_info->npeel;
1206 static int
1207 vect_peeling_hash_eq (const void *elem1, const void *elem2)
1209 const struct _vect_peel_info *a, *b;
1211 a = (const struct _vect_peel_info *) elem1;
1212 b = (const struct _vect_peel_info *) elem2;
1213 return (a->npeel == b->npeel);
1217 /* Insert DR into peeling hash table with NPEEL as key. */
1219 static void
1220 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1221 int npeel)
1223 struct _vect_peel_info elem, *slot;
1224 void **new_slot;
1225 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1227 elem.npeel = npeel;
1228 slot = (vect_peel_info) htab_find (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1229 &elem);
1230 if (slot)
1231 slot->count++;
1232 else
1234 slot = XNEW (struct _vect_peel_info);
1235 slot->npeel = npeel;
1236 slot->dr = dr;
1237 slot->count = 1;
1238 new_slot = htab_find_slot (LOOP_VINFO_PEELING_HTAB (loop_vinfo), slot,
1239 INSERT);
1240 *new_slot = slot;
1243 if (!supportable_dr_alignment && !flag_vect_cost_model)
1244 slot->count += VECT_MAX_COST;
1248 /* Traverse the peeling hash table to find the peeling option that aligns
1249 the maximum number of data accesses. */
1251 static int
1252 vect_peeling_hash_get_most_frequent (void **slot, void *data)
1254 vect_peel_info elem = (vect_peel_info) *slot;
1255 vect_peel_extended_info max = (vect_peel_extended_info) data;
1257 if (elem->count > max->peel_info.count
1258 || (elem->count == max->peel_info.count
1259 && max->peel_info.npeel > elem->npeel))
1261 max->peel_info.npeel = elem->npeel;
1262 max->peel_info.count = elem->count;
1263 max->peel_info.dr = elem->dr;
1266 return 1;
1270 /* Traverse the peeling hash table and calculate the cost for each peeling
1271 option. Find the one with the lowest cost. */
1273 static int
1274 vect_peeling_hash_get_lowest_cost (void **slot, void *data)
1276 vect_peel_info elem = (vect_peel_info) *slot;
1277 vect_peel_extended_info min = (vect_peel_extended_info) data;
1278 int save_misalignment, dummy;
1279 unsigned int inside_cost = 0, outside_cost = 0, i;
1280 gimple stmt = DR_STMT (elem->dr);
1281 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1282 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1283 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1284 struct data_reference *dr;
1286 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1288 stmt = DR_STMT (dr);
1289 stmt_info = vinfo_for_stmt (stmt);
1290 /* For interleaving, only the alignment of the first access
1291 matters. */
1292 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1293 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1294 continue;
1296 save_misalignment = DR_MISALIGNMENT (dr);
1297 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1298 vect_get_data_access_cost (dr, &inside_cost, &outside_cost);
1299 SET_DR_MISALIGNMENT (dr, save_misalignment);
1302 outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel, &dummy,
1303 vect_get_single_scalar_iteraion_cost (loop_vinfo));
1305 if (inside_cost < min->inside_cost
1306 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1308 min->inside_cost = inside_cost;
1309 min->outside_cost = outside_cost;
1310 min->peel_info.dr = elem->dr;
1311 min->peel_info.npeel = elem->npeel;
1314 return 1;
1318 /* Choose best peeling option by traversing peeling hash table and either
1319 choosing an option with the lowest cost (if cost model is enabled) or the
1320 option that aligns as many accesses as possible. */
1322 static struct data_reference *
1323 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1324 unsigned int *npeel)
1326 struct _vect_peel_extended_info res;
1328 res.peel_info.dr = NULL;
1330 if (flag_vect_cost_model)
1332 res.inside_cost = INT_MAX;
1333 res.outside_cost = INT_MAX;
1334 htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1335 vect_peeling_hash_get_lowest_cost, &res);
1337 else
1339 res.peel_info.count = 0;
1340 htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1341 vect_peeling_hash_get_most_frequent, &res);
1344 *npeel = res.peel_info.npeel;
1345 return res.peel_info.dr;
1349 /* Function vect_enhance_data_refs_alignment
1351 This pass will use loop versioning and loop peeling in order to enhance
1352 the alignment of data references in the loop.
1354 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1355 original loop is to be vectorized. Any other loops that are created by
1356 the transformations performed in this pass are not supposed to be
1357 vectorized. This restriction will be relaxed.
1359 This pass will require a cost model to guide it whether to apply peeling
1360 or versioning or a combination of the two. For example, the scheme that
1361 Intel uses when given a loop with several memory accesses, is as follows:
1362 choose one memory access ('p') whose alignment you want to force by doing
1363 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1364 other accesses are not necessarily aligned, or (2) use loop versioning to
1365 generate one loop in which all accesses are aligned, and another loop in
1366 which only 'p' is necessarily aligned.
1368 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1369 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1370 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1372 Devising a cost model is the most critical aspect of this work. It will
1373 guide us on which access to peel for, whether to use loop versioning, how
1374 many versions to create, etc. The cost model will probably consist of
1375 generic considerations as well as target specific considerations (on
1376 powerpc for example, misaligned stores are more painful than misaligned
1377 loads).
1379 Here are the general steps involved in alignment enhancements:
1381 -- original loop, before alignment analysis:
1382 for (i=0; i<N; i++){
1383 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1384 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1387 -- After vect_compute_data_refs_alignment:
1388 for (i=0; i<N; i++){
1389 x = q[i]; # DR_MISALIGNMENT(q) = 3
1390 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1393 -- Possibility 1: we do loop versioning:
1394 if (p is aligned) {
1395 for (i=0; i<N; i++){ # loop 1A
1396 x = q[i]; # DR_MISALIGNMENT(q) = 3
1397 p[i] = y; # DR_MISALIGNMENT(p) = 0
1400 else {
1401 for (i=0; i<N; i++){ # loop 1B
1402 x = q[i]; # DR_MISALIGNMENT(q) = 3
1403 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1407 -- Possibility 2: we do loop peeling:
1408 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1409 x = q[i];
1410 p[i] = y;
1412 for (i = 3; i < N; i++){ # loop 2A
1413 x = q[i]; # DR_MISALIGNMENT(q) = 0
1414 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1417 -- Possibility 3: combination of loop peeling and versioning:
1418 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1419 x = q[i];
1420 p[i] = y;
1422 if (p is aligned) {
1423 for (i = 3; i<N; i++){ # loop 3A
1424 x = q[i]; # DR_MISALIGNMENT(q) = 0
1425 p[i] = y; # DR_MISALIGNMENT(p) = 0
1428 else {
1429 for (i = 3; i<N; i++){ # loop 3B
1430 x = q[i]; # DR_MISALIGNMENT(q) = 0
1431 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1435 These loops are later passed to loop_transform to be vectorized. The
1436 vectorizer will use the alignment information to guide the transformation
1437 (whether to generate regular loads/stores, or with special handling for
1438 misalignment). */
1440 bool
1441 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1443 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1444 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1445 enum dr_alignment_support supportable_dr_alignment;
1446 struct data_reference *dr0 = NULL, *first_store = NULL;
1447 struct data_reference *dr;
1448 unsigned int i, j;
1449 bool do_peeling = false;
1450 bool do_versioning = false;
1451 bool stat;
1452 gimple stmt;
1453 stmt_vec_info stmt_info;
1454 int vect_versioning_for_alias_required;
1455 unsigned int npeel = 0;
1456 bool all_misalignments_unknown = true;
1457 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1458 unsigned possible_npeel_number = 1;
1459 tree vectype;
1460 unsigned int nelements, mis, same_align_drs_max = 0;
1462 if (vect_print_dump_info (REPORT_DETAILS))
1463 fprintf (vect_dump, "=== vect_enhance_data_refs_alignment ===");
1465 /* While cost model enhancements are expected in the future, the high level
1466 view of the code at this time is as follows:
1468 A) If there is a misaligned access then see if peeling to align
1469 this access can make all data references satisfy
1470 vect_supportable_dr_alignment. If so, update data structures
1471 as needed and return true.
1473 B) If peeling wasn't possible and there is a data reference with an
1474 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1475 then see if loop versioning checks can be used to make all data
1476 references satisfy vect_supportable_dr_alignment. If so, update
1477 data structures as needed and return true.
1479 C) If neither peeling nor versioning were successful then return false if
1480 any data reference does not satisfy vect_supportable_dr_alignment.
1482 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1484 Note, Possibility 3 above (which is peeling and versioning together) is not
1485 being done at this time. */
1487 /* (1) Peeling to force alignment. */
1489 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1490 Considerations:
1491 + How many accesses will become aligned due to the peeling
1492 - How many accesses will become unaligned due to the peeling,
1493 and the cost of misaligned accesses.
1494 - The cost of peeling (the extra runtime checks, the increase
1495 in code size). */
1497 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1499 stmt = DR_STMT (dr);
1500 stmt_info = vinfo_for_stmt (stmt);
1502 if (!STMT_VINFO_RELEVANT (stmt_info))
1503 continue;
1505 /* For interleaving, only the alignment of the first access
1506 matters. */
1507 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1508 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1509 continue;
1511 /* For invariant accesses there is nothing to enhance. */
1512 if (integer_zerop (DR_STEP (dr)))
1513 continue;
1515 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1516 do_peeling = vector_alignment_reachable_p (dr);
1517 if (do_peeling)
1519 if (known_alignment_for_access_p (dr))
1521 unsigned int npeel_tmp;
1522 bool negative = tree_int_cst_compare (DR_STEP (dr),
1523 size_zero_node) < 0;
1525 /* Save info about DR in the hash table. */
1526 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1527 LOOP_VINFO_PEELING_HTAB (loop_vinfo) =
1528 htab_create (1, vect_peeling_hash,
1529 vect_peeling_hash_eq, free);
1531 vectype = STMT_VINFO_VECTYPE (stmt_info);
1532 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1533 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1534 TREE_TYPE (DR_REF (dr))));
1535 npeel_tmp = (negative
1536 ? (mis - nelements) : (nelements - mis))
1537 & (nelements - 1);
1539 /* For multiple types, it is possible that the bigger type access
1540 will have more than one peeling option. E.g., a loop with two
1541 types: one of size (vector size / 4), and the other one of
1542 size (vector size / 8). The vectorization factor will be 8. If both
1543 accesses are misaligned by 3, the first one needs one scalar
1544 iteration to be aligned, and the second one needs 5. But the
1545 first one will also be aligned by peeling 5 scalar
1546 iterations, and in that case both accesses will be aligned.
1547 Hence, except for the immediate peeling amount, we also want
1548 to try to add a full vector size, as long as we don't exceed the
1549 vectorization factor.
1550 We do this automatically for the cost model, since we calculate the cost
1551 for every peeling option. */
1552 if (!flag_vect_cost_model)
1553 possible_npeel_number = vf / nelements;
1555 /* Handle the aligned case. We may decide to align some other
1556 access, making DR unaligned. */
1557 if (DR_MISALIGNMENT (dr) == 0)
1559 npeel_tmp = 0;
1560 if (!flag_vect_cost_model)
1561 possible_npeel_number++;
1564 for (j = 0; j < possible_npeel_number; j++)
1566 gcc_assert (npeel_tmp <= vf);
1567 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1568 npeel_tmp += nelements;
1571 all_misalignments_unknown = false;
1572 /* Data-ref that was chosen for the case that all the
1573 misalignments are unknown is not relevant anymore, since we
1574 have a data-ref with known alignment. */
1575 dr0 = NULL;
1577 else
1579 /* If we don't know all the misalignment values, we prefer
1580 peeling for the data-ref that has the maximum number of data-refs
1581 with the same alignment, unless the target prefers to align
1582 stores over loads. */
1583 if (all_misalignments_unknown)
1585 if (same_align_drs_max < VEC_length (dr_p,
1586 STMT_VINFO_SAME_ALIGN_REFS (stmt_info))
1587 || !dr0)
1589 same_align_drs_max = VEC_length (dr_p,
1590 STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
1591 dr0 = dr;
1594 if (!first_store && DR_IS_WRITE (dr))
1595 first_store = dr;
1598 /* If there are both known and unknown misaligned accesses in the
1599 loop, we choose peeling amount according to the known
1600 accesses. */
1603 if (!supportable_dr_alignment)
1605 dr0 = dr;
1606 if (!first_store && DR_IS_WRITE (dr))
1607 first_store = dr;
1611 else
1613 if (!aligned_access_p (dr))
1615 if (vect_print_dump_info (REPORT_DETAILS))
1616 fprintf (vect_dump, "vector alignment may not be reachable");
1618 break;
1623 vect_versioning_for_alias_required
1624 = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
1626 /* Temporarily, if versioning for alias is required, we disable peeling
1627 until we support peeling and versioning. Often peeling for alignment
1628 will require peeling for loop-bound, which in turn requires that we
1629 know how to adjust the loop ivs after the loop. */
1630 if (vect_versioning_for_alias_required
1631 || !vect_can_advance_ivs_p (loop_vinfo)
1632 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1633 do_peeling = false;
1635 if (do_peeling && all_misalignments_unknown
1636 && vect_supportable_dr_alignment (dr0, false))
1639 /* Check if the target requires us to prefer stores over loads, i.e., if
1640 misaligned stores are more expensive than misaligned loads (taking
1641 drs with same alignment into account). */
1642 if (first_store && DR_IS_READ (dr0))
1644 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1645 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1646 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1647 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1649 vect_get_data_access_cost (dr0, &load_inside_cost,
1650 &load_outside_cost);
1651 vect_get_data_access_cost (first_store, &store_inside_cost,
1652 &store_outside_cost);
1654 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1655 aligning the load DR0). */
1656 load_inside_penalty = store_inside_cost;
1657 load_outside_penalty = store_outside_cost;
1658 for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1659 (vinfo_for_stmt (DR_STMT (first_store))),
1660 i, dr);
1661 i++)
1662 if (DR_IS_READ (dr))
1664 load_inside_penalty += load_inside_cost;
1665 load_outside_penalty += load_outside_cost;
1667 else
1669 load_inside_penalty += store_inside_cost;
1670 load_outside_penalty += store_outside_cost;
1673 /* Calculate the penalty for leaving DR0 unaligned (by
1674 aligning the FIRST_STORE). */
1675 store_inside_penalty = load_inside_cost;
1676 store_outside_penalty = load_outside_cost;
1677 for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1678 (vinfo_for_stmt (DR_STMT (dr0))),
1679 i, dr);
1680 i++)
1681 if (DR_IS_READ (dr))
1683 store_inside_penalty += load_inside_cost;
1684 store_outside_penalty += load_outside_cost;
1686 else
1688 store_inside_penalty += store_inside_cost;
1689 store_outside_penalty += store_outside_cost;
1692 if (load_inside_penalty > store_inside_penalty
1693 || (load_inside_penalty == store_inside_penalty
1694 && load_outside_penalty > store_outside_penalty))
1695 dr0 = first_store;
1698 /* In case there are only loads with different unknown misalignments, use
1699 peeling only if it may help to align other accesses in the loop. */
1700 if (!first_store && !VEC_length (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1701 (vinfo_for_stmt (DR_STMT (dr0))))
1702 && vect_supportable_dr_alignment (dr0, false)
1703 != dr_unaligned_supported)
1704 do_peeling = false;
1707 if (do_peeling && !dr0)
1709 /* Peeling is possible, but there is no data access that is not supported
1710 unless aligned. So we try to choose the best possible peeling. */
1712 /* We should get here only if there are drs with known misalignment. */
1713 gcc_assert (!all_misalignments_unknown);
1715 /* Choose the best peeling from the hash table. */
1716 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel);
1717 if (!dr0 || !npeel)
1718 do_peeling = false;
1721 if (do_peeling)
1723 stmt = DR_STMT (dr0);
1724 stmt_info = vinfo_for_stmt (stmt);
1725 vectype = STMT_VINFO_VECTYPE (stmt_info);
1726 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1728 if (known_alignment_for_access_p (dr0))
1730 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1731 size_zero_node) < 0;
1732 if (!npeel)
1734 /* Since it's known at compile time, compute the number of
1735 iterations in the peeled loop (the peeling factor) for use in
1736 updating DR_MISALIGNMENT values. The peeling factor is the
1737 vectorization factor minus the misalignment as an element
1738 count. */
1739 mis = DR_MISALIGNMENT (dr0);
1740 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1741 npeel = ((negative ? mis - nelements : nelements - mis)
1742 & (nelements - 1));
1745 /* For interleaved data access every iteration accesses all the
1746 members of the group, therefore we divide the number of iterations
1747 by the group size. */
1748 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1749 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1750 npeel /= GROUP_SIZE (stmt_info);
1752 if (vect_print_dump_info (REPORT_DETAILS))
1753 fprintf (vect_dump, "Try peeling by %d", npeel);
1756 /* Ensure that all data refs can be vectorized after the peel. */
1757 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1759 int save_misalignment;
1761 if (dr == dr0)
1762 continue;
1764 stmt = DR_STMT (dr);
1765 stmt_info = vinfo_for_stmt (stmt);
1766 /* For interleaving, only the alignment of the first access
1767 matters. */
1768 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1769 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1770 continue;
1772 save_misalignment = DR_MISALIGNMENT (dr);
1773 vect_update_misalignment_for_peel (dr, dr0, npeel);
1774 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1775 SET_DR_MISALIGNMENT (dr, save_misalignment);
1777 if (!supportable_dr_alignment)
1779 do_peeling = false;
1780 break;
1784 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1786 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1787 if (!stat)
1788 do_peeling = false;
1789 else
1790 return stat;
1793 if (do_peeling)
1795 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1796 If the misalignment of DR_i is identical to that of dr0 then set
1797 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1798 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1799 by the peeling factor times the element size of DR_i (MOD the
1800 vectorization factor times the size). Otherwise, the
1801 misalignment of DR_i must be set to unknown. */
1802 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1803 if (dr != dr0)
1804 vect_update_misalignment_for_peel (dr, dr0, npeel);
1806 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1807 if (npeel)
1808 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1809 else
1810 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
1811 SET_DR_MISALIGNMENT (dr0, 0);
1812 if (vect_print_dump_info (REPORT_ALIGNMENT))
1813 fprintf (vect_dump, "Alignment of access forced using peeling.");
1815 if (vect_print_dump_info (REPORT_DETAILS))
1816 fprintf (vect_dump, "Peeling for alignment will be applied.");
1818 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1819 gcc_assert (stat);
1820 return stat;
1825 /* (2) Versioning to force alignment. */
1827 /* Try versioning if:
1828 1) flag_tree_vect_loop_version is TRUE
1829 2) optimize loop for speed
1830 3) there is at least one unsupported misaligned data ref with an unknown
1831 misalignment, and
1832 4) all misaligned data refs with a known misalignment are supported, and
1833 5) the number of runtime alignment checks is within reason. */
1835 do_versioning =
1836 flag_tree_vect_loop_version
1837 && optimize_loop_nest_for_speed_p (loop)
1838 && (!loop->inner); /* FORNOW */
1840 if (do_versioning)
1842 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1844 stmt = DR_STMT (dr);
1845 stmt_info = vinfo_for_stmt (stmt);
1847 /* For interleaving, only the alignment of the first access
1848 matters. */
1849 if (aligned_access_p (dr)
1850 || (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1851 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1852 continue;
1854 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1856 if (!supportable_dr_alignment)
1858 gimple stmt;
1859 int mask;
1860 tree vectype;
1862 if (known_alignment_for_access_p (dr)
1863 || VEC_length (gimple,
1864 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
1865 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1867 do_versioning = false;
1868 break;
1871 stmt = DR_STMT (dr);
1872 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1873 gcc_assert (vectype);
1875 /* The rightmost bits of an aligned address must be zeros.
1876 Construct the mask needed for this test. For example,
1877 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1878 mask must be 15 = 0xf. */
1879 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
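/* Illustrative sketch (not part of GCC): what a runtime test built from
   this mask looks like, assuming a hypothetical 16-byte vector type.

     mask = 16 - 1 = 0xf
     address 0x1000: 0x1000 & 0xf == 0    -> aligned
     address 0x1008: 0x1008 & 0xf == 0x8  -> misaligned, take the scalar
                                             (non-vectorized) version

   Because the mask is a power of two minus one, a single guard of the form
   ((addr_1 | addr_2 | ... | addr_n) & mask) == 0 is equivalent to testing
   each address individually.  */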
1881 /* FORNOW: use the same mask to test all potentially unaligned
1882 references in the loop. The vectorizer currently supports
1883 a single vector size, see the reference to
1884 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1885 vectorization factor is computed. */
1886 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1887 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1888 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1889 VEC_safe_push (gimple, heap,
1890 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
1891 DR_STMT (dr));
1895 /* Versioning requires at least one misaligned data reference. */
1896 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1897 do_versioning = false;
1898 else if (!do_versioning)
1899 VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
1902 if (do_versioning)
1904 VEC(gimple,heap) *may_misalign_stmts
1905 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1906 gimple stmt;
1908 /* It can now be assumed that the data references in the statements
1909 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1910 of the loop being vectorized. */
1911 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
1913 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1914 dr = STMT_VINFO_DATA_REF (stmt_info);
1915 SET_DR_MISALIGNMENT (dr, 0);
1916 if (vect_print_dump_info (REPORT_ALIGNMENT))
1917 fprintf (vect_dump, "Alignment of access forced using versioning.");
1920 if (vect_print_dump_info (REPORT_DETAILS))
1921 fprintf (vect_dump, "Versioning for alignment will be applied.");
1923 /* Peeling and versioning can't be done together at this time. */
1924 gcc_assert (! (do_peeling && do_versioning));
1926 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1927 gcc_assert (stat);
1928 return stat;
1931 /* This point is reached if neither peeling nor versioning is being done. */
1932 gcc_assert (! (do_peeling || do_versioning));
1934 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1935 return stat;
1939 /* Function vect_find_same_alignment_drs.
1941 Update group and alignment relations according to the chosen
1942 vectorization factor. */
1944 static void
1945 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1946 loop_vec_info loop_vinfo)
1948 unsigned int i;
1949 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1950 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1951 struct data_reference *dra = DDR_A (ddr);
1952 struct data_reference *drb = DDR_B (ddr);
1953 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1954 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1955 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1956 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1957 lambda_vector dist_v;
1958 unsigned int loop_depth;
1960 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1961 return;
1963 if (dra == drb)
1964 return;
1966 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1967 return;
1969 /* Loop-based vectorization and known data dependence. */
1970 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1971 return;
1973 /* Data-dependence analysis reports a distance vector of zero
1974 for data-references that overlap only in the first iteration
1975 but whose steps have different signs (see PR45764).
1976 As a sanity check, require equal DR_STEP. */
1977 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1978 return;
1980 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1981 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
1983 int dist = dist_v[loop_depth];
1985 if (vect_print_dump_info (REPORT_DR_DETAILS))
1986 fprintf (vect_dump, "dependence distance = %d.", dist);
1988 /* Same loop iteration. */
1989 if (dist == 0
1990 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1992 /* References whose distance is zero, or a multiple of VF with equal element size, have the same alignment. */
1993 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
1994 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
1995 if (vect_print_dump_info (REPORT_ALIGNMENT))
1996 fprintf (vect_dump, "accesses have the same alignment.");
1997 if (vect_print_dump_info (REPORT_DR_DETAILS))
1999 fprintf (vect_dump, "dependence distance modulo vf == 0 between ");
2000 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
2001 fprintf (vect_dump, " and ");
2002 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
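/* Illustrative sketch (not part of GCC): why a distance that is a multiple
   of the vectorization factor gives equal alignment.  Assume VF = 4,
   4-byte elements, equal DR_STEP of 4 bytes, and dist = 8 iterations:

     byte distance = dist * step = 8 * 4 = 32
     vector size   = VF * 4      = 16

   32 is a multiple of 16, so both references have the same misalignment
   modulo the vector alignment and can share alignment information.  */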
2009 /* Function vect_analyze_data_refs_alignment
2011 Analyze the alignment of the data-references in the loop.
2012 Return FALSE if a data reference is found that cannot be vectorized. */
2014 bool
2015 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
2016 bb_vec_info bb_vinfo)
2018 if (vect_print_dump_info (REPORT_DETAILS))
2019 fprintf (vect_dump, "=== vect_analyze_data_refs_alignment ===");
2021 /* Mark groups of data references with same alignment using
2022 data dependence information. */
2023 if (loop_vinfo)
2025 VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2026 struct data_dependence_relation *ddr;
2027 unsigned int i;
2029 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
2030 vect_find_same_alignment_drs (ddr, loop_vinfo);
2033 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2035 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2036 fprintf (vect_dump,
2037 "not vectorized: can't calculate alignment for data ref.");
2038 return false;
2041 return true;
2045 /* Analyze groups of strided accesses: check that DR belongs to a group of
2046 strided accesses of legal size, step, etc. Detect gaps, single element
2047 interleaving, and other special cases. Set strided access info.
2048 Collect groups of strided stores for further use in SLP analysis. */
2050 static bool
2051 vect_analyze_group_access (struct data_reference *dr)
2053 tree step = DR_STEP (dr);
2054 tree scalar_type = TREE_TYPE (DR_REF (dr));
2055 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2056 gimple stmt = DR_STMT (dr);
2057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2058 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2059 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2060 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2061 HOST_WIDE_INT stride, last_accessed_element = 1;
2062 bool slp_impossible = false;
2063 struct loop *loop = NULL;
2065 if (loop_vinfo)
2066 loop = LOOP_VINFO_LOOP (loop_vinfo);
2068 /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
2069 interleaving group (including gaps). */
2070 stride = dr_step / type_size;
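/* Illustrative sketch (not part of GCC): a hypothetical access "a[4*i]"
   on 4-byte ints gives dr_step = 16 and type_size = 4, so stride = 4.
   Such a statement is treated as a single-element interleaving group of
   size 4 with gaps; the gaps are why a scalar epilogue loop may be needed
   (LOOP_VINFO_PEELING_FOR_GAPS below).  */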
2072 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2073 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2075 /* Check whether this DR is part of an interleaving group, and is the single
2076 element of the group that is accessed in the loop. */
2078 /* Gaps are supported only for loads. STEP must be a multiple of the type
2079 size. The size of the group must be a power of 2. */
2080 if (DR_IS_READ (dr)
2081 && (dr_step % type_size) == 0
2082 && stride > 0
2083 && exact_log2 (stride) != -1)
2085 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2086 GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2087 if (vect_print_dump_info (REPORT_DR_DETAILS))
2089 fprintf (vect_dump, "Detected single element interleaving ");
2090 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2091 fprintf (vect_dump, " step ");
2092 print_generic_expr (vect_dump, step, TDF_SLIM);
2095 if (loop_vinfo)
2097 if (vect_print_dump_info (REPORT_DETAILS))
2098 fprintf (vect_dump, "Data access with gaps requires scalar "
2099 "epilogue loop");
2100 if (loop->inner)
2102 if (vect_print_dump_info (REPORT_DETAILS))
2103 fprintf (vect_dump, "Peeling for outer loop is not"
2104 " supported");
2105 return false;
2108 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2111 return true;
2114 if (vect_print_dump_info (REPORT_DETAILS))
2116 fprintf (vect_dump, "not consecutive access ");
2117 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2120 if (bb_vinfo)
2122 /* Mark the statement as unvectorizable. */
2123 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2124 return true;
2127 return false;
2130 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2132 /* First stmt in the interleaving chain. Check the chain. */
2133 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2134 struct data_reference *data_ref = dr;
2135 unsigned int count = 1;
2136 tree next_step;
2137 tree prev_init = DR_INIT (data_ref);
2138 gimple prev = stmt;
2139 HOST_WIDE_INT diff, count_in_bytes, gaps = 0;
2141 while (next)
2143 /* Skip identical data-refs. When two or more stmts share a
2144 data-ref (supported only for loads), we vectorize only the first
2145 stmt; the rest reuse the vectorized load created for the first
2146 one. */
2147 if (!tree_int_cst_compare (DR_INIT (data_ref),
2148 DR_INIT (STMT_VINFO_DATA_REF (
2149 vinfo_for_stmt (next)))))
2151 if (DR_IS_WRITE (data_ref))
2153 if (vect_print_dump_info (REPORT_DETAILS))
2154 fprintf (vect_dump, "Two store stmts share the same dr.");
2155 return false;
2158 /* Check that there are no load-store dependences for these loads,
2159 to prevent a load-store-load sequence to the same location. */
2160 if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
2161 || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
2163 if (vect_print_dump_info (REPORT_DETAILS))
2164 fprintf (vect_dump,
2165 "READ_WRITE dependence in interleaving.");
2166 return false;
2169 /* For a load, reuse the same data-ref load. */
2170 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2172 prev = next;
2173 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2174 continue;
2177 prev = next;
2179 /* Check that all the accesses have the same STEP. */
2180 next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
2181 if (tree_int_cst_compare (step, next_step))
2183 if (vect_print_dump_info (REPORT_DETAILS))
2184 fprintf (vect_dump, "not consecutive access in interleaving");
2185 return false;
2188 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2189 /* Check that the distance between two accesses is equal to the type
2190 size. Otherwise, we have gaps. */
2191 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2192 - TREE_INT_CST_LOW (prev_init)) / type_size;
2193 if (diff != 1)
2195 /* FORNOW: SLP of accesses with gaps is not supported. */
2196 slp_impossible = true;
2197 if (DR_IS_WRITE (data_ref))
2199 if (vect_print_dump_info (REPORT_DETAILS))
2200 fprintf (vect_dump, "interleaved store with gaps");
2201 return false;
2204 gaps += diff - 1;
2207 last_accessed_element += diff;
2209 /* Store the gap from the previous member of the group. If there is no
2210 gap in the access, GROUP_GAP is always 1. */
2211 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2213 prev_init = DR_INIT (data_ref);
2214 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2215 /* Count the number of data-refs in the chain. */
2216 count++;
2219 /* COUNT is the number of accesses found; we multiply it by the size of
2220 the type to get COUNT_IN_BYTES. */
2221 count_in_bytes = type_size * count;
2223 /* Check that the size of the interleaving (including gaps) is not
2224 greater than STEP. */
2225 if (dr_step && dr_step < count_in_bytes + gaps * type_size)
2227 if (vect_print_dump_info (REPORT_DETAILS))
2229 fprintf (vect_dump, "interleaving size is greater than step for ");
2230 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2232 return false;
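/* Illustrative sketch (not part of GCC): a hypothetical interleaved group
   of 4-byte ints with count = 3 accesses found and gaps = 1 missing
   member gives

     count_in_bytes            = 3 * 4 = 12
     count_in_bytes + gaps * 4 = 16

   so DR_STEP must be at least 16 bytes; a smaller step would mean the
   group (including its gap) does not fit in one scalar iteration, and the
   check above rejects it.  */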
2235 /* Check that the size of the interleaving is equal to STEP for stores,
2236 i.e., that there are no gaps. */
2237 if (dr_step && dr_step != count_in_bytes)
2239 if (DR_IS_READ (dr))
2241 slp_impossible = true;
2242 /* There is a gap after the last load in the group. This gap is the
2243 difference between the stride and the number of elements. When
2244 there is no gap, this difference is 0. */
2245 GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
2247 else
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "interleaved store with gaps");
2251 return false;
2255 /* Check that STEP is a multiple of type size. */
2256 if (dr_step && (dr_step % type_size) != 0)
2258 if (vect_print_dump_info (REPORT_DETAILS))
2260 fprintf (vect_dump, "step is not a multiple of type size: step ");
2261 print_generic_expr (vect_dump, step, TDF_SLIM);
2262 fprintf (vect_dump, " size ");
2263 print_generic_expr (vect_dump, TYPE_SIZE_UNIT (scalar_type),
2264 TDF_SLIM);
2266 return false;
2269 if (stride == 0)
2270 stride = count;
2272 GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2273 if (vect_print_dump_info (REPORT_DETAILS))
2274 fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
2276 /* SLP: create an SLP data structure for every interleaving group of
2277 stores for further analysis in vect_analyse_slp. */
2278 if (DR_IS_WRITE (dr) && !slp_impossible)
2280 if (loop_vinfo)
2281 VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
2282 stmt);
2283 if (bb_vinfo)
2284 VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
2285 stmt);
2288 /* There is a gap at the end of the group. */
2289 if (stride - last_accessed_element > 0 && loop_vinfo)
2291 if (vect_print_dump_info (REPORT_DETAILS))
2292 fprintf (vect_dump, "Data access with gaps requires scalar "
2293 "epilogue loop");
2294 if (loop->inner)
2296 if (vect_print_dump_info (REPORT_DETAILS))
2297 fprintf (vect_dump, "Peeling for outer loop is not supported");
2298 return false;
2301 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2305 return true;
2309 /* Analyze the access pattern of the data-reference DR.
2310 In case of non-consecutive accesses call vect_analyze_group_access() to
2311 analyze groups of strided accesses. */
2313 static bool
2314 vect_analyze_data_ref_access (struct data_reference *dr)
2316 tree step = DR_STEP (dr);
2317 tree scalar_type = TREE_TYPE (DR_REF (dr));
2318 gimple stmt = DR_STMT (dr);
2319 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2320 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2321 struct loop *loop = NULL;
2322 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2324 if (loop_vinfo)
2325 loop = LOOP_VINFO_LOOP (loop_vinfo);
2327 if (loop_vinfo && !step)
2329 if (vect_print_dump_info (REPORT_DETAILS))
2330 fprintf (vect_dump, "bad data-ref access in loop");
2331 return false;
2334 /* Allow invariant loads in loops. */
2335 if (loop_vinfo && dr_step == 0)
2337 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2338 return DR_IS_READ (dr);
2341 if (loop && nested_in_vect_loop_p (loop, stmt))
2343 /* Interleaved accesses are not yet supported within outer-loop
2344 vectorization for references in the inner-loop. */
2345 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2347 /* For the rest of the analysis we use the outer-loop step. */
2348 step = STMT_VINFO_DR_STEP (stmt_info);
2349 dr_step = TREE_INT_CST_LOW (step);
2351 if (dr_step == 0)
2353 if (vect_print_dump_info (REPORT_ALIGNMENT))
2354 fprintf (vect_dump, "zero step in outer loop.");
2355 if (DR_IS_READ (dr))
2356 return true;
2357 else
2358 return false;
2362 /* Consecutive? */
2363 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2364 || (dr_step < 0
2365 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2367 /* Mark that it is not interleaving. */
2368 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2369 return true;
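/* Illustrative sketch (not part of GCC): for a hypothetical access "a[i]"
   on 4-byte ints, DR_STEP is 4 == TYPE_SIZE_UNIT (int), so the access is
   consecutive and no interleaving analysis is needed.  A reversed access
   "a[n-i]" has dr_step == -4, which the second clause above also accepts
   as consecutive (in reverse order).  */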
2372 if (loop && nested_in_vect_loop_p (loop, stmt))
2374 if (vect_print_dump_info (REPORT_ALIGNMENT))
2375 fprintf (vect_dump, "strided access in outer loop.");
2376 return false;
2379 /* Not a consecutive access - check whether it is part of an interleaving group. */
2380 return vect_analyze_group_access (dr);
2384 /* Function vect_analyze_data_ref_accesses.
2386 Analyze the access pattern of all the data references in the loop.
2388 FORNOW: the only access pattern that is considered vectorizable is a
2389 simple step 1 (consecutive) access.
2391 FORNOW: handle only arrays and pointer accesses. */
2393 bool
2394 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2396 unsigned int i;
2397 VEC (data_reference_p, heap) *datarefs;
2398 struct data_reference *dr;
2400 if (vect_print_dump_info (REPORT_DETAILS))
2401 fprintf (vect_dump, "=== vect_analyze_data_ref_accesses ===");
2403 if (loop_vinfo)
2404 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2405 else
2406 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2408 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2409 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2410 && !vect_analyze_data_ref_access (dr))
2412 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2413 fprintf (vect_dump, "not vectorized: complicated access pattern.");
2415 if (bb_vinfo)
2417 /* Mark the statement as not vectorizable. */
2418 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2419 continue;
2421 else
2422 return false;
2425 return true;
2428 /* Function vect_prune_runtime_alias_test_list.
2430 Prune a list of ddrs to be tested at run-time by versioning for alias.
2431 Return FALSE if the resulting list of ddrs is longer than allowed by
2432 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2434 bool
2435 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2437 VEC (ddr_p, heap) * ddrs =
2438 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2439 unsigned i, j;
2441 if (vect_print_dump_info (REPORT_DETAILS))
2442 fprintf (vect_dump, "=== vect_prune_runtime_alias_test_list ===");
2444 for (i = 0; i < VEC_length (ddr_p, ddrs); )
2446 bool found;
2447 ddr_p ddr_i;
2449 ddr_i = VEC_index (ddr_p, ddrs, i);
2450 found = false;
2452 for (j = 0; j < i; j++)
2454 ddr_p ddr_j = VEC_index (ddr_p, ddrs, j);
2456 if (vect_vfa_range_equal (ddr_i, ddr_j))
2458 if (vect_print_dump_info (REPORT_DR_DETAILS))
2460 fprintf (vect_dump, "found equal ranges ");
2461 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_i)), TDF_SLIM);
2462 fprintf (vect_dump, ", ");
2463 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_i)), TDF_SLIM);
2464 fprintf (vect_dump, " and ");
2465 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_j)), TDF_SLIM);
2466 fprintf (vect_dump, ", ");
2467 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_j)), TDF_SLIM);
2469 found = true;
2470 break;
2474 if (found)
2476 VEC_ordered_remove (ddr_p, ddrs, i);
2477 continue;
2479 i++;
2482 if (VEC_length (ddr_p, ddrs) >
2483 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2485 if (vect_print_dump_info (REPORT_DR_DETAILS))
2487 fprintf (vect_dump,
2488 "disable versioning for alias - max number of generated "
2489 "checks exceeded.");
2492 VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0);
2494 return false;
2497 return true;
2501 /* Function vect_analyze_data_refs.
2503 Find all the data references in the loop or basic block.
2505 The general structure of the analysis of data refs in the vectorizer is as
2506 follows:
2507 1- vect_analyze_data_refs(loop/bb): call
2508 compute_data_dependences_for_loop/bb to find and analyze all data-refs
2509 in the loop/bb and their dependences.
2510 2- vect_analyze_dependences(): apply dependence testing using ddrs.
2511 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
2512 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
2516 bool
2517 vect_analyze_data_refs (loop_vec_info loop_vinfo,
2518 bb_vec_info bb_vinfo,
2519 int *min_vf)
2521 struct loop *loop = NULL;
2522 basic_block bb = NULL;
2523 unsigned int i;
2524 VEC (data_reference_p, heap) *datarefs;
2525 struct data_reference *dr;
2526 tree scalar_type;
2527 bool res;
2529 if (vect_print_dump_info (REPORT_DETAILS))
2530 fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
2532 if (loop_vinfo)
2534 loop = LOOP_VINFO_LOOP (loop_vinfo);
2535 res = compute_data_dependences_for_loop
2536 (loop, true,
2537 &LOOP_VINFO_LOOP_NEST (loop_vinfo),
2538 &LOOP_VINFO_DATAREFS (loop_vinfo),
2539 &LOOP_VINFO_DDRS (loop_vinfo));
2541 if (!res)
2543 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2544 fprintf (vect_dump, "not vectorized: loop contains function calls"
2545 " or data references that cannot be analyzed");
2546 return false;
2549 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2551 else
2553 bb = BB_VINFO_BB (bb_vinfo);
2554 res = compute_data_dependences_for_bb (bb, true,
2555 &BB_VINFO_DATAREFS (bb_vinfo),
2556 &BB_VINFO_DDRS (bb_vinfo));
2557 if (!res)
2559 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2560 fprintf (vect_dump, "not vectorized: basic block contains function"
2561 " calls or data references that cannot be analyzed");
2562 return false;
2565 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2568 /* Go through the data-refs, check that the analysis succeeded. Update
2569 pointer from stmt_vec_info struct to DR and vectype. */
2571 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2573 gimple stmt;
2574 stmt_vec_info stmt_info;
2575 tree base, offset, init;
2576 int vf;
2578 if (!dr || !DR_REF (dr))
2580 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2581 fprintf (vect_dump, "not vectorized: unhandled data-ref ");
2583 return false;
2586 stmt = DR_STMT (dr);
2587 stmt_info = vinfo_for_stmt (stmt);
2589 /* Check that analysis of the data-ref succeeded. */
2590 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
2591 || !DR_STEP (dr))
2593 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2595 fprintf (vect_dump, "not vectorized: data ref analysis failed ");
2596 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2599 return false;
2602 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
2604 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2605 fprintf (vect_dump, "not vectorized: base addr of dr is a "
2606 "constant");
2607 return false;
2610 if (TREE_THIS_VOLATILE (DR_REF (dr)))
2612 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2614 fprintf (vect_dump, "not vectorized: volatile type ");
2615 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2617 return false;
2620 base = unshare_expr (DR_BASE_ADDRESS (dr));
2621 offset = unshare_expr (DR_OFFSET (dr));
2622 init = unshare_expr (DR_INIT (dr));
2624 if (stmt_can_throw_internal (stmt))
2626 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2628 fprintf (vect_dump, "not vectorized: statement can throw an "
2629 "exception ");
2630 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2632 return false;
2635 /* Update DR field in stmt_vec_info struct. */
2637 /* If the dataref is in an inner-loop of the loop that is considered
2638 for vectorization, we also want to analyze the access relative to
2639 the outer-loop (DR contains information only relative to the
2640 inner-most enclosing loop). We do that by building a reference to the
2641 first location accessed by the inner-loop, and analyze it relative to
2642 the outer-loop. */
2643 if (loop && nested_in_vect_loop_p (loop, stmt))
2645 tree outer_step, outer_base, outer_init;
2646 HOST_WIDE_INT pbitsize, pbitpos;
2647 tree poffset;
2648 enum machine_mode pmode;
2649 int punsignedp, pvolatilep;
2650 affine_iv base_iv, offset_iv;
2651 tree dinit;
2653 /* Build a reference to the first location accessed by the
2654 inner-loop: *(BASE+INIT). (The first location is actually
2655 BASE+INIT+OFFSET, but we add OFFSET separately later). */
2656 tree inner_base = build_fold_indirect_ref
2657 (fold_build_pointer_plus (base, init));
2659 if (vect_print_dump_info (REPORT_DETAILS))
2661 fprintf (vect_dump, "analyze in outer-loop: ");
2662 print_generic_expr (vect_dump, inner_base, TDF_SLIM);
2665 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
2666 &poffset, &pmode, &punsignedp, &pvolatilep, false);
2667 gcc_assert (outer_base != NULL_TREE);
2669 if (pbitpos % BITS_PER_UNIT != 0)
2671 if (vect_print_dump_info (REPORT_DETAILS))
2672 fprintf (vect_dump, "failed: bit offset alignment.\n");
2673 return false;
2676 outer_base = build_fold_addr_expr (outer_base);
2677 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
2678 &base_iv, false))
2680 if (vect_print_dump_info (REPORT_DETAILS))
2681 fprintf (vect_dump, "failed: evolution of base is not affine.\n");
2682 return false;
2685 if (offset)
2687 if (poffset)
2688 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
2689 poffset);
2690 else
2691 poffset = offset;
2694 if (!poffset)
2696 offset_iv.base = ssize_int (0);
2697 offset_iv.step = ssize_int (0);
2699 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
2700 &offset_iv, false))
2702 if (vect_print_dump_info (REPORT_DETAILS))
2703 fprintf (vect_dump, "evolution of offset is not affine.\n");
2704 return false;
2707 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
2708 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
2709 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2710 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
2711 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2713 outer_step = size_binop (PLUS_EXPR,
2714 fold_convert (ssizetype, base_iv.step),
2715 fold_convert (ssizetype, offset_iv.step));
2717 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
2718 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
2719 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
2720 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
2721 STMT_VINFO_DR_OFFSET (stmt_info) =
2722 fold_convert (ssizetype, offset_iv.base);
2723 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
2724 size_int (highest_pow2_factor (offset_iv.base));
2726 if (vect_print_dump_info (REPORT_DETAILS))
2728 fprintf (vect_dump, "\touter base_address: ");
2729 print_generic_expr (vect_dump, STMT_VINFO_DR_BASE_ADDRESS (stmt_info), TDF_SLIM);
2730 fprintf (vect_dump, "\n\touter offset from base address: ");
2731 print_generic_expr (vect_dump, STMT_VINFO_DR_OFFSET (stmt_info), TDF_SLIM);
2732 fprintf (vect_dump, "\n\touter constant offset from base address: ");
2733 print_generic_expr (vect_dump, STMT_VINFO_DR_INIT (stmt_info), TDF_SLIM);
2734 fprintf (vect_dump, "\n\touter step: ");
2735 print_generic_expr (vect_dump, STMT_VINFO_DR_STEP (stmt_info), TDF_SLIM);
2736 fprintf (vect_dump, "\n\touter aligned to: ");
2737 print_generic_expr (vect_dump, STMT_VINFO_DR_ALIGNED_TO (stmt_info), TDF_SLIM);
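/* Illustrative sketch (not part of GCC): for a reference like "in[i+j]" on
   2-byte shorts, with j the inner loop and i the outer loop being
   vectorized (the same example documented before
   vect_create_addr_base_for_vector_ref further below), the first location
   touched by the inner loop is &in[i].  Relative to the outer loop that
   location advances by one short per iteration, so the outer
   STMT_VINFO_DR_STEP computed above works out to 2 bytes, with an outer
   constant offset (STMT_VINFO_DR_INIT) of 0.  */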
2741 if (STMT_VINFO_DATA_REF (stmt_info))
2743 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2745 fprintf (vect_dump,
2746 "not vectorized: more than one data ref in stmt: ");
2747 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2749 return false;
2752 STMT_VINFO_DATA_REF (stmt_info) = dr;
2754 /* Set vectype for STMT. */
2755 scalar_type = TREE_TYPE (DR_REF (dr));
2756 STMT_VINFO_VECTYPE (stmt_info) =
2757 get_vectype_for_scalar_type (scalar_type);
2758 if (!STMT_VINFO_VECTYPE (stmt_info))
2760 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2762 fprintf (vect_dump,
2763 "not vectorized: no vectype for stmt: ");
2764 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2765 fprintf (vect_dump, " scalar_type: ");
2766 print_generic_expr (vect_dump, scalar_type, TDF_DETAILS);
2769 if (bb_vinfo)
2771 /* Mark the statement as not vectorizable. */
2772 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2773 continue;
2775 else
2776 return false;
2779 /* Adjust the minimal vectorization factor according to the
2780 vector type. */
2781 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
2782 if (vf > *min_vf)
2783 *min_vf = vf;
2786 return true;
2790 /* Function vect_get_new_vect_var.
2792 Returns a new temporary variable. The current naming scheme prepends
2793 the prefix "vect_", "stmp_" or "vect_p" (depending on the value of
2794 VAR_KIND) to NAME if NAME is provided, and uses the prefix alone
2795 otherwise. */
2797 tree
2798 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
2800 const char *prefix;
2801 tree new_vect_var;
2803 switch (var_kind)
2805 case vect_simple_var:
2806 prefix = "vect_";
2807 break;
2808 case vect_scalar_var:
2809 prefix = "stmp_";
2810 break;
2811 case vect_pointer_var:
2812 prefix = "vect_p";
2813 break;
2814 default:
2815 gcc_unreachable ();
2818 if (name)
2820 char* tmp = concat (prefix, name, NULL);
2821 new_vect_var = create_tmp_var (type, tmp);
2822 free (tmp);
2824 else
2825 new_vect_var = create_tmp_var (type, prefix);
2827 /* Mark vector typed variable as a gimple register variable. */
2828 if (TREE_CODE (type) == VECTOR_TYPE)
2829 DECL_GIMPLE_REG_P (new_vect_var) = true;
2831 return new_vect_var;
2835 /* Function vect_create_addr_base_for_vector_ref.
2837 Create an expression that computes the address of the first memory location
2838 that will be accessed for a data reference.
2840 Input:
2841 STMT: The statement containing the data reference.
2842 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
2843 OFFSET: Optional. If supplied, it is added to the initial address.
2844 LOOP: Specifies the loop-nest relative to which the address should be computed.
2845 For example, when the dataref is in an inner-loop nested in an
2846 outer-loop that is now being vectorized, LOOP can be either the
2847 outer-loop, or the inner-loop. The first memory location accessed
2848 by the following dataref ('in' points to short):
2850 for (i=0; i<N; i++)
2851 for (j=0; j<M; j++)
2852 s += in[i+j]
2854 is as follows:
2855 if LOOP=i_loop: &in (relative to i_loop)
2856 if LOOP=j_loop: &in+i*2B (relative to j_loop)
2858 Output:
2859 1. Return an SSA_NAME whose value is the address of the memory location of
2860 the first vector of the data reference.
2861 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
2862 these statement(s) which define the returned SSA_NAME.
2864 FORNOW: We are only handling array accesses with step 1. */
2866 tree
2867 vect_create_addr_base_for_vector_ref (gimple stmt,
2868 gimple_seq *new_stmt_list,
2869 tree offset,
2870 struct loop *loop)
2872 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2873 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2874 tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
2875 tree base_name;
2876 tree data_ref_base_var;
2877 tree vec_stmt;
2878 tree addr_base, addr_expr;
2879 tree dest;
2880 gimple_seq seq = NULL;
2881 tree base_offset = unshare_expr (DR_OFFSET (dr));
2882 tree init = unshare_expr (DR_INIT (dr));
2883 tree vect_ptr_type;
2884 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2885 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2886 tree base;
2888 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
2890 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
2892 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
2894 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
2895 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
2896 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
2899 if (loop_vinfo)
2900 base_name = build_fold_indirect_ref (data_ref_base);
2901 else
2903 base_offset = ssize_int (0);
2904 init = ssize_int (0);
2905 base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
2908 data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
2909 add_referenced_var (data_ref_base_var);
2910 data_ref_base = force_gimple_operand (data_ref_base, &seq, true,
2911 data_ref_base_var);
2912 gimple_seq_add_seq (new_stmt_list, seq);
2914 /* Create base_offset */
2915 base_offset = size_binop (PLUS_EXPR,
2916 fold_convert (sizetype, base_offset),
2917 fold_convert (sizetype, init));
2918 dest = create_tmp_var (sizetype, "base_off");
2919 add_referenced_var (dest);
2920 base_offset = force_gimple_operand (base_offset, &seq, true, dest);
2921 gimple_seq_add_seq (new_stmt_list, seq);
2923 if (offset)
2925 tree tmp = create_tmp_var (sizetype, "offset");
2927 add_referenced_var (tmp);
2928 offset = fold_build2 (MULT_EXPR, sizetype,
2929 fold_convert (sizetype, offset), step);
2930 base_offset = fold_build2 (PLUS_EXPR, sizetype,
2931 base_offset, offset);
2932 base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
2933 gimple_seq_add_seq (new_stmt_list, seq);
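/* Illustrative sketch (not part of GCC): OFFSET is counted in scalar
   elements here, since it is scaled by STEP (TYPE_SIZE_UNIT of the scalar
   type) before being added.  For a hypothetical 4-byte element type and
   OFFSET = 4:

     base_offset = DR_OFFSET + DR_INIT + 4 * 4 bytes
                 = DR_OFFSET + DR_INIT + 16 bytes.  */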
2936 /* base + base_offset */
2937 if (loop_vinfo)
2938 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
2939 else
2941 addr_base = build1 (ADDR_EXPR,
2942 build_pointer_type (TREE_TYPE (DR_REF (dr))),
2943 unshare_expr (DR_REF (dr)));
2946 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
2947 base = get_base_address (DR_REF (dr));
2948 if (base
2949 && TREE_CODE (base) == MEM_REF)
2950 vect_ptr_type
2951 = build_qualified_type (vect_ptr_type,
2952 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
2954 vec_stmt = fold_convert (vect_ptr_type, addr_base);
2955 addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
2956 get_name (base_name));
2957 add_referenced_var (addr_expr);
2958 vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr);
2959 gimple_seq_add_seq (new_stmt_list, seq);
2961 if (DR_PTR_INFO (dr)
2962 && TREE_CODE (vec_stmt) == SSA_NAME)
2964 duplicate_ssa_name_ptr_info (vec_stmt, DR_PTR_INFO (dr));
2965 if (offset)
2967 SSA_NAME_PTR_INFO (vec_stmt)->align = 1;
2968 SSA_NAME_PTR_INFO (vec_stmt)->misalign = 0;
2972 if (vect_print_dump_info (REPORT_DETAILS))
2974 fprintf (vect_dump, "created ");
2975 print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
2978 return vec_stmt;
2982 /* Function vect_create_data_ref_ptr.
2984 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
2985 location accessed in the loop by STMT, along with the def-use update
2986 chain to appropriately advance the pointer through the loop iterations.
2987 Also set aliasing information for the pointer. This pointer is used by
2988 the callers to this function to create a memory reference expression for
2989 vector load/store access.
2991 Input:
2992 1. STMT: a stmt that references memory. Expected to be of the form
2993 GIMPLE_ASSIGN <name, data-ref> or
2994 GIMPLE_ASSIGN <data-ref, name>.
2995 2. AGGR_TYPE: the type of the reference, which should be either a vector
2996 or an array.
2997 3. AT_LOOP: the loop where the vector memref is to be created.
2998 4. OFFSET (optional): an offset to be added to the initial address accessed
2999 by the data-ref in STMT.
3000 5. BSI: location where the new stmts are to be placed if there is no loop
3001 6. ONLY_INIT: indicates whether ap is to be updated in the loop, or remain
3002 pointing to the initial address.
3004 Output:
3005 1. Declare a new ptr to vector_type, and have it point to the base of the
3006 data reference (the initial address accessed by the data reference).
3007 For example, for vector of type V8HI, the following code is generated:
3009 v8hi *ap;
3010 ap = (v8hi *)initial_address;
3012 if OFFSET is not supplied:
3013 initial_address = &a[init];
3014 if OFFSET is supplied:
3015 initial_address = &a[init + OFFSET];
3017 Return the initial_address in INITIAL_ADDRESS.
3019 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
3020 update the pointer in each iteration of the loop.
3022 Return the increment stmt that updates the pointer in PTR_INCR.
3024 3. Set INV_P to true if the access pattern of the data reference in the
3025 vectorized loop is invariant. Set it to false otherwise.
3027 4. Return the pointer. */
3029 tree
3030 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
3031 tree offset, tree *initial_address,
3032 gimple_stmt_iterator *gsi, gimple *ptr_incr,
3033 bool only_init, bool *inv_p)
3035 tree base_name;
3036 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3037 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3038 struct loop *loop = NULL;
3039 bool nested_in_vect_loop = false;
3040 struct loop *containing_loop = NULL;
3041 tree aggr_ptr_type;
3042 tree aggr_ptr;
3043 tree new_temp;
3044 gimple vec_stmt;
3045 gimple_seq new_stmt_list = NULL;
3046 edge pe = NULL;
3047 basic_block new_bb;
3048 tree aggr_ptr_init;
3049 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3050 tree aptr;
3051 gimple_stmt_iterator incr_gsi;
3052 bool insert_after;
3053 bool negative;
3054 tree indx_before_incr, indx_after_incr;
3055 gimple incr;
3056 tree step;
3057 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3058 tree base;
3060 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
3061 || TREE_CODE (aggr_type) == VECTOR_TYPE);
3063 if (loop_vinfo)
3065 loop = LOOP_VINFO_LOOP (loop_vinfo);
3066 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3067 containing_loop = (gimple_bb (stmt))->loop_father;
3068 pe = loop_preheader_edge (loop);
3070 else
3072 gcc_assert (bb_vinfo);
3073 only_init = true;
3074 *ptr_incr = NULL;
3077 /* Check the step (evolution) of the load in LOOP, and record
3078 whether it's invariant. */
3079 if (nested_in_vect_loop)
3080 step = STMT_VINFO_DR_STEP (stmt_info);
3081 else
3082 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
3084 if (tree_int_cst_compare (step, size_zero_node) == 0)
3085 *inv_p = true;
3086 else
3087 *inv_p = false;
3088 negative = tree_int_cst_compare (step, size_zero_node) < 0;
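/* Illustrative sketch (not part of GCC): for a hypothetical invariant load
   "x = *p" inside the loop the step is zero, so *INV_P is set and the
   aggregate pointer is not advanced (its step becomes size_zero_node
   below); for a reversed access the step is negative, and the pointer is
   advanced by -TYPE_SIZE_UNIT (aggr_type) per vector iteration instead.  */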
3090 /* Create an expression for the first address accessed by this load
3091 in LOOP. */
3092 base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
3094 if (vect_print_dump_info (REPORT_DETAILS))
3096 tree data_ref_base = base_name;
3097 fprintf (vect_dump, "create %s-pointer variable to type: ",
3098 tree_code_name[(int) TREE_CODE (aggr_type)]);
3099 print_generic_expr (vect_dump, aggr_type, TDF_SLIM);
3100 if (TREE_CODE (data_ref_base) == VAR_DECL
3101 || TREE_CODE (data_ref_base) == ARRAY_REF)
3102 fprintf (vect_dump, " vectorizing an array ref: ");
3103 else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
3104 fprintf (vect_dump, " vectorizing a record based array ref: ");
3105 else if (TREE_CODE (data_ref_base) == SSA_NAME)
3106 fprintf (vect_dump, " vectorizing a pointer ref: ");
3107 print_generic_expr (vect_dump, base_name, TDF_SLIM);
3110 /* (1) Create the new aggregate-pointer variable. */
3111 aggr_ptr_type = build_pointer_type (aggr_type);
3112 base = get_base_address (DR_REF (dr));
3113 if (base
3114 && TREE_CODE (base) == MEM_REF)
3115 aggr_ptr_type
3116 = build_qualified_type (aggr_ptr_type,
3117 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
3118 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3119 get_name (base_name));
3121 /* Vector and array types inherit the alias set of their component
3122 type by default so we need to use a ref-all pointer if the data
3123 reference does not conflict with the created aggregated data
3124 reference because it is not addressable. */
3125 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
3126 get_alias_set (DR_REF (dr))))
3128 aggr_ptr_type
3129 = build_pointer_type_for_mode (aggr_type,
3130 TYPE_MODE (aggr_ptr_type), true);
3131 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3132 get_name (base_name));
3135 /* Likewise for any of the data references in the stmt group. */
3136 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
3138 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
3141 tree lhs = gimple_assign_lhs (orig_stmt);
3142 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
3143 get_alias_set (lhs)))
3145 aggr_ptr_type
3146 = build_pointer_type_for_mode (aggr_type,
3147 TYPE_MODE (aggr_ptr_type), true);
3148 aggr_ptr
3149 = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3150 get_name (base_name));
3151 break;
3154 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo_for_stmt (orig_stmt));
3156 while (orig_stmt);
3159 add_referenced_var (aggr_ptr);
3161 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
3162 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
3163 def-use update cycles for the pointer: one relative to the outer-loop
3164 (LOOP), which is what steps (3) and (4) below do. The other is relative
3165 to the inner-loop (which is the inner-most loop containing the dataref),
3166 and this is done by step (5) below.
3168 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
3169 inner-most loop, and so steps (3),(4) work the same, and step (5) is
3170 redundant. Steps (3),(4) create the following:
3172 vp0 = &base_addr;
3173 LOOP: vp1 = phi(vp0,vp2)
3176 vp2 = vp1 + step
3177 goto LOOP
3179 If there is an inner-loop nested in loop, then step (5) will also be
3180 applied, and an additional update in the inner-loop will be created:
3182 vp0 = &base_addr;
3183 LOOP: vp1 = phi(vp0,vp2)
3185 inner: vp3 = phi(vp1,vp4)
3186 vp4 = vp3 + inner_step
3187 if () goto inner
3189 vp2 = vp1 + step
3190 if () goto LOOP */
3192 /* (2) Calculate the initial address of the aggregate-pointer, and set
3193 the aggregate-pointer to point to it before the loop. */
3195 /* Create: (&(base[init_val+offset]) in the loop preheader. */
3197 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
3198 offset, loop);
3199 if (new_stmt_list)
3201 if (pe)
3203 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
3204 gcc_assert (!new_bb);
3206 else
3207 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
3210 *initial_address = new_temp;
3212 /* Create: p = (aggr_type *) initial_base */
3213 if (TREE_CODE (new_temp) != SSA_NAME
3214 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
3216 vec_stmt = gimple_build_assign (aggr_ptr,
3217 fold_convert (aggr_ptr_type, new_temp));
3218 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
3219 /* Copy the points-to information if it exists. */
3220 if (DR_PTR_INFO (dr))
3221 duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
3222 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
3223 if (pe)
3225 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
3226 gcc_assert (!new_bb);
3228 else
3229 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
3231 else
3232 aggr_ptr_init = new_temp;
3234 /* (3) Handle the updating of the aggregate-pointer inside the loop.
3235 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
3236 inner-loop nested in LOOP (during outer-loop vectorization). */
3238 /* No update in loop is required. */
3239 if (only_init && (!loop_vinfo || at_loop == loop))
3240 aptr = aggr_ptr_init;
3241 else
3243 /* The step of the aggregate pointer is the type size. */
3244 tree step = TYPE_SIZE_UNIT (aggr_type);
3245 /* One exception to the above is when the scalar step of the load in
3246 LOOP is zero. In this case the step here is also zero. */
3247 if (*inv_p)
3248 step = size_zero_node;
3249 else if (negative)
3250 step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
3252 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
3254 create_iv (aggr_ptr_init,
3255 fold_convert (aggr_ptr_type, step),
3256 aggr_ptr, loop, &incr_gsi, insert_after,
3257 &indx_before_incr, &indx_after_incr);
3258 incr = gsi_stmt (incr_gsi);
3259 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3261 /* Copy the points-to information if it exists. */
3262 if (DR_PTR_INFO (dr))
3264 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3265 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3267 if (ptr_incr)
3268 *ptr_incr = incr;
3270 aptr = indx_before_incr;
3273 if (!nested_in_vect_loop || only_init)
3274 return aptr;
3277 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
3278 nested in LOOP, if it exists. */
3280 gcc_assert (nested_in_vect_loop);
3281 if (!only_init)
3283 standard_iv_increment_position (containing_loop, &incr_gsi,
3284 &insert_after);
3285 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
3286 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
3287 &indx_after_incr);
3288 incr = gsi_stmt (incr_gsi);
3289 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3291 /* Copy the points-to information if it exists. */
3292 if (DR_PTR_INFO (dr))
3294 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3295 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3297 if (ptr_incr)
3298 *ptr_incr = incr;
3300 return indx_before_incr;
3302 else
3303 gcc_unreachable ();
3307 /* Function bump_vector_ptr
3309 Increment a pointer (to a vector type) by vector-size. If requested,
3310 i.e. if PTR-INCR is given, then also connect the new increment stmt
3311 to the existing def-use update-chain of the pointer, by modifying
3312 the PTR_INCR as illustrated below:
3314 The pointer def-use update-chain before this function:
3315 DATAREF_PTR = phi (p_0, p_2)
3316 ....
3317 PTR_INCR: p_2 = DATAREF_PTR + step
3319 The pointer def-use update-chain after this function:
3320 DATAREF_PTR = phi (p_0, p_2)
3321 ....
3322 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
3323 ....
3324 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
3326 Input:
3327 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
3328 in the loop.
3329 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
3330 the loop. The increment amount across iterations is expected
3331 to be vector_size.
3332 BSI - location where the new update stmt is to be placed.
3333 STMT - the original scalar memory-access stmt that is being vectorized.
3334 BUMP - optional. The offset by which to bump the pointer. If not given,
3335 the offset is assumed to be vector_size.
3337 Output: Return NEW_DATAREF_PTR as illustrated above.
3341 tree
3342 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
3343 gimple stmt, tree bump)
3345 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3346 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3347 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3348 tree ptr_var = SSA_NAME_VAR (dataref_ptr);
3349 tree update = TYPE_SIZE_UNIT (vectype);
3350 gimple incr_stmt;
3351 ssa_op_iter iter;
3352 use_operand_p use_p;
3353 tree new_dataref_ptr;
3355 if (bump)
3356 update = bump;
3358 incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
3359 dataref_ptr, update);
3360 new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
3361 gimple_assign_set_lhs (incr_stmt, new_dataref_ptr);
3362 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
3364 /* Copy the points-to information if it exists. */
3365 if (DR_PTR_INFO (dr))
3367 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
3368 SSA_NAME_PTR_INFO (new_dataref_ptr)->align = 1;
3369 SSA_NAME_PTR_INFO (new_dataref_ptr)->misalign = 0;
3372 if (!ptr_incr)
3373 return new_dataref_ptr;
3375 /* Update the vector-pointer's cross-iteration increment. */
3376 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
3378 tree use = USE_FROM_PTR (use_p);
3380 if (use == dataref_ptr)
3381 SET_USE (use_p, new_dataref_ptr);
3382 else
3383 gcc_assert (tree_int_cst_compare (use, update) == 0);
3386 return new_dataref_ptr;
3390 /* Function vect_create_destination_var.
3392 Create a new temporary of type VECTYPE. */
3394 tree
3395 vect_create_destination_var (tree scalar_dest, tree vectype)
3397 tree vec_dest;
3398 const char *new_name;
3399 tree type;
3400 enum vect_var_kind kind;
3402 kind = vectype ? vect_simple_var : vect_scalar_var;
3403 type = vectype ? vectype : TREE_TYPE (scalar_dest);
3405 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
3407 new_name = get_name (scalar_dest);
3408 if (!new_name)
3409 new_name = "var_";
3410 vec_dest = vect_get_new_vect_var (type, kind, new_name);
3411 add_referenced_var (vec_dest);
3413 return vec_dest;
3416 /* Function vect_strided_store_supported.
3418 Returns TRUE if INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
3419 and FALSE otherwise. */
3421 bool
3422 vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
3424 optab ih_optab, il_optab;
3425 enum machine_mode mode;
3427 mode = TYPE_MODE (vectype);
3429 /* vect_permute_store_chain requires the group size to be a power of two. */
3430 if (exact_log2 (count) == -1)
3432 if (vect_print_dump_info (REPORT_DETAILS))
3433 fprintf (vect_dump, "the size of the group of strided accesses"
3434 " is not a power of 2");
3435 return false;
3438 /* Check that the operation is supported. */
3439 ih_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
3440 vectype, optab_default);
3441 il_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
3442 vectype, optab_default);
3443 if (il_optab && ih_optab
3444 && optab_handler (ih_optab, mode) != CODE_FOR_nothing
3445 && optab_handler (il_optab, mode) != CODE_FOR_nothing)
3446 return true;
3448 if (can_vec_perm_for_code_p (VEC_INTERLEAVE_HIGH_EXPR, mode, NULL)
3449 && can_vec_perm_for_code_p (VEC_INTERLEAVE_LOW_EXPR, mode, NULL))
3450 return true;
3452 if (vect_print_dump_info (REPORT_DETAILS))
3453 fprintf (vect_dump, "interleave op not supported by target.");
3454 return false;
3458 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
3459 type VECTYPE. */
3461 bool
3462 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
3464 return vect_lanes_optab_supported_p ("vec_store_lanes",
3465 vec_store_lanes_optab,
3466 vectype, count);
3470 /* Function vect_permute_store_chain.
3472 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
3473 a power of 2, generate interleave_high/low stmts to reorder the data
3474 correctly for the stores. Return the final references for stores in
3475 RESULT_CHAIN.
3477 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3478 The input is 4 vectors each containing 8 elements. We assign a number to
3479 each element, the input sequence is:
3481 1st vec: 0 1 2 3 4 5 6 7
3482 2nd vec: 8 9 10 11 12 13 14 15
3483 3rd vec: 16 17 18 19 20 21 22 23
3484 4th vec: 24 25 26 27 28 29 30 31
3486 The output sequence should be:
3488 1st vec: 0 8 16 24 1 9 17 25
3489 2nd vec: 2 10 18 26 3 11 19 27
3490 3rd vec: 4 12 20 28 5 13 21 29
3491 4th vec: 6 14 22 30 7 15 23 31
3493 i.e., we interleave the contents of the four vectors in their order.
3495 We use interleave_high/low instructions to create such output. The input of
3496 each interleave_high/low operation is two vectors:
3497 1st vec 2nd vec
3498 0 1 2 3 4 5 6 7
3499 the even elements of the result vector are obtained left-to-right from the
3500 high/low elements of the first vector. The odd elements of the result are
3501 obtained left-to-right from the high/low elements of the second vector.
3502 The output of interleave_high will be: 0 4 1 5
3503 and of interleave_low: 2 6 3 7
3506 The permutation is done in log LENGTH stages. In each stage interleave_high
3507 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
3508 where the first argument is taken from the first half of DR_CHAIN and the
3509 second argument from its second half.
3510 In our example,
3512 I1: interleave_high (1st vec, 3rd vec)
3513 I2: interleave_low (1st vec, 3rd vec)
3514 I3: interleave_high (2nd vec, 4th vec)
3515 I4: interleave_low (2nd vec, 4th vec)
3517 The output for the first stage is:
3519 I1: 0 16 1 17 2 18 3 19
3520 I2: 4 20 5 21 6 22 7 23
3521 I3: 8 24 9 25 10 26 11 27
3522 I4: 12 28 13 29 14 30 15 31
3524 The output of the second stage, i.e. the final result is:
3526 I1: 0 8 16 24 1 9 17 25
3527 I2: 2 10 18 26 3 11 19 27
3528 I3: 4 12 20 28 5 13 21 29
3529 I4: 6 14 22 30 7 15 23 31. */
3531 void
3532 vect_permute_store_chain (VEC(tree,heap) *dr_chain,
3533 unsigned int length,
3534 gimple stmt,
3535 gimple_stmt_iterator *gsi,
3536 VEC(tree,heap) **result_chain)
3538 tree perm_dest, vect1, vect2, high, low;
3539 gimple perm_stmt;
3540 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3541 int i;
3542 unsigned int j;
3543 enum tree_code high_code, low_code;
3545 gcc_assert (vect_strided_store_supported (vectype, length));
3547 *result_chain = VEC_copy (tree, heap, dr_chain);
3549 for (i = 0; i < exact_log2 (length); i++)
3551 for (j = 0; j < length/2; j++)
3553 vect1 = VEC_index (tree, dr_chain, j);
3554 vect2 = VEC_index (tree, dr_chain, j+length/2);
3556 /* Create interleaving stmt:
3557 in the case of big endian:
3558 high = interleave_high (vect1, vect2)
3559 and in the case of little endian:
3560 high = interleave_low (vect1, vect2). */
3561 perm_dest = create_tmp_var (vectype, "vect_inter_high");
3562 DECL_GIMPLE_REG_P (perm_dest) = 1;
3563 add_referenced_var (perm_dest);
3564 if (BYTES_BIG_ENDIAN)
3566 high_code = VEC_INTERLEAVE_HIGH_EXPR;
3567 low_code = VEC_INTERLEAVE_LOW_EXPR;
3569 else
3571 low_code = VEC_INTERLEAVE_HIGH_EXPR;
3572 high_code = VEC_INTERLEAVE_LOW_EXPR;
3574 perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest,
3575 vect1, vect2);
3576 high = make_ssa_name (perm_dest, perm_stmt);
3577 gimple_assign_set_lhs (perm_stmt, high);
3578 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3579 VEC_replace (tree, *result_chain, 2*j, high);
3581 /* Create interleaving stmt:
3582 in the case of big endian:
3583 low = interleave_low (vect1, vect2)
3584 and in the case of little endian:
3585 low = interleave_high (vect1, vect2). */
3586 perm_dest = create_tmp_var (vectype, "vect_inter_low");
3587 DECL_GIMPLE_REG_P (perm_dest) = 1;
3588 add_referenced_var (perm_dest);
3589 perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest,
3590 vect1, vect2);
3591 low = make_ssa_name (perm_dest, perm_stmt);
3592 gimple_assign_set_lhs (perm_stmt, low);
3593 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3594 VEC_replace (tree, *result_chain, 2*j+1, low);
3596 dr_chain = VEC_copy (tree, heap, *result_chain);
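/* The following is an illustrative sketch only, not vectorizer code: it runs
   the log2 (LENGTH) interleave stages from the comment above on plain element
   indices (LENGTH == 4 vectors of 8 elements, following the high/low
   convention of the worked example) and prints the resulting chain, which
   matches the final output listed above.  Guarded by #if 0 so it never
   participates in the build.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  enum { LEN = 4, NELTS = 8 };
  int chain[LEN][NELTS], result[LEN][NELTS];
  int i, j, k, stage;

  /* Number the elements 0..31 as in the example.  */
  for (i = 0; i < LEN; i++)
    for (j = 0; j < NELTS; j++)
      chain[i][j] = i * NELTS + j;

  for (stage = 0; stage < 2; stage++)        /* log2 (LEN) stages.  */
    {
      for (j = 0; j < LEN / 2; j++)
        {
          int *vect1 = chain[j], *vect2 = chain[j + LEN / 2];

          /* "high" pairs up the first halves of the two inputs, "low" the
             second halves, as in the 4-element example above.  */
          for (k = 0; k < NELTS / 2; k++)
            {
              result[2 * j][2 * k] = vect1[k];
              result[2 * j][2 * k + 1] = vect2[k];
              result[2 * j + 1][2 * k] = vect1[NELTS / 2 + k];
              result[2 * j + 1][2 * k + 1] = vect2[NELTS / 2 + k];
            }
        }
      memcpy (chain, result, sizeof (chain));
    }

  for (i = 0; i < LEN; i++)
    {
      for (j = 0; j < NELTS; j++)
        printf (" %2d", chain[i][j]);
      printf ("\n");
    }
  return 0;
}
#endif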
3600 /* Function vect_setup_realignment
3602 This function is called when vectorizing an unaligned load using
3603 the dr_explicit_realign[_optimized] scheme.
3604 This function generates the following code at the loop prolog:
3606 p = initial_addr;
3607 x msq_init = *(floor(p)); # prolog load
3608 realignment_token = call target_builtin;
3609 loop:
3610 x msq = phi (msq_init, ---)
3612 The stmts marked with x are generated only for the case of
3613 dr_explicit_realign_optimized.
3615 The code above sets up a new (vector) pointer, pointing to the first
3616 location accessed by STMT, and a "floor-aligned" load using that pointer.
3617 It also generates code to compute the "realignment-token" (if the relevant
3618 target hook was defined), and creates a phi-node at the loop-header bb
3619 whose arguments are the result of the prolog-load (created by this
3620 function) and the result of a load that takes place in the loop (to be
3621 created by the caller to this function).
3623 For the case of dr_explicit_realign_optimized:
3624 The caller to this function uses the phi-result (msq) to create the
3625 realignment code inside the loop, and sets up the missing phi argument,
3626 as follows:
3627 loop:
3628 msq = phi (msq_init, lsq)
3629 lsq = *(floor(p')); # load in loop
3630 result = realign_load (msq, lsq, realignment_token);
3632 For the case of dr_explicit_realign:
3633 loop:
3634 msq = *(floor(p)); # load in loop
3635 p' = p + (VS-1);
3636 lsq = *(floor(p')); # load in loop
3637 result = realign_load (msq, lsq, realignment_token);
3639 Input:
3640 STMT - (scalar) load stmt to be vectorized. This load accesses
3641 a memory location that may be unaligned.
3642 GSI - place where new code is to be inserted.
3643 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
3644 is used.
3646 Output:
3647 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
3648 target hook, if defined.
3649 Return value - the result of the loop-header phi node. */
3651 tree
3652 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
3653 tree *realignment_token,
3654 enum dr_alignment_support alignment_support_scheme,
3655 tree init_addr,
3656 struct loop **at_loop)
3658 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3659 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3660 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3661 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3662 struct loop *loop = NULL;
3663 edge pe = NULL;
3664 tree scalar_dest = gimple_assign_lhs (stmt);
3665 tree vec_dest;
3666 gimple inc;
3667 tree ptr;
3668 tree data_ref;
3669 gimple new_stmt;
3670 basic_block new_bb;
3671 tree msq_init = NULL_TREE;
3672 tree new_temp;
3673 gimple phi_stmt;
3674 tree msq = NULL_TREE;
3675 gimple_seq stmts = NULL;
3676 bool inv_p;
3677 bool compute_in_loop = false;
3678 bool nested_in_vect_loop = false;
3679 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3680 struct loop *loop_for_initial_load = NULL;
3682 if (loop_vinfo)
3684 loop = LOOP_VINFO_LOOP (loop_vinfo);
3685 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3688 gcc_assert (alignment_support_scheme == dr_explicit_realign
3689 || alignment_support_scheme == dr_explicit_realign_optimized);
3691 /* We need to generate three things:
3692 1. the misalignment computation
3693 2. the extra vector load (for the optimized realignment scheme).
3694 3. the phi node for the two vectors from which the realignment is
3695 done (for the optimized realignment scheme). */
3697 /* 1. Determine where to generate the misalignment computation.
3699 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
3700 calculation will be generated by this function, outside the loop (in the
3701 preheader). Otherwise, INIT_ADDR has already been computed for us by the
3702 caller, inside the loop.
3704 Background: If the misalignment remains fixed throughout the iterations of
3705 the loop, then both realignment schemes are applicable, and also the
3706 misalignment computation can be done outside LOOP. This is because we are
3707 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
3708 are a multiple of VS (the Vector Size), and therefore the misalignment in
3709 different vectorized LOOP iterations is always the same.
3710 The problem arises only if the memory access is in an inner-loop nested
3711 inside LOOP, which is now being vectorized using outer-loop vectorization.
3712 This is the only case when the misalignment of the memory access may not
3713 remain fixed throughout the iterations of the inner-loop (as explained in
3714 detail in vect_supportable_dr_alignment). In this case, not only is the
3715 optimized realignment scheme not applicable, but also the misalignment
3716 computation (and generation of the realignment token that is passed to
3717 REALIGN_LOAD) has to be done inside the loop.
3719 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
3720 or not, which in turn determines if the misalignment is computed inside
3721 the inner-loop, or outside LOOP. */
3723 if (init_addr != NULL_TREE || !loop_vinfo)
3725 compute_in_loop = true;
3726 gcc_assert (alignment_support_scheme == dr_explicit_realign);
3730 /* 2. Determine where to generate the extra vector load.
3732 For the optimized realignment scheme, instead of generating two vector
3733 loads in each iteration, we generate a single extra vector load in the
3734 preheader of the loop, and in each iteration reuse the result of the
3735 vector load from the previous iteration. In case the memory access is in
3736 an inner-loop nested inside LOOP, which is now being vectorized using
3737 outer-loop vectorization, we need to determine whether this initial vector
3738 load should be generated at the preheader of the inner-loop, or can be
3739 generated at the preheader of LOOP. If the memory access has no evolution
3740 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
3741 to be generated inside LOOP (in the preheader of the inner-loop). */
3743 if (nested_in_vect_loop)
3745 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
3746 bool invariant_in_outerloop =
3747 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
3748 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
3750 else
3751 loop_for_initial_load = loop;
3752 if (at_loop)
3753 *at_loop = loop_for_initial_load;
3755 if (loop_for_initial_load)
3756 pe = loop_preheader_edge (loop_for_initial_load);
3758 /* 3. For the case of the optimized realignment, create the first vector
3759 load at the loop preheader. */
3761 if (alignment_support_scheme == dr_explicit_realign_optimized)
3763 /* Create msq_init = *(floor(p1)) in the loop preheader */
3765 gcc_assert (!compute_in_loop);
3766 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3767 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
3768 NULL_TREE, &init_addr, NULL, &inc,
3769 true, &inv_p);
3770 new_stmt = gimple_build_assign_with_ops
3771 (BIT_AND_EXPR, NULL_TREE, ptr,
3772 build_int_cst (TREE_TYPE (ptr),
3773 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3774 new_temp = make_ssa_name (SSA_NAME_VAR (ptr), new_stmt);
3775 gimple_assign_set_lhs (new_stmt, new_temp);
3776 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3777 gcc_assert (!new_bb);
3778 data_ref
3779 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
3780 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
3781 new_stmt = gimple_build_assign (vec_dest, data_ref);
3782 new_temp = make_ssa_name (vec_dest, new_stmt);
3783 gimple_assign_set_lhs (new_stmt, new_temp);
3784 mark_symbols_for_renaming (new_stmt);
3785 if (pe)
3787 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3788 gcc_assert (!new_bb);
3790 else
3791 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
3793 msq_init = gimple_assign_lhs (new_stmt);
3796 /* 4. Create realignment token using a target builtin, if available.
3797 It is done either inside the containing loop, or before LOOP (as
3798 determined above). */
3800 if (targetm.vectorize.builtin_mask_for_load)
3802 tree builtin_decl;
3804 /* Compute INIT_ADDR - the initial address accessed by this memref. */
3805 if (!init_addr)
3807 /* Generate the INIT_ADDR computation outside LOOP. */
3808 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
3809 NULL_TREE, loop);
3810 if (loop)
3812 pe = loop_preheader_edge (loop);
3813 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3814 gcc_assert (!new_bb);
3816 else
3817 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
3820 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
3821 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
3822 vec_dest =
3823 vect_create_destination_var (scalar_dest,
3824 gimple_call_return_type (new_stmt));
3825 new_temp = make_ssa_name (vec_dest, new_stmt);
3826 gimple_call_set_lhs (new_stmt, new_temp);
3828 if (compute_in_loop)
3829 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
3830 else
3832 /* Generate the misalignment computation outside LOOP. */
3833 pe = loop_preheader_edge (loop);
3834 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3835 gcc_assert (!new_bb);
3838 *realignment_token = gimple_call_lhs (new_stmt);
3840 /* The result of the CALL_EXPR to this builtin is determined from
3841 the value of the parameter and no global variables are touched
3842 which makes the builtin a "const" function. Requiring the
3843 builtin to have the "const" attribute makes it unnecessary
3844 to call mark_call_clobbered. */
3845 gcc_assert (TREE_READONLY (builtin_decl));
3848 if (alignment_support_scheme == dr_explicit_realign)
3849 return msq;
3851 gcc_assert (!compute_in_loop);
3852 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
3855 /* 5. Create msq = phi <msq_init, lsq> in loop */
3857 pe = loop_preheader_edge (containing_loop);
3858 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3859 msq = make_ssa_name (vec_dest, NULL);
3860 phi_stmt = create_phi_node (msq, containing_loop->header);
3861 SSA_NAME_DEF_STMT (msq) = phi_stmt;
3862 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
3864 return msq;
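/* A scalar model of the scheme above, for illustration only and under the
   simplifying assumption that the realignment token just encodes the byte
   misalignment of the original address (the real token encoding and the
   REALIGN_LOAD semantics are target specific).  MSQ and LSQ are the two
   "floor"-aligned loads; the result is VS bytes picked out of their
   concatenation.  Guarded by #if 0 so it never participates in the build.  */
#if 0
#include <stdint.h>
#include <string.h>

#define VS 16   /* vector size in bytes -- an assumption for the sketch */

static void
realign_load_sketch (const uint8_t *p, uint8_t out[VS])
{
  uintptr_t misalign = (uintptr_t) p & (VS - 1);       /* realignment token */
  const uint8_t *first = p - misalign;                 /* floor (p)          */
  const uint8_t *second                                /* floor (p + VS - 1) */
    = (const uint8_t *) (((uintptr_t) (p + VS - 1)) & ~(uintptr_t) (VS - 1));
  uint8_t both[2 * VS];

  memcpy (both, first, VS);            /* msq = *(floor (p))          */
  memcpy (both + VS, second, VS);      /* lsq = *(floor (p + VS - 1)) */
  memcpy (out, both + misalign, VS);   /* realign_load (msq, lsq, rt) */
}
#endif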
3868 /* Function vect_strided_load_supported.
3870 Returns TRUE if EXTRACT_EVEN and EXTRACT_ODD operations are supported,
3871 and FALSE otherwise. */
3873 bool
3874 vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
3876 optab ee_optab, eo_optab;
3877 enum machine_mode mode;
3879 mode = TYPE_MODE (vectype);
3881 /* vect_permute_load_chain requires the group size to be a power of two. */
3882 if (exact_log2 (count) == -1)
3884 if (vect_print_dump_info (REPORT_DETAILS))
3885 fprintf (vect_dump, "the size of the group of strided accesses"
3886 " is not a power of 2");
3887 return false;
3890 ee_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR,
3891 vectype, optab_default);
3892 eo_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR,
3893 vectype, optab_default);
3894 if (ee_optab && eo_optab
3895 && optab_handler (ee_optab, mode) != CODE_FOR_nothing
3896 && optab_handler (eo_optab, mode) != CODE_FOR_nothing)
3897 return true;
3899 if (can_vec_perm_for_code_p (VEC_EXTRACT_EVEN_EXPR, mode, NULL)
3900 && can_vec_perm_for_code_p (VEC_EXTRACT_ODD_EXPR, mode, NULL))
3901 return true;
3903 if (vect_print_dump_info (REPORT_DETAILS))
3904 fprintf (vect_dump, "extract even/odd not supported by target");
3905 return false;
3908 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
3909 type VECTYPE. */
3911 bool
3912 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
3914 return vect_lanes_optab_supported_p ("vec_load_lanes",
3915 vec_load_lanes_optab,
3916 vectype, count);
3919 /* Function vect_permute_load_chain.
3921 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
3922 a power of 2, generate extract_even/odd stmts to reorder the input data
3923 correctly. Return the final references for loads in RESULT_CHAIN.
3925 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3926 The input is 4 vectors each containing 8 elements. We assign a number to each
3927 element; the input sequence is:
3929 1st vec: 0 1 2 3 4 5 6 7
3930 2nd vec: 8 9 10 11 12 13 14 15
3931 3rd vec: 16 17 18 19 20 21 22 23
3932 4th vec: 24 25 26 27 28 29 30 31
3934 The output sequence should be:
3936 1st vec: 0 4 8 12 16 20 24 28
3937 2nd vec: 1 5 9 13 17 21 25 29
3938 3rd vec: 2 6 10 14 18 22 26 30
3939 4th vec: 3 7 11 15 19 23 27 31
3941 i.e., the first output vector should contain the first elements of each
3942 interleaving group, etc.
3944 We use extract_even/odd instructions to create such output. The input of
3945 each extract_even/odd operation is two vectors:
3946 1st vec 2nd vec
3947 0 1 2 3 4 5 6 7
3949 and the output is the vector of extracted even/odd elements. The output of
3950 extract_even will be: 0 2 4 6
3951 and of extract_odd: 1 3 5 7
3954 The permutation is done in log2 (LENGTH) stages. In each stage extract_even
3955 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
3956 their order. In our example,
3958 E1: extract_even (1st vec, 2nd vec)
3959 E2: extract_odd (1st vec, 2nd vec)
3960 E3: extract_even (3rd vec, 4th vec)
3961 E4: extract_odd (3rd vec, 4th vec)
3963 The output for the first stage will be:
3965 E1: 0 2 4 6 8 10 12 14
3966 E2: 1 3 5 7 9 11 13 15
3967 E3: 16 18 20 22 24 26 28 30
3968 E4: 17 19 21 23 25 27 29 31
3970 In order to proceed and create the correct sequence for the next stage (or
3971 for the correct output, if the second stage is the last one, as in our
3972 example), we first put the output of the extract_even operation and then the
3973 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
3974 The input for the second stage is:
3976 1st vec (E1): 0 2 4 6 8 10 12 14
3977 2nd vec (E3): 16 18 20 22 24 26 28 30
3978 3rd vec (E2): 1 3 5 7 9 11 13 15
3979 4th vec (E4): 17 19 21 23 25 27 29 31
3981 The output of the second stage:
3983 E1: 0 4 8 12 16 20 24 28
3984 E2: 2 6 10 14 18 22 26 30
3985 E3: 1 5 9 13 17 21 25 29
3986 E4: 3 7 11 15 19 23 27 31
3988 And RESULT_CHAIN after reordering:
3990 1st vec (E1): 0 4 8 12 16 20 24 28
3991 2nd vec (E3): 1 5 9 13 17 21 25 29
3992 3rd vec (E2): 2 6 10 14 18 22 26 30
3993 4th vec (E4): 3 7 11 15 19 23 27 31. */
3995 static void
3996 vect_permute_load_chain (VEC(tree,heap) *dr_chain,
3997 unsigned int length,
3998 gimple stmt,
3999 gimple_stmt_iterator *gsi,
4000 VEC(tree,heap) **result_chain)
4002 tree perm_dest, data_ref, first_vect, second_vect;
4003 gimple perm_stmt;
4004 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4005 int i;
4006 unsigned int j;
4008 gcc_assert (vect_strided_load_supported (vectype, length));
4010 *result_chain = VEC_copy (tree, heap, dr_chain);
4011 for (i = 0; i < exact_log2 (length); i++)
4013 for (j = 0; j < length; j += 2)
4015 first_vect = VEC_index (tree, dr_chain, j);
4016 second_vect = VEC_index (tree, dr_chain, j+1);
4018 /* data_ref = permute_even (first_data_ref, second_data_ref); */
4019 perm_dest = create_tmp_var (vectype, "vect_perm_even");
4020 DECL_GIMPLE_REG_P (perm_dest) = 1;
4021 add_referenced_var (perm_dest);
4023 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
4024 perm_dest, first_vect,
4025 second_vect);
4027 data_ref = make_ssa_name (perm_dest, perm_stmt);
4028 gimple_assign_set_lhs (perm_stmt, data_ref);
4029 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4030 mark_symbols_for_renaming (perm_stmt);
4032 VEC_replace (tree, *result_chain, j/2, data_ref);
4034 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
4035 perm_dest = create_tmp_var (vectype, "vect_perm_odd");
4036 DECL_GIMPLE_REG_P (perm_dest) = 1;
4037 add_referenced_var (perm_dest);
4039 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
4040 perm_dest, first_vect,
4041 second_vect);
4042 data_ref = make_ssa_name (perm_dest, perm_stmt);
4043 gimple_assign_set_lhs (perm_stmt, data_ref);
4044 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4045 mark_symbols_for_renaming (perm_stmt);
4047 VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
4049 dr_chain = VEC_copy (tree, heap, *result_chain);
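/* Again an illustrative sketch only, not vectorizer code: the log2 (LENGTH)
   extract_even/extract_odd stages from the comment above, run on plain element
   indices (LENGTH == 4 vectors of 8 elements), with the even result stored at
   J/2 and the odd result at J/2 + LENGTH/2 exactly as in the loop above.
   Printing the chain reproduces "RESULT_CHAIN after reordering".  Guarded by
   #if 0 so it never participates in the build.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  enum { LEN = 4, NELTS = 8 };
  int chain[LEN][NELTS], result[LEN][NELTS];
  int i, j, k, stage;

  /* Number the elements 0..31 as in the example.  */
  for (i = 0; i < LEN; i++)
    for (j = 0; j < NELTS; j++)
      chain[i][j] = i * NELTS + j;

  for (stage = 0; stage < 2; stage++)        /* log2 (LEN) stages.  */
    {
      for (j = 0; j < LEN; j += 2)
        {
          int *first = chain[j], *second = chain[j + 1];

          /* extract_even takes the even elements of FIRST followed by the
             even elements of SECOND; extract_odd takes the odd ones.  */
          for (k = 0; k < NELTS / 2; k++)
            {
              result[j / 2][k] = first[2 * k];
              result[j / 2][NELTS / 2 + k] = second[2 * k];
              result[j / 2 + LEN / 2][k] = first[2 * k + 1];
              result[j / 2 + LEN / 2][NELTS / 2 + k] = second[2 * k + 1];
            }
        }
      memcpy (chain, result, sizeof (chain));
    }

  for (i = 0; i < LEN; i++)
    {
      for (j = 0; j < NELTS; j++)
        printf (" %2d", chain[i][j]);
      printf ("\n");
    }
  return 0;
}
#endif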
4054 /* Function vect_transform_strided_load.
4056 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
4057 to perform their permutation, and assign the resulting vectorized statements
4058 to the scalar statements. */
4061 void
4062 vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
4063 gimple_stmt_iterator *gsi)
4065 VEC(tree,heap) *result_chain = NULL;
4067 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
4068 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
4069 vectors, that are ready for vector computation. */
4070 result_chain = VEC_alloc (tree, heap, size);
4071 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
4072 vect_record_strided_load_vectors (stmt, result_chain);
4073 VEC_free (tree, heap, result_chain);
4076 /* RESULT_CHAIN contains the output of a group of strided loads that were
4077 generated as part of the vectorization of STMT. Assign the statement
4078 for each vector to the associated scalar statement. */
4080 void
4081 vect_record_strided_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
4083 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
4084 gimple next_stmt, new_stmt;
4085 unsigned int i, gap_count;
4086 tree tmp_data_ref;
4088 /* Put a permuted data-ref in the VECTORIZED_STMT field.
4089 Since we scan the chain starting from its first node, their order
4090 corresponds to the order of data-refs in RESULT_CHAIN. */
4091 next_stmt = first_stmt;
4092 gap_count = 1;
4093 FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
4095 if (!next_stmt)
4096 break;
4098 /* Skip the gaps. Loads created for the gaps will be removed by the dead
4099 code elimination pass later. No need to check for the first stmt in
4100 the group, since it always exists.
4101 GROUP_GAP is the number of steps in elements from the previous
4102 access (if there is no gap GROUP_GAP is 1). We skip loads that
4103 correspond to the gaps. */
4104 if (next_stmt != first_stmt
4105 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
4107 gap_count++;
4108 continue;
4111 while (next_stmt)
4113 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
4114 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
4115 copies, and we put the new vector statement in the first available
4116 RELATED_STMT. */
4117 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
4118 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
4119 else
4121 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4123 gimple prev_stmt =
4124 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
4125 gimple rel_stmt =
4126 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
4127 while (rel_stmt)
4129 prev_stmt = rel_stmt;
4130 rel_stmt =
4131 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
4134 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
4135 new_stmt;
4139 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
4140 gap_count = 1;
4141 /* If NEXT_STMT accesses the same DR as the previous statement,
4142 put the same TMP_DATA_REF as its vectorized statement; otherwise
4143 get the next data-ref from RESULT_CHAIN. */
4144 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4145 break;
4150 /* Function vect_can_force_dr_alignment_p.
4152 Returns whether a DECL can be forced to be aligned
4153 on an ALIGNMENT-bit boundary. */
4155 bool
4156 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
4158 if (TREE_CODE (decl) != VAR_DECL)
4159 return false;
4161 if (DECL_EXTERNAL (decl))
4162 return false;
4164 if (TREE_ASM_WRITTEN (decl))
4165 return false;
4167 if (TREE_STATIC (decl))
4168 return (alignment <= MAX_OFILE_ALIGNMENT);
4169 else
4170 return (alignment <= MAX_STACK_ALIGNMENT);
4174 /* Return whether the data reference DR is supported with respect to its
4175 alignment.
4176 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
4177 if it is aligned, i.e., check if it is possible to vectorize it with different
4178 alignment. */
4180 enum dr_alignment_support
4181 vect_supportable_dr_alignment (struct data_reference *dr,
4182 bool check_aligned_accesses)
4184 gimple stmt = DR_STMT (dr);
4185 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4186 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4187 enum machine_mode mode = TYPE_MODE (vectype);
4188 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4189 struct loop *vect_loop = NULL;
4190 bool nested_in_vect_loop = false;
4192 if (aligned_access_p (dr) && !check_aligned_accesses)
4193 return dr_aligned;
4195 if (loop_vinfo)
4197 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
4198 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
4201 /* Possibly unaligned access. */
4203 /* We can choose between using the implicit realignment scheme (generating
4204 a misaligned_move stmt) and the explicit realignment scheme (generating
4205 aligned loads with a REALIGN_LOAD). There are two variants to the
4206 explicit realignment scheme: optimized, and unoptimized.
4207 We can optimize the realignment only if the step between consecutive
4208 vector loads is equal to the vector size. Since the vector memory
4209 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
4210 is guaranteed that the misalignment amount remains the same throughout the
4211 execution of the vectorized loop. Therefore, we can create the
4212 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
4213 at the loop preheader.
4215 However, in the case of outer-loop vectorization, when vectorizing a
4216 memory access in the inner-loop nested within the LOOP that is now being
4217 vectorized, while it is guaranteed that the misalignment of the
4218 vectorized memory access will remain the same in different outer-loop
4219 iterations, it is *not* guaranteed that it will remain the same throughout
4220 the execution of the inner-loop. This is because the inner-loop advances
4221 with the original scalar step (and not in steps of VS). If the inner-loop
4222 step happens to be a multiple of VS, then the misalignment remains fixed
4223 and we can use the optimized realignment scheme. For example:
4225 for (i=0; i<N; i++)
4226 for (j=0; j<M; j++)
4227 s += a[i+j];
4229 When vectorizing the i-loop in the above example, the step between
4230 consecutive vector loads is 1, and so the misalignment does not remain
4231 fixed across the execution of the inner-loop, and the realignment cannot
4232 be optimized (as illustrated in the following pseudo vectorized loop):
4234 for (i=0; i<N; i+=4)
4235 for (j=0; j<M; j++){
4236 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
4237 // when j is {0,1,2,3,4,5,6,7,...} respectively.
4238 // (assuming that we start from an aligned address).
4241 We therefore have to use the unoptimized realignment scheme:
4243 for (i=0; i<N; i+=4)
4244 for (j=k; j<M; j+=4)
4245 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
4246 // that the misalignment of the initial address is
4247 // 0).
4249 The loop can then be vectorized as follows:
4251 for (k=0; k<4; k++){
4252 rt = get_realignment_token (&vp[k]);
4253 for (i=0; i<N; i+=4){
4254 v1 = vp[i+k];
4255 for (j=k; j<M; j+=4){
4256 v2 = vp[i+j+VS-1];
4257 va = REALIGN_LOAD <v1,v2,rt>;
4258 vs += va;
4259 v1 = v2;
4262 } */
4264 if (DR_IS_READ (dr))
4266 bool is_packed = false;
4267 tree type = (TREE_TYPE (DR_REF (dr)));
4269 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
4270 && (!targetm.vectorize.builtin_mask_for_load
4271 || targetm.vectorize.builtin_mask_for_load ()))
4273 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4274 if ((nested_in_vect_loop
4275 && (TREE_INT_CST_LOW (DR_STEP (dr))
4276 != GET_MODE_SIZE (TYPE_MODE (vectype))))
4277 || !loop_vinfo)
4278 return dr_explicit_realign;
4279 else
4280 return dr_explicit_realign_optimized;
4282 if (!known_alignment_for_access_p (dr))
4284 tree ba = DR_BASE_OBJECT (dr);
4286 if (ba)
4287 is_packed = contains_packed_reference (ba);
4290 if (targetm.vectorize.
4291 support_vector_misalignment (mode, type,
4292 DR_MISALIGNMENT (dr), is_packed))
4293 /* Can't software pipeline the loads, but can at least do them. */
4294 return dr_unaligned_supported;
4296 else
4298 bool is_packed = false;
4299 tree type = (TREE_TYPE (DR_REF (dr)));
4301 if (!known_alignment_for_access_p (dr))
4303 tree ba = DR_BASE_OBJECT (dr);
4305 if (ba)
4306 is_packed = contains_packed_reference (ba);
4309 if (targetm.vectorize.
4310 support_vector_misalignment (mode, type,
4311 DR_MISALIGNMENT (dr), is_packed))
4312 return dr_unaligned_supported;
4315 /* Unsupported. */
4316 return dr_unaligned_unsupported;