gcc/tree-vect-data-refs.c
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "target.h"
31 #include "basic-block.h"
32 #include "tree-pretty-print.h"
33 #include "gimple-pretty-print.h"
34 #include "tree-flow.h"
35 #include "tree-dump.h"
36 #include "cfgloop.h"
37 #include "tree-chrec.h"
38 #include "tree-scalar-evolution.h"
39 #include "tree-vectorizer.h"
40 #include "diagnostic-core.h"
42 /* Need to include rtl.h, expr.h, etc. for optabs. */
43 #include "expr.h"
44 #include "optabs.h"
46 /* Return true if load- or store-lanes optab OPTAB is implemented for
47 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
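/* Illustrative example (added, not part of the original comment): load/store-lanes
   optabs correspond to instructions such as NEON-style vld3/vst3, which transfer
   COUNT interleaved vectors at once.  E.g. for COUNT == 3 and a V4SI VECTYPE
   (128 bits), the required array mode covers 3 * 128 = 384 bits; if the target
   provides no such integer mode, mode_for_size returns BLKmode below and the
   optab cannot be used.  */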
49 static bool
50 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
51 tree vectype, unsigned HOST_WIDE_INT count)
53 enum machine_mode mode, array_mode;
54 bool limit_p;
56 mode = TYPE_MODE (vectype);
57 limit_p = !targetm.array_mode_supported_p (mode, count);
58 array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
59 MODE_INT, limit_p);
61 if (array_mode == BLKmode)
63 if (vect_print_dump_info (REPORT_DETAILS))
64 fprintf (vect_dump, "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
65 GET_MODE_NAME (mode), count);
66 return false;
69 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
71 if (vect_print_dump_info (REPORT_DETAILS))
72 fprintf (vect_dump, "cannot use %s<%s><%s>",
73 name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
74 return false;
77 if (vect_print_dump_info (REPORT_DETAILS))
78 fprintf (vect_dump, "can use %s<%s><%s>",
79 name, GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
81 return true;
85 /* Return the smallest scalar part of STMT.
86 This is used to determine the vectype of the stmt. We generally set the
87 vectype according to the type of the result (lhs). For stmts whose
88 result-type is different than the type of the arguments (e.g., demotion,
89 promotion), vectype will be reset appropriately (later). Note that we have
90 to visit the smallest datatype in this function, because that determines the
91 VF. If the smallest datatype in the loop is present only as the rhs of a
92 promotion operation - we'd miss it.
93 Such a case, where a variable of this datatype does not appear in the lhs
94 anywhere in the loop, can only occur if it's an invariant: e.g.:
95 'int_x = (int) short_inv', which we'd expect to have been optimized away by
96 invariant motion. However, we cannot rely on invariant motion to always
97 take invariants out of the loop, and so in the case of promotion we also
98 have to check the rhs.
99 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
100 types. */
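/* Illustrative example: in a loop whose only use of a 'short' value is
     int_y = (int) short_x;
   the smallest scalar type is 'short' (2 bytes), so with 128-bit vectors the
   vectorization factor is 8, even though the result type 'int' alone would
   suggest a VF of 4.  */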
102 tree
103 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
104 HOST_WIDE_INT *rhs_size_unit)
106 tree scalar_type = gimple_expr_type (stmt);
107 HOST_WIDE_INT lhs, rhs;
109 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
111 if (is_gimple_assign (stmt)
112 && (gimple_assign_cast_p (stmt)
113 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
114 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
116 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
118 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
119 if (rhs < lhs)
120 scalar_type = rhs_type;
123 *lhs_size_unit = lhs;
124 *rhs_size_unit = rhs;
125 return scalar_type;
129 /* Find the place of the data-ref in STMT in the interleaving chain that starts
130 from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
133 vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
135 gimple next_stmt = first_stmt;
136 int result = 0;
138 if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
139 return -1;
141 while (next_stmt && next_stmt != stmt)
143 result++;
144 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
147 if (next_stmt)
148 return result;
149 else
150 return -1;
154 /* Function vect_insert_into_interleaving_chain.
156 Insert DRA into the interleaving chain of DRB according to DRA's INIT. */
158 static void
159 vect_insert_into_interleaving_chain (struct data_reference *dra,
160 struct data_reference *drb)
162 gimple prev, next;
163 tree next_init;
164 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
165 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
167 prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
168 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
169 while (next)
171 next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
172 if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
174 /* Insert here. */
175 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
176 GROUP_NEXT_ELEMENT (stmtinfo_a) = next;
177 return;
179 prev = next;
180 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
183 /* We got to the end of the list. Insert here. */
184 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
185 GROUP_NEXT_ELEMENT (stmtinfo_a) = NULL;
189 /* Function vect_update_interleaving_chain.
191 For two data-refs DRA and DRB that are a part of a chain of interleaved data
192 accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
194 There are four possible cases:
195 1. New stmts - both DRA and DRB are not a part of any chain:
196 FIRST_DR = DRB
197 NEXT_DR (DRB) = DRA
198 2. DRB is a part of a chain and DRA is not:
199 no need to update FIRST_DR
200 no need to insert DRB
201 insert DRA according to init
202 3. DRA is a part of a chain and DRB is not:
203 if (init of FIRST_DR > init of DRB)
204 FIRST_DR = DRB
205 NEXT(FIRST_DR) = previous FIRST_DR
206 else
207 insert DRB according to its init
208 4. both DRA and DRB are in some interleaving chains:
209 choose the chain with the smallest init of FIRST_DR
210 insert the nodes of the second chain into the first one. */
212 static void
213 vect_update_interleaving_chain (struct data_reference *drb,
214 struct data_reference *dra)
216 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
217 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
218 tree next_init, init_dra_chain, init_drb_chain;
219 gimple first_a, first_b;
220 tree node_init;
221 gimple node, prev, next, first_stmt;
223 /* 1. New stmts - both DRA and DRB are not a part of any chain. */
224 if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
226 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (drb);
227 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
228 GROUP_NEXT_ELEMENT (stmtinfo_b) = DR_STMT (dra);
229 return;
232 /* 2. DRB is a part of a chain and DRA is not. */
233 if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && GROUP_FIRST_ELEMENT (stmtinfo_b))
235 GROUP_FIRST_ELEMENT (stmtinfo_a) = GROUP_FIRST_ELEMENT (stmtinfo_b);
236 /* Insert DRA into the chain of DRB. */
237 vect_insert_into_interleaving_chain (dra, drb);
238 return;
241 /* 3. DRA is a part of a chain and DRB is not. */
242 if (GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
244 gimple old_first_stmt = GROUP_FIRST_ELEMENT (stmtinfo_a);
245 tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
246 old_first_stmt)));
247 gimple tmp;
249 if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
251 /* DRB's init is smaller than the init of the stmt previously marked
252 as the first stmt of the interleaving chain of DRA. Therefore, we
253 update FIRST_STMT and put DRB in the head of the list. */
254 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
255 GROUP_NEXT_ELEMENT (stmtinfo_b) = old_first_stmt;
257 /* Update all the stmts in the list to point to the new FIRST_STMT. */
258 tmp = old_first_stmt;
259 while (tmp)
261 GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) = DR_STMT (drb);
262 tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (tmp));
265 else
267 /* Insert DRB in the list of DRA. */
268 vect_insert_into_interleaving_chain (drb, dra);
269 GROUP_FIRST_ELEMENT (stmtinfo_b) = GROUP_FIRST_ELEMENT (stmtinfo_a);
271 return;
274 /* 4. both DRA and DRB are in some interleaving chains. */
275 first_a = GROUP_FIRST_ELEMENT (stmtinfo_a);
276 first_b = GROUP_FIRST_ELEMENT (stmtinfo_b);
277 if (first_a == first_b)
278 return;
279 init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
280 init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b)));
282 if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
284 /* Insert the nodes of DRA chain into the DRB chain.
285 After inserting a node, continue from this node of the DRB chain (don't
286 start from the beginning). */
287 node = GROUP_FIRST_ELEMENT (stmtinfo_a);
288 prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
289 first_stmt = first_b;
291 else
293 /* Insert the nodes of DRB chain into the DRA chain.
294 After inserting a node, continue from this node of the DRA chain (don't
295 start from the beginning). */
296 node = GROUP_FIRST_ELEMENT (stmtinfo_b);
297 prev = GROUP_FIRST_ELEMENT (stmtinfo_a);
298 first_stmt = first_a;
301 while (node)
303 node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
304 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
305 while (next)
307 next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
308 if (tree_int_cst_compare (next_init, node_init) > 0)
310 /* Insert here. */
311 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
312 GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = next;
313 prev = node;
314 break;
316 prev = next;
317 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
319 if (!next)
321 /* We got to the end of the list. Insert here. */
322 GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
323 GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = NULL;
324 prev = node;
326 GROUP_FIRST_ELEMENT (vinfo_for_stmt (node)) = first_stmt;
327 node = GROUP_NEXT_ELEMENT (vinfo_for_stmt (node));
331 /* Check dependence between DRA and DRB for basic block vectorization.
332 If the accesses share same bases and offsets, we can compare their initial
333 constant offsets to decide whether they differ or not. In case of a read-
334 write dependence we check that the load is before the store to ensure that
335 vectorization will not change the order of the accesses. */
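/* Illustrative example: in a basic block containing
     x = a[0];   (load)
     a[0] = y;   (store)
   the load comes before the store, so vectorization cannot reorder them and
   the pair is not treated as a blocking dependence; with the opposite order
   (store first, then load) the dependence is reported.  */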
337 static bool
338 vect_drs_dependent_in_basic_block (struct data_reference *dra,
339 struct data_reference *drb)
341 HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
342 gimple earlier_stmt;
344 /* We only call this function for pairs of loads and stores, but we verify
345 it here. */
346 if (DR_IS_READ (dra) == DR_IS_READ (drb))
348 if (DR_IS_READ (dra))
349 return false;
350 else
351 return true;
354 /* Check that the data-refs have same bases and offsets. If not, we can't
355 determine if they are dependent. */
356 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
357 || !dr_equal_offsets_p (dra, drb))
358 return true;
360 /* Check the types. */
361 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
362 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
364 if (type_size_a != type_size_b
365 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
366 TREE_TYPE (DR_REF (drb))))
367 return true;
369 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
370 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
372 /* Two different locations - no dependence. */
373 if (init_a != init_b)
374 return false;
376 /* We have a read-write dependence. Check that the load is before the store.
377 When we vectorize basic blocks, vector load can be only before
378 corresponding scalar load, and vector store can be only after its
379 corresponding scalar store. So the order of the accesses is preserved in
380 case the load is before the store. */
381 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
382 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
383 return false;
385 return true;
389 /* Function vect_check_interleaving.
391 Check if DRA and DRB are a part of interleaving. In case they are, insert
392 DRA and DRB in an interleaving chain. */
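/* Illustrative example: the accesses a[2*i] and a[2*i+1] in the same loop
   (type size 4, step 8 for both, inits 0 and 4) have the same base and step,
   their init difference 4 is a multiple of the type size and does not exceed
   the step, so they form a single interleaving chain of size 2.  */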
394 static bool
395 vect_check_interleaving (struct data_reference *dra,
396 struct data_reference *drb)
398 HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b;
400 /* Check that the data-refs have same first location (except init) and they
401 are both either store or load (not load and store). */
402 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
403 || !dr_equal_offsets_p (dra, drb)
404 || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
405 || DR_IS_READ (dra) != DR_IS_READ (drb))
406 return false;
408 /* Check:
409 1. data-refs are of the same type
410 2. their steps are equal
411 3. the step (if greater than zero) is greater than the difference between
412 data-refs' inits. */
413 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
414 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
416 if (type_size_a != type_size_b
417 || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
418 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
419 TREE_TYPE (DR_REF (drb))))
420 return false;
422 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
423 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
424 step = TREE_INT_CST_LOW (DR_STEP (dra));
426 if (init_a > init_b)
428 /* If init_a == init_b + the size of the type * k, we have an interleaving,
429 and DRB is accessed before DRA. */
430 diff_mod_size = (init_a - init_b) % type_size_a;
432 if (step && (init_a - init_b) > step)
433 return false;
435 if (diff_mod_size == 0)
437 vect_update_interleaving_chain (drb, dra);
438 if (vect_print_dump_info (REPORT_DR_DETAILS))
440 fprintf (vect_dump, "Detected interleaving ");
441 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
442 fprintf (vect_dump, " and ");
443 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
445 return true;
448 else
450 /* If init_b == init_a + the size of the type * k, we have an
451 interleaving, and DRA is accessed before DRB. */
452 diff_mod_size = (init_b - init_a) % type_size_a;
454 if (step && (init_b - init_a) > step)
455 return false;
457 if (diff_mod_size == 0)
459 vect_update_interleaving_chain (dra, drb);
460 if (vect_print_dump_info (REPORT_DR_DETAILS))
462 fprintf (vect_dump, "Detected interleaving ");
463 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
464 fprintf (vect_dump, " and ");
465 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
467 return true;
471 return false;
474 /* Check if data references pointed by DR_I and DR_J are same or
475 belong to same interleaving group. Return FALSE if drs are
476 different, otherwise return TRUE. */
478 static bool
479 vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
481 gimple stmt_i = DR_STMT (dr_i);
482 gimple stmt_j = DR_STMT (dr_j);
484 if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
485 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
486 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))
487 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
488 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)))))
489 return true;
490 else
491 return false;
494 /* If address ranges represented by DDR_I and DDR_J are equal,
495 return TRUE, otherwise return FALSE. */
497 static bool
498 vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
500 if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
501 && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
502 || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
503 && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
504 return true;
505 else
506 return false;
509 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
510 tested at run-time. Return TRUE if DDR was successfully inserted.
511 Return false if versioning is not supported. */
513 static bool
514 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
516 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
518 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
519 return false;
521 if (vect_print_dump_info (REPORT_DR_DETAILS))
523 fprintf (vect_dump, "mark for run-time aliasing test between ");
524 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr)), TDF_SLIM);
525 fprintf (vect_dump, " and ");
526 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr)), TDF_SLIM);
529 if (optimize_loop_nest_for_size_p (loop))
531 if (vect_print_dump_info (REPORT_DR_DETAILS))
532 fprintf (vect_dump, "versioning not supported when optimizing for size.");
533 return false;
536 /* FORNOW: We don't support versioning with outer-loop vectorization. */
537 if (loop->inner)
539 if (vect_print_dump_info (REPORT_DR_DETAILS))
540 fprintf (vect_dump, "versioning not yet supported for outer-loops.");
541 return false;
544 VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
545 return true;
549 /* Function vect_analyze_data_ref_dependence.
551 Return TRUE if there (might) exist a dependence between a memory-reference
552 DRA and a memory-reference DRB. When the dependence can instead be
553 checked at run-time by versioning for alias, return FALSE. Adjust *MAX_VF according to
554 the data dependence. */
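/* Illustrative example: for the loop
     for (i = 3; i < n; i++)
       a[i] = a[i-3] + 1;
   the dependence distance is 3.  Vectorizing with VF <= 3 is safe, so *MAX_VF
   is reduced to 3; a distance of 0 (e.g. a[i] = a[i] + 1) or a distance >= VF
   does not restrict vectorization.  */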
556 static bool
557 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
558 loop_vec_info loop_vinfo, int *max_vf)
560 unsigned int i;
561 struct loop *loop = NULL;
562 struct data_reference *dra = DDR_A (ddr);
563 struct data_reference *drb = DDR_B (ddr);
564 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
565 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
566 lambda_vector dist_v;
567 unsigned int loop_depth;
569 /* Don't bother to analyze statements marked as unvectorizable. */
570 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
571 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
572 return false;
574 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
576 /* Independent data accesses. */
577 vect_check_interleaving (dra, drb);
578 return false;
581 if (loop_vinfo)
582 loop = LOOP_VINFO_LOOP (loop_vinfo);
584 if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
585 return false;
587 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
589 gimple earlier_stmt;
591 if (loop_vinfo)
593 if (vect_print_dump_info (REPORT_DR_DETAILS))
595 fprintf (vect_dump, "versioning for alias required: "
596 "can't determine dependence between ");
597 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
598 fprintf (vect_dump, " and ");
599 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
602 /* Add to list of ddrs that need to be tested at run-time. */
603 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
606 /* When vectorizing a basic block an unknown dependence can still mean
607 strided access. */
608 if (vect_check_interleaving (dra, drb))
609 return false;
611 /* Read-read is OK (we need this check here, after checking for
612 interleaving). */
613 if (DR_IS_READ (dra) && DR_IS_READ (drb))
614 return false;
616 if (vect_print_dump_info (REPORT_DR_DETAILS))
618 fprintf (vect_dump, "can't determine dependence between ");
619 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
620 fprintf (vect_dump, " and ");
621 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
624 /* We do not vectorize basic blocks with write-write dependencies. */
625 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
626 return true;
628 /* Check that it's not a load-after-store dependence. */
629 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
630 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
631 return true;
633 return false;
636 /* Versioning for alias is not yet supported for basic block SLP, and
637 dependence distance is not applicable, hence, in case of known data
638 dependence, basic block vectorization is impossible for now. */
639 if (!loop_vinfo)
641 if (dra != drb && vect_check_interleaving (dra, drb))
642 return false;
644 if (vect_print_dump_info (REPORT_DR_DETAILS))
646 fprintf (vect_dump, "determined dependence between ");
647 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
648 fprintf (vect_dump, " and ");
649 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
652 /* Do not vectorize basic blocks with write-write dependences. */
653 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
654 return true;
656 /* Check if this dependence is allowed in basic block vectorization. */
657 return vect_drs_dependent_in_basic_block (dra, drb);
660 /* Loop-based vectorization and known data dependence. */
661 if (DDR_NUM_DIST_VECTS (ddr) == 0)
663 if (vect_print_dump_info (REPORT_DR_DETAILS))
665 fprintf (vect_dump, "versioning for alias required: bad dist vector for ");
666 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
667 fprintf (vect_dump, " and ");
668 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
670 /* Add to list of ddrs that need to be tested at run-time. */
671 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
674 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
675 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
677 int dist = dist_v[loop_depth];
679 if (vect_print_dump_info (REPORT_DR_DETAILS))
680 fprintf (vect_dump, "dependence distance = %d.", dist);
682 if (dist == 0)
684 if (vect_print_dump_info (REPORT_DR_DETAILS))
686 fprintf (vect_dump, "dependence distance == 0 between ");
687 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
688 fprintf (vect_dump, " and ");
689 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
692 /* For interleaving, mark that there is a read-write dependency if
693 necessary. We have already checked that one of the data-refs is a store. */
694 if (DR_IS_READ (dra))
695 GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
696 else
698 if (DR_IS_READ (drb))
699 GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
702 continue;
705 if (dist > 0 && DDR_REVERSED_P (ddr))
707 /* If DDR_REVERSED_P the order of the data-refs in DDR was
708 reversed (to make distance vector positive), and the actual
709 distance is negative. */
710 if (vect_print_dump_info (REPORT_DR_DETAILS))
711 fprintf (vect_dump, "dependence distance negative.");
712 continue;
715 if (abs (dist) >= 2
716 && abs (dist) < *max_vf)
718 /* The dependence distance requires reduction of the maximal
719 vectorization factor. */
720 *max_vf = abs (dist);
721 if (vect_print_dump_info (REPORT_DR_DETAILS))
722 fprintf (vect_dump, "adjusting maximal vectorization factor to %i",
723 *max_vf);
726 if (abs (dist) >= *max_vf)
728 /* Dependence distance does not create dependence, as far as
729 vectorization is concerned, in this case. */
730 if (vect_print_dump_info (REPORT_DR_DETAILS))
731 fprintf (vect_dump, "dependence distance >= VF.");
732 continue;
735 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
737 fprintf (vect_dump, "not vectorized, possible dependence "
738 "between data-refs ");
739 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
740 fprintf (vect_dump, " and ");
741 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
744 return true;
747 return false;
750 /* Function vect_analyze_data_ref_dependences.
752 Examine all the data references in the loop, and make sure there do not
753 exist any data dependences between them. Set *MAX_VF according to
754 the maximum vectorization factor the data dependences allow. */
756 bool
757 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
758 bb_vec_info bb_vinfo, int *max_vf)
760 unsigned int i;
761 VEC (ddr_p, heap) *ddrs = NULL;
762 struct data_dependence_relation *ddr;
764 if (vect_print_dump_info (REPORT_DETAILS))
765 fprintf (vect_dump, "=== vect_analyze_dependences ===");
767 if (loop_vinfo)
768 ddrs = LOOP_VINFO_DDRS (loop_vinfo);
769 else
770 ddrs = BB_VINFO_DDRS (bb_vinfo);
772 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
773 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
774 return false;
776 return true;
780 /* Function vect_compute_data_ref_alignment
782 Compute the misalignment of the data reference DR.
784 Output:
785 1. If during the misalignment computation it is found that the data reference
786 cannot be vectorized then false is returned.
787 2. DR_MISALIGNMENT (DR) is defined.
789 FOR NOW: No analysis is actually performed. Misalignment is calculated
790 only for trivial cases. TODO. */
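/* Illustrative example: for a data-ref whose DR_BASE_ADDRESS is known to be
   16-byte aligned, DR_INIT 8 and a 16-byte vector type (e.g. V4SI), the
   misalignment recorded below is 8 MOD 16 = 8; with DR_INIT 32 it would be 0,
   i.e. an aligned access.  */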
792 static bool
793 vect_compute_data_ref_alignment (struct data_reference *dr)
795 gimple stmt = DR_STMT (dr);
796 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
797 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
798 struct loop *loop = NULL;
799 tree ref = DR_REF (dr);
800 tree vectype;
801 tree base, base_addr;
802 bool base_aligned;
803 tree misalign;
804 tree aligned_to, alignment;
806 if (vect_print_dump_info (REPORT_DETAILS))
807 fprintf (vect_dump, "vect_compute_data_ref_alignment:");
809 if (loop_vinfo)
810 loop = LOOP_VINFO_LOOP (loop_vinfo);
812 /* Initialize misalignment to unknown. */
813 SET_DR_MISALIGNMENT (dr, -1);
815 misalign = DR_INIT (dr);
816 aligned_to = DR_ALIGNED_TO (dr);
817 base_addr = DR_BASE_ADDRESS (dr);
818 vectype = STMT_VINFO_VECTYPE (stmt_info);
820 /* In case the dataref is in an inner-loop of the loop that is being
821 vectorized (LOOP), we use the base and misalignment information
822 relative to the outer-loop (LOOP). This is ok only if the misalignment
823 stays the same throughout the execution of the inner-loop, which is why
824 we have to check that the stride of the dataref in the inner-loop is
825 evenly divisible by the vector size. */
826 if (loop && nested_in_vect_loop_p (loop, stmt))
828 tree step = DR_STEP (dr);
829 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
831 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
833 if (vect_print_dump_info (REPORT_ALIGNMENT))
834 fprintf (vect_dump, "inner step divides the vector-size.");
835 misalign = STMT_VINFO_DR_INIT (stmt_info);
836 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
837 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
839 else
841 if (vect_print_dump_info (REPORT_ALIGNMENT))
842 fprintf (vect_dump, "inner step doesn't divide the vector-size.");
843 misalign = NULL_TREE;
847 base = build_fold_indirect_ref (base_addr);
848 alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);
850 if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
851 || !misalign)
853 if (vect_print_dump_info (REPORT_ALIGNMENT))
855 fprintf (vect_dump, "Unknown alignment for access: ");
856 print_generic_expr (vect_dump, base, TDF_SLIM);
858 return true;
861 if ((DECL_P (base)
862 && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
863 alignment) >= 0)
864 || (TREE_CODE (base_addr) == SSA_NAME
865 && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
866 TREE_TYPE (base_addr)))),
867 alignment) >= 0)
868 || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
869 base_aligned = true;
870 else
871 base_aligned = false;
873 if (!base_aligned)
875 /* Do not change the alignment of global variables if
876 flag_section_anchors is enabled. */
877 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
878 || (TREE_STATIC (base) && flag_section_anchors))
880 if (vect_print_dump_info (REPORT_DETAILS))
882 fprintf (vect_dump, "can't force alignment of ref: ");
883 print_generic_expr (vect_dump, ref, TDF_SLIM);
885 return true;
888 /* Force the alignment of the decl.
889 NOTE: This is the only change to the code we make during
890 the analysis phase, before deciding to vectorize the loop. */
891 if (vect_print_dump_info (REPORT_DETAILS))
893 fprintf (vect_dump, "force alignment of ");
894 print_generic_expr (vect_dump, ref, TDF_SLIM);
897 DECL_ALIGN (base) = TYPE_ALIGN (vectype);
898 DECL_USER_ALIGN (base) = 1;
901 /* At this point we assume that the base is aligned. */
902 gcc_assert (base_aligned
903 || (TREE_CODE (base) == VAR_DECL
904 && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
906 /* If this is a backward running DR then first access in the larger
907 vectype actually is N-1 elements before the address in the DR.
908 Adjust misalign accordingly. */
909 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
911 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
912 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
913 otherwise we wouldn't be here. */
914 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
915 /* PLUS because DR_STEP was negative. */
916 misalign = size_binop (PLUS_EXPR, misalign, offset);
919 /* Modulo alignment. */
920 misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);
922 if (!host_integerp (misalign, 1))
924 /* Negative or overflowed misalignment value. */
925 if (vect_print_dump_info (REPORT_DETAILS))
926 fprintf (vect_dump, "unexpected misalign value");
927 return false;
930 SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
932 if (vect_print_dump_info (REPORT_DETAILS))
934 fprintf (vect_dump, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
935 print_generic_expr (vect_dump, ref, TDF_SLIM);
938 return true;
942 /* Function vect_compute_data_refs_alignment
944 Compute the misalignment of data references in the loop.
945 Return FALSE if a data reference is found that cannot be vectorized. */
947 static bool
948 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
949 bb_vec_info bb_vinfo)
951 VEC (data_reference_p, heap) *datarefs;
952 struct data_reference *dr;
953 unsigned int i;
955 if (loop_vinfo)
956 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
957 else
958 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
960 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
961 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
962 && !vect_compute_data_ref_alignment (dr))
964 if (bb_vinfo)
966 /* Mark unsupported statement as unvectorizable. */
967 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
968 continue;
970 else
971 return false;
974 return true;
978 /* Function vect_update_misalignment_for_peel
980 DR - the data reference whose misalignment is to be adjusted.
981 DR_PEEL - the data reference whose misalignment is being made
982 zero in the vector loop by the peel.
983 NPEEL - the number of iterations in the peel loop if the misalignment
984 of DR_PEEL is known at compile time. */
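/* Illustrative example: if DR_PEEL becomes aligned by peeling NPEEL = 1
   iteration, a second data-ref with element size 4, a positive step and
   DR_MISALIGNMENT 12 against a 16-byte vector type gets the new misalignment
   (12 + 1 * 4) & 15 = 0, i.e. it also becomes aligned.  */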
986 static void
987 vect_update_misalignment_for_peel (struct data_reference *dr,
988 struct data_reference *dr_peel, int npeel)
990 unsigned int i;
991 VEC(dr_p,heap) *same_align_drs;
992 struct data_reference *current_dr;
993 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
994 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
995 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
996 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
998 /* For interleaved data accesses the step in the loop must be multiplied by
999 the size of the interleaving group. */
1000 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1001 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
1002 if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
1003 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
1005 /* It can be assumed that the data refs with the same alignment as dr_peel
1006 are aligned in the vector loop. */
1007 same_align_drs
1008 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
1009 FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
1011 if (current_dr != dr)
1012 continue;
1013 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
1014 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
1015 SET_DR_MISALIGNMENT (dr, 0);
1016 return;
1019 if (known_alignment_for_access_p (dr)
1020 && known_alignment_for_access_p (dr_peel))
1022 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
1023 int misal = DR_MISALIGNMENT (dr);
1024 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1025 misal += negative ? -npeel * dr_size : npeel * dr_size;
1026 misal &= GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1027 SET_DR_MISALIGNMENT (dr, misal);
1028 return;
1031 if (vect_print_dump_info (REPORT_DETAILS))
1032 fprintf (vect_dump, "Setting misalignment to -1.");
1033 SET_DR_MISALIGNMENT (dr, -1);
1037 /* Function vect_verify_datarefs_alignment
1039 Return TRUE if all data references in the loop can be
1040 handled with respect to alignment. */
1042 bool
1043 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
1045 VEC (data_reference_p, heap) *datarefs;
1046 struct data_reference *dr;
1047 enum dr_alignment_support supportable_dr_alignment;
1048 unsigned int i;
1050 if (loop_vinfo)
1051 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1052 else
1053 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
1055 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1057 gimple stmt = DR_STMT (dr);
1058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1060 /* For interleaving, only the alignment of the first access matters.
1061 Skip statements marked as not vectorizable. */
1062 if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
1063 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1064 || !STMT_VINFO_VECTORIZABLE (stmt_info))
1065 continue;
1067 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1068 if (!supportable_dr_alignment)
1070 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1072 if (DR_IS_READ (dr))
1073 fprintf (vect_dump,
1074 "not vectorized: unsupported unaligned load.");
1075 else
1076 fprintf (vect_dump,
1077 "not vectorized: unsupported unaligned store.");
1079 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
1081 return false;
1083 if (supportable_dr_alignment != dr_aligned
1084 && vect_print_dump_info (REPORT_ALIGNMENT))
1085 fprintf (vect_dump, "Vectorizing an unaligned access.");
1087 return true;
1091 /* Function vector_alignment_reachable_p
1093 Return true if vector alignment for DR is reachable by peeling
1094 a few loop iterations. Return false otherwise. */
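/* Illustrative example: with a V4SI vector type (4 elements of 4 bytes) and
   a known misalignment of 8 bytes (2 elements), peeling 4 - 2 = 2 iterations
   reaches alignment; for an interleaved group this only works if 2 is a
   multiple of the group size (group size 2: yes, group size 3: no).  */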
1096 static bool
1097 vector_alignment_reachable_p (struct data_reference *dr)
1099 gimple stmt = DR_STMT (dr);
1100 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1101 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1103 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1105 /* For interleaved accesses we peel only if the number of iterations in
1106 the prolog loop (VF - misalignment) is a multiple of the
1107 number of interleaved accesses. */
1108 int elem_size, mis_in_elements;
1109 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1111 /* FORNOW: handle only known alignment. */
1112 if (!known_alignment_for_access_p (dr))
1113 return false;
1115 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1116 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1118 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
1119 return false;
1122 /* If misalignment is known at the compile time then allow peeling
1123 only if natural alignment is reachable through peeling. */
1124 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1126 HOST_WIDE_INT elmsize =
1127 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1128 if (vect_print_dump_info (REPORT_DETAILS))
1130 fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1131 fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr));
1133 if (DR_MISALIGNMENT (dr) % elmsize)
1135 if (vect_print_dump_info (REPORT_DETAILS))
1136 fprintf (vect_dump, "data size does not divide the misalignment.\n");
1137 return false;
1141 if (!known_alignment_for_access_p (dr))
1143 tree type = (TREE_TYPE (DR_REF (dr)));
1144 tree ba = DR_BASE_OBJECT (dr);
1145 bool is_packed = false;
1147 if (ba)
1148 is_packed = contains_packed_reference (ba);
1150 if (compare_tree_int (TYPE_SIZE (type), TYPE_ALIGN (type)) > 0)
1151 is_packed = true;
1153 if (vect_print_dump_info (REPORT_DETAILS))
1154 fprintf (vect_dump, "Unknown misalignment, is_packed = %d",is_packed);
1155 if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
1156 return true;
1157 else
1158 return false;
1161 return true;
1165 /* Calculate the cost of the memory access represented by DR. */
1167 static void
1168 vect_get_data_access_cost (struct data_reference *dr,
1169 unsigned int *inside_cost,
1170 unsigned int *outside_cost)
1172 gimple stmt = DR_STMT (dr);
1173 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1174 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1175 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1176 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1177 int ncopies = vf / nunits;
1178 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1180 if (!supportable_dr_alignment)
1181 *inside_cost = VECT_MAX_COST;
1182 else
1184 if (DR_IS_READ (dr))
1185 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost);
1186 else
1187 vect_get_store_cost (dr, ncopies, inside_cost);
1190 if (vect_print_dump_info (REPORT_COST))
1191 fprintf (vect_dump, "vect_get_data_access_cost: inside_cost = %d, "
1192 "outside_cost = %d.", *inside_cost, *outside_cost);
1196 static hashval_t
1197 vect_peeling_hash (const void *elem)
1199 const struct _vect_peel_info *peel_info;
1201 peel_info = (const struct _vect_peel_info *) elem;
1202 return (hashval_t) peel_info->npeel;
1206 static int
1207 vect_peeling_hash_eq (const void *elem1, const void *elem2)
1209 const struct _vect_peel_info *a, *b;
1211 a = (const struct _vect_peel_info *) elem1;
1212 b = (const struct _vect_peel_info *) elem2;
1213 return (a->npeel == b->npeel);
1217 /* Insert DR into peeling hash table with NPEEL as key. */
1219 static void
1220 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1221 int npeel)
1223 struct _vect_peel_info elem, *slot;
1224 void **new_slot;
1225 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1227 elem.npeel = npeel;
1228 slot = (vect_peel_info) htab_find (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1229 &elem);
1230 if (slot)
1231 slot->count++;
1232 else
1234 slot = XNEW (struct _vect_peel_info);
1235 slot->npeel = npeel;
1236 slot->dr = dr;
1237 slot->count = 1;
1238 new_slot = htab_find_slot (LOOP_VINFO_PEELING_HTAB (loop_vinfo), slot,
1239 INSERT);
1240 *new_slot = slot;
1243 if (!supportable_dr_alignment && !flag_vect_cost_model)
1244 slot->count += VECT_MAX_COST;
1248 /* Traverse peeling hash table to find peeling option that aligns maximum
1249 number of data accesses. */
1251 static int
1252 vect_peeling_hash_get_most_frequent (void **slot, void *data)
1254 vect_peel_info elem = (vect_peel_info) *slot;
1255 vect_peel_extended_info max = (vect_peel_extended_info) data;
1257 if (elem->count > max->peel_info.count
1258 || (elem->count == max->peel_info.count
1259 && max->peel_info.npeel > elem->npeel))
1261 max->peel_info.npeel = elem->npeel;
1262 max->peel_info.count = elem->count;
1263 max->peel_info.dr = elem->dr;
1266 return 1;
1270 /* Traverse peeling hash table and calculate cost for each peeling option.
1271 Find the one with the lowest cost. */
1273 static int
1274 vect_peeling_hash_get_lowest_cost (void **slot, void *data)
1276 vect_peel_info elem = (vect_peel_info) *slot;
1277 vect_peel_extended_info min = (vect_peel_extended_info) data;
1278 int save_misalignment, dummy;
1279 unsigned int inside_cost = 0, outside_cost = 0, i;
1280 gimple stmt = DR_STMT (elem->dr);
1281 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1282 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1283 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1284 struct data_reference *dr;
1286 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1288 stmt = DR_STMT (dr);
1289 stmt_info = vinfo_for_stmt (stmt);
1290 /* For interleaving, only the alignment of the first access
1291 matters. */
1292 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1293 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1294 continue;
1296 save_misalignment = DR_MISALIGNMENT (dr);
1297 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1298 vect_get_data_access_cost (dr, &inside_cost, &outside_cost);
1299 SET_DR_MISALIGNMENT (dr, save_misalignment);
1302 outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel, &dummy,
1303 vect_get_single_scalar_iteraion_cost (loop_vinfo));
1305 if (inside_cost < min->inside_cost
1306 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1308 min->inside_cost = inside_cost;
1309 min->outside_cost = outside_cost;
1310 min->peel_info.dr = elem->dr;
1311 min->peel_info.npeel = elem->npeel;
1314 return 1;
1318 /* Choose best peeling option by traversing peeling hash table and either
1319 choosing an option with the lowest cost (if cost model is enabled) or the
1320 option that aligns as many accesses as possible. */
1322 static struct data_reference *
1323 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1324 unsigned int *npeel)
1326 struct _vect_peel_extended_info res;
1328 res.peel_info.dr = NULL;
1330 if (flag_vect_cost_model)
1332 res.inside_cost = INT_MAX;
1333 res.outside_cost = INT_MAX;
1334 htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1335 vect_peeling_hash_get_lowest_cost, &res);
1337 else
1339 res.peel_info.count = 0;
1340 htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1341 vect_peeling_hash_get_most_frequent, &res);
1344 *npeel = res.peel_info.npeel;
1345 return res.peel_info.dr;
1349 /* Function vect_enhance_data_refs_alignment
1351 This pass will use loop versioning and loop peeling in order to enhance
1352 the alignment of data references in the loop.
1354 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1355 original loop is to be vectorized. Any other loops that are created by
1356 the transformations performed in this pass - are not supposed to be
1357 vectorized. This restriction will be relaxed.
1359 This pass will require a cost model to guide it whether to apply peeling
1360 or versioning or a combination of the two. For example, the scheme that
1361 Intel uses when given a loop with several memory accesses, is as follows:
1362 choose one memory access ('p') which alignment you want to force by doing
1363 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1364 other accesses are not necessarily aligned, or (2) use loop versioning to
1365 generate one loop in which all accesses are aligned, and another loop in
1366 which only 'p' is necessarily aligned.
1368 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1369 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1370 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1372 Devising a cost model is the most critical aspect of this work. It will
1373 guide us on which access to peel for, whether to use loop versioning, how
1374 many versions to create, etc. The cost model will probably consist of
1375 generic considerations as well as target specific considerations (on
1376 powerpc for example, misaligned stores are more painful than misaligned
1377 loads).
1379 Here are the general steps involved in alignment enhancements:
1381 -- original loop, before alignment analysis:
1382 for (i=0; i<N; i++){
1383 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1384 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1387 -- After vect_compute_data_refs_alignment:
1388 for (i=0; i<N; i++){
1389 x = q[i]; # DR_MISALIGNMENT(q) = 3
1390 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1393 -- Possibility 1: we do loop versioning:
1394 if (p is aligned) {
1395 for (i=0; i<N; i++){ # loop 1A
1396 x = q[i]; # DR_MISALIGNMENT(q) = 3
1397 p[i] = y; # DR_MISALIGNMENT(p) = 0
1400 else {
1401 for (i=0; i<N; i++){ # loop 1B
1402 x = q[i]; # DR_MISALIGNMENT(q) = 3
1403 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1407 -- Possibility 2: we do loop peeling:
1408 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1409 x = q[i];
1410 p[i] = y;
1412 for (i = 3; i < N; i++){ # loop 2A
1413 x = q[i]; # DR_MISALIGNMENT(q) = 0
1414 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1417 -- Possibility 3: combination of loop peeling and versioning:
1418 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1419 x = q[i];
1420 p[i] = y;
1422 if (p is aligned) {
1423 for (i = 3; i<N; i++){ # loop 3A
1424 x = q[i]; # DR_MISALIGNMENT(q) = 0
1425 p[i] = y; # DR_MISALIGNMENT(p) = 0
1428 else {
1429 for (i = 3; i<N; i++){ # loop 3B
1430 x = q[i]; # DR_MISALIGNMENT(q) = 0
1431 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1435 These loops are later passed to loop_transform to be vectorized. The
1436 vectorizer will use the alignment information to guide the transformation
1437 (whether to generate regular loads/stores, or with special handling for
1438 misalignment). */
1440 bool
1441 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1443 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1444 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1445 enum dr_alignment_support supportable_dr_alignment;
1446 struct data_reference *dr0 = NULL, *first_store = NULL;
1447 struct data_reference *dr;
1448 unsigned int i, j;
1449 bool do_peeling = false;
1450 bool do_versioning = false;
1451 bool stat;
1452 gimple stmt;
1453 stmt_vec_info stmt_info;
1454 int vect_versioning_for_alias_required;
1455 unsigned int npeel = 0;
1456 bool all_misalignments_unknown = true;
1457 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1458 unsigned possible_npeel_number = 1;
1459 tree vectype;
1460 unsigned int nelements, mis, same_align_drs_max = 0;
1462 if (vect_print_dump_info (REPORT_DETAILS))
1463 fprintf (vect_dump, "=== vect_enhance_data_refs_alignment ===");
1465 /* While cost model enhancements are expected in the future, the high level
1466 view of the code at this time is as follows:
1468 A) If there is a misaligned access then see if peeling to align
1469 this access can make all data references satisfy
1470 vect_supportable_dr_alignment. If so, update data structures
1471 as needed and return true.
1473 B) If peeling wasn't possible and there is a data reference with an
1474 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1475 then see if loop versioning checks can be used to make all data
1476 references satisfy vect_supportable_dr_alignment. If so, update
1477 data structures as needed and return true.
1479 C) If neither peeling nor versioning were successful then return false if
1480 any data reference does not satisfy vect_supportable_dr_alignment.
1482 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1484 Note, Possibility 3 above (which is peeling and versioning together) is not
1485 being done at this time. */
1487 /* (1) Peeling to force alignment. */
1489 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1490 Considerations:
1491 + How many accesses will become aligned due to the peeling
1492 - How many accesses will become unaligned due to the peeling,
1493 and the cost of misaligned accesses.
1494 - The cost of peeling (the extra runtime checks, the increase
1495 in code size). */
1497 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1499 stmt = DR_STMT (dr);
1500 stmt_info = vinfo_for_stmt (stmt);
1502 if (!STMT_VINFO_RELEVANT (stmt_info))
1503 continue;
1505 /* For interleaving, only the alignment of the first access
1506 matters. */
1507 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1508 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1509 continue;
1511 /* For invariant accesses there is nothing to enhance. */
1512 if (integer_zerop (DR_STEP (dr)))
1513 continue;
1515 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1516 do_peeling = vector_alignment_reachable_p (dr);
1517 if (do_peeling)
1519 if (known_alignment_for_access_p (dr))
1521 unsigned int npeel_tmp;
1522 bool negative = tree_int_cst_compare (DR_STEP (dr),
1523 size_zero_node) < 0;
1525 /* Save info about DR in the hash table. */
1526 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1527 LOOP_VINFO_PEELING_HTAB (loop_vinfo) =
1528 htab_create (1, vect_peeling_hash,
1529 vect_peeling_hash_eq, free);
1531 vectype = STMT_VINFO_VECTYPE (stmt_info);
1532 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1533 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1534 TREE_TYPE (DR_REF (dr))));
1535 npeel_tmp = (negative
1536 ? (mis - nelements) : (nelements - mis))
1537 & (nelements - 1);
1539 /* For multiple types, it is possible that the bigger type access
1540 will have more than one peeling option. E.g., a loop with two
1541 types: one of size (vector size / 4), and the other one of
1542 size (vector size / 8). The vectorization factor will be 8. If both
1543 accesses are misaligned by 3, the first one needs one scalar
1544 iteration to be aligned, and the second one needs 5. But the
1545 first one will be aligned also by peeling 5 scalar
1546 iterations, and in that case both accesses will be aligned.
1547 Hence, except for the immediate peeling amount, we also want
1548 to try to add full vector size, while we don't exceed
1549 vectorization factor.
1550 We do this automatically for the cost model, since we calculate the cost
1551 for every peeling option. */
1552 if (!flag_vect_cost_model)
1553 possible_npeel_number = vf / nelements;
1555 /* Handle the aligned case. We may decide to align some other
1556 access, making DR unaligned. */
1557 if (DR_MISALIGNMENT (dr) == 0)
1559 npeel_tmp = 0;
1560 if (!flag_vect_cost_model)
1561 possible_npeel_number++;
1564 for (j = 0; j < possible_npeel_number; j++)
1566 gcc_assert (npeel_tmp <= vf);
1567 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1568 npeel_tmp += nelements;
1571 all_misalignments_unknown = false;
1572 /* Data-ref that was chosen for the case that all the
1573 misalignments are unknown is not relevant anymore, since we
1574 have a data-ref with known alignment. */
1575 dr0 = NULL;
1577 else
1579 /* If we don't know all the misalignment values, we prefer
1580 peeling for the data-ref that has the maximum number of data-refs
1581 with the same alignment, unless the target prefers to align
1582 stores over loads. */
1583 if (all_misalignments_unknown)
1585 if (same_align_drs_max < VEC_length (dr_p,
1586 STMT_VINFO_SAME_ALIGN_REFS (stmt_info))
1587 || !dr0)
1589 same_align_drs_max = VEC_length (dr_p,
1590 STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
1591 dr0 = dr;
1594 if (!first_store && DR_IS_WRITE (dr))
1595 first_store = dr;
1598 /* If there are both known and unknown misaligned accesses in the
1599 loop, we choose peeling amount according to the known
1600 accesses. */
1603 if (!supportable_dr_alignment)
1605 dr0 = dr;
1606 if (!first_store && DR_IS_WRITE (dr))
1607 first_store = dr;
1611 else
1613 if (!aligned_access_p (dr))
1615 if (vect_print_dump_info (REPORT_DETAILS))
1616 fprintf (vect_dump, "vector alignment may not be reachable");
1618 break;
1623 vect_versioning_for_alias_required
1624 = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
1626 /* Temporarily, if versioning for alias is required, we disable peeling
1627 until we support peeling and versioning. Often peeling for alignment
1628 will require peeling for loop-bound, which in turn requires that we
1629 know how to adjust the loop ivs after the loop. */
1630 if (vect_versioning_for_alias_required
1631 || !vect_can_advance_ivs_p (loop_vinfo)
1632 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1633 do_peeling = false;
1635 if (do_peeling && all_misalignments_unknown
1636 && vect_supportable_dr_alignment (dr0, false))
1639 /* Check if the target prefers to align stores over loads, i.e., if
1640 misaligned stores are more expensive than misaligned loads (taking
1641 drs with same alignment into account). */
1642 if (first_store && DR_IS_READ (dr0))
1644 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1645 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1646 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1647 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1649 vect_get_data_access_cost (dr0, &load_inside_cost,
1650 &load_outside_cost);
1651 vect_get_data_access_cost (first_store, &store_inside_cost,
1652 &store_outside_cost);
1654 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1655 aligning the load DR0). */
1656 load_inside_penalty = store_inside_cost;
1657 load_outside_penalty = store_outside_cost;
1658 for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1659 (vinfo_for_stmt (DR_STMT (first_store))),
1660 i, dr);
1661 i++)
1662 if (DR_IS_READ (dr))
1664 load_inside_penalty += load_inside_cost;
1665 load_outside_penalty += load_outside_cost;
1667 else
1669 load_inside_penalty += store_inside_cost;
1670 load_outside_penalty += store_outside_cost;
1673 /* Calculate the penalty for leaving DR0 unaligned (by
1674 aligning the FIRST_STORE). */
1675 store_inside_penalty = load_inside_cost;
1676 store_outside_penalty = load_outside_cost;
1677 for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1678 (vinfo_for_stmt (DR_STMT (dr0))),
1679 i, dr);
1680 i++)
1681 if (DR_IS_READ (dr))
1683 store_inside_penalty += load_inside_cost;
1684 store_outside_penalty += load_outside_cost;
1686 else
1688 store_inside_penalty += store_inside_cost;
1689 store_outside_penalty += store_outside_cost;
1692 if (load_inside_penalty > store_inside_penalty
1693 || (load_inside_penalty == store_inside_penalty
1694 && load_outside_penalty > store_outside_penalty))
1695 dr0 = first_store;
1698 /* In case there are only loads with different unknown misalignments, use
1699 peeling only if it may help to align other accesses in the loop. */
1700 if (!first_store && !VEC_length (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1701 (vinfo_for_stmt (DR_STMT (dr0))))
1702 && vect_supportable_dr_alignment (dr0, false)
1703 != dr_unaligned_supported)
1704 do_peeling = false;
1707 if (do_peeling && !dr0)
1709 /* Peeling is possible, but there is no data access that is not supported
1710 unless aligned. So we try to choose the best possible peeling. */
1712 /* We should get here only if there are drs with known misalignment. */
1713 gcc_assert (!all_misalignments_unknown);
1715 /* Choose the best peeling from the hash table. */
1716 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel);
1717 if (!dr0 || !npeel)
1718 do_peeling = false;
1721 if (do_peeling)
1723 stmt = DR_STMT (dr0);
1724 stmt_info = vinfo_for_stmt (stmt);
1725 vectype = STMT_VINFO_VECTYPE (stmt_info);
1726 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1728 if (known_alignment_for_access_p (dr0))
1730 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1731 size_zero_node) < 0;
1732 if (!npeel)
1734 /* Since it's known at compile time, compute the number of
1735 iterations in the peeled loop (the peeling factor) for use in
1736 updating DR_MISALIGNMENT values. The peeling factor is the
1737 vectorization factor minus the misalignment as an element
1738 count. */
1739 mis = DR_MISALIGNMENT (dr0);
1740 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1741 npeel = ((negative ? mis - nelements : nelements - mis)
1742 & (nelements - 1));
1745 /* For interleaved data access every iteration accesses all the
1746 members of the group, therefore we divide the number of iterations
1747 by the group size. */
1748 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1749 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1750 npeel /= GROUP_SIZE (stmt_info);
1752 if (vect_print_dump_info (REPORT_DETAILS))
1753 fprintf (vect_dump, "Try peeling by %d", npeel);
1756 /* Ensure that all data refs can be vectorized after the peel. */
1757 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1759 int save_misalignment;
1761 if (dr == dr0)
1762 continue;
1764 stmt = DR_STMT (dr);
1765 stmt_info = vinfo_for_stmt (stmt);
1766 /* For interleaving, only the alignment of the first access
1767 matters. */
1768 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1769 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1770 continue;
1772 save_misalignment = DR_MISALIGNMENT (dr);
1773 vect_update_misalignment_for_peel (dr, dr0, npeel);
1774 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1775 SET_DR_MISALIGNMENT (dr, save_misalignment);
1777 if (!supportable_dr_alignment)
1779 do_peeling = false;
1780 break;
1784 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1786 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1787 if (!stat)
1788 do_peeling = false;
1789 else
1790 return stat;
1793 if (do_peeling)
1795 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1796 If the misalignment of DR_i is identical to that of dr0 then set
1797 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1798 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1799 by the peeling factor times the element size of DR_i (MOD the
1800 vectorization factor times the size). Otherwise, the
1801 misalignment of DR_i must be set to unknown. */
1802 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1803 if (dr != dr0)
1804 vect_update_misalignment_for_peel (dr, dr0, npeel);
1806 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1807 if (npeel)
1808 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1809 else
1810 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
1811 SET_DR_MISALIGNMENT (dr0, 0);
1812 if (vect_print_dump_info (REPORT_ALIGNMENT))
1813 fprintf (vect_dump, "Alignment of access forced using peeling.");
1815 if (vect_print_dump_info (REPORT_DETAILS))
1816 fprintf (vect_dump, "Peeling for alignment will be applied.");
1818 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1819 gcc_assert (stat);
1820 return stat;
1825 /* (2) Versioning to force alignment. */
1827 /* Try versioning if:
1828 1) flag_tree_vect_loop_version is TRUE
1829 2) optimize loop for speed
1830 3) there is at least one unsupported misaligned data ref with an unknown
1831 misalignment, and
1832 4) all misaligned data refs with a known misalignment are supported, and
1833 5) the number of runtime alignment checks is within reason. */
1835 do_versioning =
1836 flag_tree_vect_loop_version
1837 && optimize_loop_nest_for_speed_p (loop)
1838 && (!loop->inner); /* FORNOW */
1840 if (do_versioning)
1842 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1844 stmt = DR_STMT (dr);
1845 stmt_info = vinfo_for_stmt (stmt);
1847 /* For interleaving, only the alignment of the first access
1848 matters. */
1849 if (aligned_access_p (dr)
1850 || (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1851 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1852 continue;
1854 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1856 if (!supportable_dr_alignment)
1858 gimple stmt;
1859 int mask;
1860 tree vectype;
1862 if (known_alignment_for_access_p (dr)
1863 || VEC_length (gimple,
1864 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
1865 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1867 do_versioning = false;
1868 break;
1871 stmt = DR_STMT (dr);
1872 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1873 gcc_assert (vectype);
1875 /* The rightmost bits of an aligned address must be zeros.
1876 Construct the mask needed for this test. For example,
1877 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1878 mask must be 15 = 0xf. */
1879 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1881 /* FORNOW: use the same mask to test all potentially unaligned
1882 references in the loop. The vectorizer currently supports
1883 a single vector size, see the reference to
1884 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1885 vectorization factor is computed. */
1886 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1887 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1888 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1889 VEC_safe_push (gimple, heap,
1890 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
1891 DR_STMT (dr));
1895 /* Versioning requires at least one misaligned data reference. */
1896 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1897 do_versioning = false;
1898 else if (!do_versioning)
1899 VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
1902 if (do_versioning)
1904 VEC(gimple,heap) *may_misalign_stmts
1905 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1906 gimple stmt;
1908 /* It can now be assumed that the data references in the statements
1909 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1910 of the loop being vectorized. */
1911 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
1913 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1914 dr = STMT_VINFO_DATA_REF (stmt_info);
1915 SET_DR_MISALIGNMENT (dr, 0);
1916 if (vect_print_dump_info (REPORT_ALIGNMENT))
1917 fprintf (vect_dump, "Alignment of access forced using versioning.");
1920 if (vect_print_dump_info (REPORT_DETAILS))
1921 fprintf (vect_dump, "Versioning for alignment will be applied.");
1923 /* Peeling and versioning can't be done together at this time. */
1924 gcc_assert (! (do_peeling && do_versioning));
1926 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1927 gcc_assert (stat);
1928 return stat;
1931 /* This point is reached if neither peeling nor versioning is being done. */
1932 gcc_assert (! (do_peeling || do_versioning));
1934 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1935 return stat;
1939 /* Function vect_find_same_alignment_drs.
1941 Update group and alignment relations according to the chosen
1942 vectorization factor. */
1944 static void
1945 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1946 loop_vec_info loop_vinfo)
1948 unsigned int i;
1949 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1950 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1951 struct data_reference *dra = DDR_A (ddr);
1952 struct data_reference *drb = DDR_B (ddr);
1953 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1954 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1955 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1956 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1957 lambda_vector dist_v;
1958 unsigned int loop_depth;
1960 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1961 return;
1963 if (dra == drb)
1964 return;
1966 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1967 return;
1969 /* Loop-based vectorization and known data dependence. */
1970 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1971 return;
1973 /* Data-dependence analysis reports a distance vector of zero
1974 for data-references that overlap only in the first iteration
1975 but have different sign step (see PR45764).
1976 So as a sanity check require equal DR_STEP. */
1977 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1978 return;
1980 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1981 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
1983 int dist = dist_v[loop_depth];
1985 if (vect_print_dump_info (REPORT_DR_DETAILS))
1986 fprintf (vect_dump, "dependence distance = %d.", dist);
1988 /* Same loop iteration. */
1989 if (dist == 0
1990 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1992 /* Two references whose distance is a multiple of the vectorization factor (including zero) have the same alignment. */
1993 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
1994 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
1995 if (vect_print_dump_info (REPORT_ALIGNMENT))
1996 fprintf (vect_dump, "accesses have the same alignment.");
1997 if (vect_print_dump_info (REPORT_DR_DETAILS))
1999 fprintf (vect_dump, "dependence distance modulo vf == 0 between ");
2000 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
2001 fprintf (vect_dump, " and ");
2002 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
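/* For illustration: with a vectorization factor of 4 and equal element
   sizes, accesses a[i] and a[i+8] have dependence distance 8; since
   8 % 4 == 0 they occupy the same position within their respective
   vectors and therefore always share the same misalignment.  */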
2009 /* Function vect_analyze_data_refs_alignment
2011 Analyze the alignment of the data-references in the loop.
2012 Return FALSE if a data reference is found that cannot be vectorized. */
2014 bool
2015 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
2016 bb_vec_info bb_vinfo)
2018 if (vect_print_dump_info (REPORT_DETAILS))
2019 fprintf (vect_dump, "=== vect_analyze_data_refs_alignment ===");
2021 /* Mark groups of data references with same alignment using
2022 data dependence information. */
2023 if (loop_vinfo)
2025 VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2026 struct data_dependence_relation *ddr;
2027 unsigned int i;
2029 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
2030 vect_find_same_alignment_drs (ddr, loop_vinfo);
2033 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2035 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2036 fprintf (vect_dump,
2037 "not vectorized: can't calculate alignment for data ref.");
2038 return false;
2041 return true;
2045 /* Analyze groups of strided accesses: check that DR belongs to a group of
2046 strided accesses of legal size, step, etc. Detect gaps, single element
2047 interleaving, and other special cases. Set strided access info.
2048 Collect groups of strided stores for further use in SLP analysis. */
2050 static bool
2051 vect_analyze_group_access (struct data_reference *dr)
2053 tree step = DR_STEP (dr);
2054 tree scalar_type = TREE_TYPE (DR_REF (dr));
2055 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2056 gimple stmt = DR_STMT (dr);
2057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2058 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2059 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2060 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2061 HOST_WIDE_INT stride, last_accessed_element = 1;
2062 bool slp_impossible = false;
2063 struct loop *loop = NULL;
2065 if (loop_vinfo)
2066 loop = LOOP_VINFO_LOOP (loop_vinfo);
2068 /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
2069 interleaving group (including gaps). */
2070 stride = dr_step / type_size;
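/* For illustration: a load of a 4-byte 'int' field from an array of
   16-byte structs has dr_step = 16 and type_size = 4, giving stride = 4,
   i.e. the interleaving group (including any gaps) spans four int-sized
   slots per scalar iteration.  */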
2072 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2073 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2075 /* Check whether this DR is part of an interleaving group in which it is
2076 the single element of the group that is accessed in the loop. */
2078 /* Gaps are supported only for loads. STEP must be a multiple of the type
2079 size. The size of the group must be a power of 2. */
2080 if (DR_IS_READ (dr)
2081 && (dr_step % type_size) == 0
2082 && stride > 0
2083 && exact_log2 (stride) != -1)
2085 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2086 GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2087 if (vect_print_dump_info (REPORT_DR_DETAILS))
2089 fprintf (vect_dump, "Detected single element interleaving ");
2090 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2091 fprintf (vect_dump, " step ");
2092 print_generic_expr (vect_dump, step, TDF_SLIM);
2095 if (loop_vinfo)
2097 if (vect_print_dump_info (REPORT_DETAILS))
2098 fprintf (vect_dump, "Data access with gaps requires scalar "
2099 "epilogue loop");
2100 if (loop->inner)
2102 if (vect_print_dump_info (REPORT_DETAILS))
2103 fprintf (vect_dump, "Peeling for outer loop is not"
2104 " supported");
2105 return false;
2108 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2111 return true;
2114 if (vect_print_dump_info (REPORT_DETAILS))
2116 fprintf (vect_dump, "not consecutive access ");
2117 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2120 if (bb_vinfo)
2122 /* Mark the statement as unvectorizable. */
2123 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2124 return true;
2127 return false;
2130 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2132 /* First stmt in the interleaving chain. Check the chain. */
2133 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2134 struct data_reference *data_ref = dr;
2135 unsigned int count = 1;
2136 tree next_step;
2137 tree prev_init = DR_INIT (data_ref);
2138 gimple prev = stmt;
2139 HOST_WIDE_INT diff, count_in_bytes, gaps = 0;
2141 while (next)
2143 /* Skip identical data-refs. When two or more stmts share a
2144 data-ref (supported only for loads), we vectorize only the first
2145 stmt; the rest get their vectorized loads from the first
2146 one. */
2147 if (!tree_int_cst_compare (DR_INIT (data_ref),
2148 DR_INIT (STMT_VINFO_DATA_REF (
2149 vinfo_for_stmt (next)))))
2151 if (DR_IS_WRITE (data_ref))
2153 if (vect_print_dump_info (REPORT_DETAILS))
2154 fprintf (vect_dump, "Two store stmts share the same dr.");
2155 return false;
2158 /* Check that there are no load-store dependences for these loads,
2159 to prevent a load-store-load sequence to the same location. */
2160 if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
2161 || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
2163 if (vect_print_dump_info (REPORT_DETAILS))
2164 fprintf (vect_dump,
2165 "READ_WRITE dependence in interleaving.");
2166 return false;
2169 /* For loads, reuse the same data-ref load. */
2170 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2172 prev = next;
2173 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2174 continue;
2177 prev = next;
2179 /* Check that all the accesses have the same STEP. */
2180 next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
2181 if (tree_int_cst_compare (step, next_step))
2183 if (vect_print_dump_info (REPORT_DETAILS))
2184 fprintf (vect_dump, "not consecutive access in interleaving");
2185 return false;
2188 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2189 /* Check that the distance between two accesses is equal to the type
2190 size. Otherwise, we have gaps. */
2191 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2192 - TREE_INT_CST_LOW (prev_init)) / type_size;
2193 if (diff != 1)
2195 /* FORNOW: SLP of accesses with gaps is not supported. */
2196 slp_impossible = true;
2197 if (DR_IS_WRITE (data_ref))
2199 if (vect_print_dump_info (REPORT_DETAILS))
2200 fprintf (vect_dump, "interleaved store with gaps");
2201 return false;
2204 gaps += diff - 1;
2207 last_accessed_element += diff;
2209 /* Store the gap from the previous member of the group. If there is no
2210 gap in the access, GROUP_GAP is always 1. */
2211 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2213 prev_init = DR_INIT (data_ref);
2214 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2215 /* Count the number of data-refs in the chain. */
2216 count++;
2219 /* COUNT is the number of accesses found; we multiply it by the size of
2220 the type to get COUNT_IN_BYTES. */
2221 count_in_bytes = type_size * count;
2223 /* Check that the size of the interleaving (including gaps) is not
2224 greater than STEP. */
2225 if (dr_step && dr_step < count_in_bytes + gaps * type_size)
2227 if (vect_print_dump_info (REPORT_DETAILS))
2229 fprintf (vect_dump, "interleaving size is greater than step for ");
2230 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2232 return false;
2235 /* Check that the size of the interleaving is equal to STEP for stores,
2236 i.e., that there are no gaps. */
2237 if (dr_step && dr_step != count_in_bytes)
2239 if (DR_IS_READ (dr))
2241 slp_impossible = true;
2242 /* There is a gap after the last load in the group. This gap is the
2243 difference between the stride and the number of elements. When
2244 there is no gap, this difference is 0. */
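/* For illustration: with stride = 4 and count = 3 loads in the group,
   one element-sized gap remains after the last load of each group, so
   GROUP_GAP of the first stmt becomes 4 - 3 = 1.  */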
2245 GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
2247 else
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "interleaved store with gaps");
2251 return false;
2255 /* Check that STEP is a multiple of type size. */
2256 if (dr_step && (dr_step % type_size) != 0)
2258 if (vect_print_dump_info (REPORT_DETAILS))
2260 fprintf (vect_dump, "step is not a multiple of type size: step ");
2261 print_generic_expr (vect_dump, step, TDF_SLIM);
2262 fprintf (vect_dump, " size ");
2263 print_generic_expr (vect_dump, TYPE_SIZE_UNIT (scalar_type),
2264 TDF_SLIM);
2266 return false;
2269 if (stride == 0)
2270 stride = count;
2272 GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2273 if (vect_print_dump_info (REPORT_DETAILS))
2274 fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
2276 /* SLP: create an SLP data structure for every interleaving group of
2277 stores for further analysis in vect_analyse_slp. */
2278 if (DR_IS_WRITE (dr) && !slp_impossible)
2280 if (loop_vinfo)
2281 VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
2282 stmt);
2283 if (bb_vinfo)
2284 VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
2285 stmt);
2288 /* There is a gap at the end of the group. */
2289 if (stride - last_accessed_element > 0 && loop_vinfo)
2291 if (vect_print_dump_info (REPORT_DETAILS))
2292 fprintf (vect_dump, "Data access with gaps requires scalar "
2293 "epilogue loop");
2294 if (loop->inner)
2296 if (vect_print_dump_info (REPORT_DETAILS))
2297 fprintf (vect_dump, "Peeling for outer loop is not supported");
2298 return false;
2301 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2305 return true;
2309 /* Analyze the access pattern of the data-reference DR.
2310 In case of non-consecutive accesses call vect_analyze_group_access() to
2311 analyze groups of strided accesses. */
2313 static bool
2314 vect_analyze_data_ref_access (struct data_reference *dr)
2316 tree step = DR_STEP (dr);
2317 tree scalar_type = TREE_TYPE (DR_REF (dr));
2318 gimple stmt = DR_STMT (dr);
2319 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2320 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2321 struct loop *loop = NULL;
2322 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2324 if (loop_vinfo)
2325 loop = LOOP_VINFO_LOOP (loop_vinfo);
2327 if (loop_vinfo && !step)
2329 if (vect_print_dump_info (REPORT_DETAILS))
2330 fprintf (vect_dump, "bad data-ref access in loop");
2331 return false;
2334 /* Allow invariant loads in loops. */
2335 if (loop_vinfo && dr_step == 0)
2337 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2338 return DR_IS_READ (dr);
2341 if (loop && nested_in_vect_loop_p (loop, stmt))
2343 /* Interleaved accesses are not yet supported within outer-loop
2344 vectorization for references in the inner-loop. */
2345 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2347 /* For the rest of the analysis we use the outer-loop step. */
2348 step = STMT_VINFO_DR_STEP (stmt_info);
2349 dr_step = TREE_INT_CST_LOW (step);
2351 if (dr_step == 0)
2353 if (vect_print_dump_info (REPORT_ALIGNMENT))
2354 fprintf (vect_dump, "zero step in outer loop.");
2355 if (DR_IS_READ (dr))
2356 return true;
2357 else
2358 return false;
2362 /* Consecutive? */
2363 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2364 || (dr_step < 0
2365 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2367 /* Mark that it is not interleaving. */
2368 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2369 return true;
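/* For illustration: for a 4-byte 'int', a DR_STEP of 4 is a consecutive
   forward access and a DR_STEP of -4 a consecutive backward one; both
   satisfy the test above and are marked as not interleaved.  */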
2372 if (loop && nested_in_vect_loop_p (loop, stmt))
2374 if (vect_print_dump_info (REPORT_ALIGNMENT))
2375 fprintf (vect_dump, "strided access in outer loop.");
2376 return false;
2379 /* Not a consecutive access - check whether it is part of an interleaving group. */
2380 return vect_analyze_group_access (dr);
2384 /* Function vect_analyze_data_ref_accesses.
2386 Analyze the access pattern of all the data references in the loop.
2388 FORNOW: the only access pattern that is considered vectorizable is a
2389 simple step 1 (consecutive) access.
2391 FORNOW: handle only arrays and pointer accesses. */
2393 bool
2394 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2396 unsigned int i;
2397 VEC (data_reference_p, heap) *datarefs;
2398 struct data_reference *dr;
2400 if (vect_print_dump_info (REPORT_DETAILS))
2401 fprintf (vect_dump, "=== vect_analyze_data_ref_accesses ===");
2403 if (loop_vinfo)
2404 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2405 else
2406 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2408 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2409 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2410 && !vect_analyze_data_ref_access (dr))
2412 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2413 fprintf (vect_dump, "not vectorized: complicated access pattern.");
2415 if (bb_vinfo)
2417 /* Mark the statement as not vectorizable. */
2418 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2419 continue;
2421 else
2422 return false;
2425 return true;
2428 /* Function vect_prune_runtime_alias_test_list.
2430 Prune a list of ddrs to be tested at run-time by versioning for alias.
2431 Return FALSE if the resulting list of ddrs is longer than allowed by
2432 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2434 bool
2435 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2437 VEC (ddr_p, heap) * ddrs =
2438 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2439 unsigned i, j;
2441 if (vect_print_dump_info (REPORT_DETAILS))
2442 fprintf (vect_dump, "=== vect_prune_runtime_alias_test_list ===");
2444 for (i = 0; i < VEC_length (ddr_p, ddrs); )
2446 bool found;
2447 ddr_p ddr_i;
2449 ddr_i = VEC_index (ddr_p, ddrs, i);
2450 found = false;
2452 for (j = 0; j < i; j++)
2454 ddr_p ddr_j = VEC_index (ddr_p, ddrs, j);
2456 if (vect_vfa_range_equal (ddr_i, ddr_j))
2458 if (vect_print_dump_info (REPORT_DR_DETAILS))
2460 fprintf (vect_dump, "found equal ranges ");
2461 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_i)), TDF_SLIM);
2462 fprintf (vect_dump, ", ");
2463 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_i)), TDF_SLIM);
2464 fprintf (vect_dump, " and ");
2465 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_j)), TDF_SLIM);
2466 fprintf (vect_dump, ", ");
2467 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_j)), TDF_SLIM);
2469 found = true;
2470 break;
2474 if (found)
2476 VEC_ordered_remove (ddr_p, ddrs, i);
2477 continue;
2479 i++;
2482 if (VEC_length (ddr_p, ddrs) >
2483 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2485 if (vect_print_dump_info (REPORT_DR_DETAILS))
2487 fprintf (vect_dump,
2488 "disable versioning for alias - max number of generated "
2489 "checks exceeded.");
2492 VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0);
2494 return false;
2497 return true;
2501 /* Function vect_analyze_data_refs.
2503 Find all the data references in the loop or basic block.
2505 The general structure of the analysis of data refs in the vectorizer is as
2506 follows:
2507 1- vect_analyze_data_refs(loop/bb): call
2508 compute_data_dependences_for_loop/bb to find and analyze all data-refs
2509 in the loop/bb and their dependences.
2510 2- vect_analyze_dependences(): apply dependence testing using ddrs.
2511 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
2512 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
2516 bool
2517 vect_analyze_data_refs (loop_vec_info loop_vinfo,
2518 bb_vec_info bb_vinfo,
2519 int *min_vf)
2521 struct loop *loop = NULL;
2522 basic_block bb = NULL;
2523 unsigned int i;
2524 VEC (data_reference_p, heap) *datarefs;
2525 struct data_reference *dr;
2526 tree scalar_type;
2527 bool res, stop_bb_analysis = false;
2529 if (vect_print_dump_info (REPORT_DETAILS))
2530 fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
2532 if (loop_vinfo)
2534 loop = LOOP_VINFO_LOOP (loop_vinfo);
2535 res = compute_data_dependences_for_loop
2536 (loop, true,
2537 &LOOP_VINFO_LOOP_NEST (loop_vinfo),
2538 &LOOP_VINFO_DATAREFS (loop_vinfo),
2539 &LOOP_VINFO_DDRS (loop_vinfo));
2541 if (!res)
2543 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2544 fprintf (vect_dump, "not vectorized: loop contains function calls"
2545 " or data references that cannot be analyzed");
2546 return false;
2549 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2551 else
2553 bb = BB_VINFO_BB (bb_vinfo);
2554 res = compute_data_dependences_for_bb (bb, true,
2555 &BB_VINFO_DATAREFS (bb_vinfo),
2556 &BB_VINFO_DDRS (bb_vinfo));
2557 if (!res)
2559 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2560 fprintf (vect_dump, "not vectorized: basic block contains function"
2561 " calls or data references that cannot be analyzed");
2562 return false;
2565 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2568 /* Go through the data-refs, check that the analysis succeeded. Update
2569 pointer from stmt_vec_info struct to DR and vectype. */
2571 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2573 gimple stmt;
2574 stmt_vec_info stmt_info;
2575 tree base, offset, init;
2576 int vf;
2578 if (!dr || !DR_REF (dr))
2580 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2581 fprintf (vect_dump, "not vectorized: unhandled data-ref ");
2583 return false;
2586 stmt = DR_STMT (dr);
2587 stmt_info = vinfo_for_stmt (stmt);
2589 if (stop_bb_analysis)
2591 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2592 continue;
2595 /* Check that analysis of the data-ref succeeded. */
2596 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
2597 || !DR_STEP (dr))
2599 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2601 fprintf (vect_dump, "not vectorized: data ref analysis failed ");
2602 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2605 if (bb_vinfo)
2607 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2608 stop_bb_analysis = true;
2609 continue;
2612 return false;
2615 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
2617 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2618 fprintf (vect_dump, "not vectorized: base addr of dr is a "
2619 "constant");
2621 if (bb_vinfo)
2623 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2624 stop_bb_analysis = true;
2625 continue;
2628 return false;
2631 if (TREE_THIS_VOLATILE (DR_REF (dr)))
2633 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2635 fprintf (vect_dump, "not vectorized: volatile type ");
2636 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2639 if (bb_vinfo)
2641 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2642 stop_bb_analysis = true;
2643 continue;
2646 return false;
2649 base = unshare_expr (DR_BASE_ADDRESS (dr));
2650 offset = unshare_expr (DR_OFFSET (dr));
2651 init = unshare_expr (DR_INIT (dr));
2653 if (stmt_can_throw_internal (stmt))
2655 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2657 fprintf (vect_dump, "not vectorized: statement can throw an "
2658 "exception ");
2659 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2662 if (bb_vinfo)
2664 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2665 stop_bb_analysis = true;
2666 continue;
2669 return false;
2672 /* Update DR field in stmt_vec_info struct. */
2674 /* If the dataref is in an inner-loop of the loop that is considered
2675 for vectorization, we also want to analyze the access relative to
2676 the outer-loop (DR contains information only relative to the
2677 inner-most enclosing loop). We do that by building a reference to the
2678 first location accessed by the inner-loop, and analyze it relative to
2679 the outer-loop. */
2680 if (loop && nested_in_vect_loop_p (loop, stmt))
2682 tree outer_step, outer_base, outer_init;
2683 HOST_WIDE_INT pbitsize, pbitpos;
2684 tree poffset;
2685 enum machine_mode pmode;
2686 int punsignedp, pvolatilep;
2687 affine_iv base_iv, offset_iv;
2688 tree dinit;
2690 /* Build a reference to the first location accessed by the
2691 inner-loop: *(BASE+INIT). (The first location is actually
2692 BASE+INIT+OFFSET, but we add OFFSET separately later). */
2693 tree inner_base = build_fold_indirect_ref
2694 (fold_build_pointer_plus (base, init));
2696 if (vect_print_dump_info (REPORT_DETAILS))
2698 fprintf (vect_dump, "analyze in outer-loop: ");
2699 print_generic_expr (vect_dump, inner_base, TDF_SLIM);
2702 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
2703 &poffset, &pmode, &punsignedp, &pvolatilep, false);
2704 gcc_assert (outer_base != NULL_TREE);
2706 if (pbitpos % BITS_PER_UNIT != 0)
2708 if (vect_print_dump_info (REPORT_DETAILS))
2709 fprintf (vect_dump, "failed: bit offset alignment.\n");
2710 return false;
2713 outer_base = build_fold_addr_expr (outer_base);
2714 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
2715 &base_iv, false))
2717 if (vect_print_dump_info (REPORT_DETAILS))
2718 fprintf (vect_dump, "failed: evolution of base is not affine.\n");
2719 return false;
2722 if (offset)
2724 if (poffset)
2725 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
2726 poffset);
2727 else
2728 poffset = offset;
2731 if (!poffset)
2733 offset_iv.base = ssize_int (0);
2734 offset_iv.step = ssize_int (0);
2736 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
2737 &offset_iv, false))
2739 if (vect_print_dump_info (REPORT_DETAILS))
2740 fprintf (vect_dump, "evolution of offset is not affine.\n");
2741 return false;
2744 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
2745 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
2746 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2747 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
2748 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2750 outer_step = size_binop (PLUS_EXPR,
2751 fold_convert (ssizetype, base_iv.step),
2752 fold_convert (ssizetype, offset_iv.step));
2754 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
2755 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
2756 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
2757 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
2758 STMT_VINFO_DR_OFFSET (stmt_info) =
2759 fold_convert (ssizetype, offset_iv.base);
2760 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
2761 size_int (highest_pow2_factor (offset_iv.base));
2763 if (vect_print_dump_info (REPORT_DETAILS))
2765 fprintf (vect_dump, "\touter base_address: ");
2766 print_generic_expr (vect_dump, STMT_VINFO_DR_BASE_ADDRESS (stmt_info), TDF_SLIM);
2767 fprintf (vect_dump, "\n\touter offset from base address: ");
2768 print_generic_expr (vect_dump, STMT_VINFO_DR_OFFSET (stmt_info), TDF_SLIM);
2769 fprintf (vect_dump, "\n\touter constant offset from base address: ");
2770 print_generic_expr (vect_dump, STMT_VINFO_DR_INIT (stmt_info), TDF_SLIM);
2771 fprintf (vect_dump, "\n\touter step: ");
2772 print_generic_expr (vect_dump, STMT_VINFO_DR_STEP (stmt_info), TDF_SLIM);
2773 fprintf (vect_dump, "\n\touter aligned to: ");
2774 print_generic_expr (vect_dump, STMT_VINFO_DR_ALIGNED_TO (stmt_info), TDF_SLIM);
2778 if (STMT_VINFO_DATA_REF (stmt_info))
2780 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2782 fprintf (vect_dump,
2783 "not vectorized: more than one data ref in stmt: ");
2784 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2787 if (bb_vinfo)
2789 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2790 stop_bb_analysis = true;
2791 continue;
2794 return false;
2797 STMT_VINFO_DATA_REF (stmt_info) = dr;
2799 /* Set vectype for STMT. */
2800 scalar_type = TREE_TYPE (DR_REF (dr));
2801 STMT_VINFO_VECTYPE (stmt_info) =
2802 get_vectype_for_scalar_type (scalar_type);
2803 if (!STMT_VINFO_VECTYPE (stmt_info))
2805 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2807 fprintf (vect_dump,
2808 "not vectorized: no vectype for stmt: ");
2809 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2810 fprintf (vect_dump, " scalar_type: ");
2811 print_generic_expr (vect_dump, scalar_type, TDF_DETAILS);
2814 if (bb_vinfo)
2816 /* Mark the statement as not vectorizable. */
2817 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2818 stop_bb_analysis = true;
2819 continue;
2821 else
2822 return false;
2825 /* Adjust the minimal vectorization factor according to the
2826 vector type. */
2827 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
2828 if (vf > *min_vf)
2829 *min_vf = vf;
2832 return true;
2836 /* Function vect_get_new_vect_var.
2838 Returns a name for a new variable. The current naming scheme uses the
2839 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) for
2840 vectorizer-generated variables, prepending it to NAME if NAME is
2841 provided. */
2843 tree
2844 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
2846 const char *prefix;
2847 tree new_vect_var;
2849 switch (var_kind)
2851 case vect_simple_var:
2852 prefix = "vect_";
2853 break;
2854 case vect_scalar_var:
2855 prefix = "stmp_";
2856 break;
2857 case vect_pointer_var:
2858 prefix = "vect_p";
2859 break;
2860 default:
2861 gcc_unreachable ();
2864 if (name)
2866 char* tmp = concat (prefix, name, NULL);
2867 new_vect_var = create_tmp_var (type, tmp);
2868 free (tmp);
2870 else
2871 new_vect_var = create_tmp_var (type, prefix);
2873 /* Mark vector typed variable as a gimple register variable. */
2874 if (TREE_CODE (type) == VECTOR_TYPE)
2875 DECL_GIMPLE_REG_P (new_vect_var) = true;
2877 return new_vect_var;
2881 /* Function vect_create_addr_base_for_vector_ref.
2883 Create an expression that computes the address of the first memory location
2884 that will be accessed for a data reference.
2886 Input:
2887 STMT: The statement containing the data reference.
2888 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
2889 OFFSET: Optional. If supplied, it is added to the initial address.
2890 LOOP: Specify the loop-nest relative to which the address should be computed.
2891 For example, when the dataref is in an inner-loop nested in an
2892 outer-loop that is now being vectorized, LOOP can be either the
2893 outer-loop, or the inner-loop. The first memory location accessed
2894 by the following dataref ('in' points to short):
2896 for (i=0; i<N; i++)
2897 for (j=0; j<M; j++)
2898 s += in[i+j]
2900 is as follows:
2901 if LOOP=i_loop: &in (relative to i_loop)
2902 if LOOP=j_loop: &in+i*2B (relative to j_loop)
2904 Output:
2905 1. Return an SSA_NAME whose value is the address of the memory location of
2906 the first vector of the data reference.
2907 2. If NEW_STMT_LIST is not NULL_TREE after return, the caller must insert
2908 these statement(s), which define the returned SSA_NAME.
2910 FORNOW: We are only handling array accesses with step 1. */
2912 tree
2913 vect_create_addr_base_for_vector_ref (gimple stmt,
2914 gimple_seq *new_stmt_list,
2915 tree offset,
2916 struct loop *loop)
2918 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2919 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2920 tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
2921 tree base_name;
2922 tree data_ref_base_var;
2923 tree vec_stmt;
2924 tree addr_base, addr_expr;
2925 tree dest;
2926 gimple_seq seq = NULL;
2927 tree base_offset = unshare_expr (DR_OFFSET (dr));
2928 tree init = unshare_expr (DR_INIT (dr));
2929 tree vect_ptr_type;
2930 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2931 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2932 tree base;
2934 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
2936 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
2938 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
2940 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
2941 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
2942 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
2945 if (loop_vinfo)
2946 base_name = build_fold_indirect_ref (data_ref_base);
2947 else
2949 base_offset = ssize_int (0);
2950 init = ssize_int (0);
2951 base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
2954 data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
2955 add_referenced_var (data_ref_base_var);
2956 data_ref_base = force_gimple_operand (data_ref_base, &seq, true,
2957 data_ref_base_var);
2958 gimple_seq_add_seq (new_stmt_list, seq);
2960 /* Create base_offset */
2961 base_offset = size_binop (PLUS_EXPR,
2962 fold_convert (sizetype, base_offset),
2963 fold_convert (sizetype, init));
2964 dest = create_tmp_var (sizetype, "base_off");
2965 add_referenced_var (dest);
2966 base_offset = force_gimple_operand (base_offset, &seq, true, dest);
2967 gimple_seq_add_seq (new_stmt_list, seq);
2969 if (offset)
2971 tree tmp = create_tmp_var (sizetype, "offset");
2973 add_referenced_var (tmp);
2974 offset = fold_build2 (MULT_EXPR, sizetype,
2975 fold_convert (sizetype, offset), step);
2976 base_offset = fold_build2 (PLUS_EXPR, sizetype,
2977 base_offset, offset);
2978 base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
2979 gimple_seq_add_seq (new_stmt_list, seq);
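/* For illustration: OFFSET is measured in elements, so an OFFSET of 4
   with a 2-byte element type adds 4 * 2 = 8 bytes on top of the
   base_offset computed above.  */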
2982 /* base + base_offset */
2983 if (loop_vinfo)
2984 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
2985 else
2987 addr_base = build1 (ADDR_EXPR,
2988 build_pointer_type (TREE_TYPE (DR_REF (dr))),
2989 unshare_expr (DR_REF (dr)));
2992 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
2993 base = get_base_address (DR_REF (dr));
2994 if (base
2995 && TREE_CODE (base) == MEM_REF)
2996 vect_ptr_type
2997 = build_qualified_type (vect_ptr_type,
2998 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
3000 vec_stmt = fold_convert (vect_ptr_type, addr_base);
3001 addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
3002 get_name (base_name));
3003 add_referenced_var (addr_expr);
3004 vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr);
3005 gimple_seq_add_seq (new_stmt_list, seq);
3007 if (DR_PTR_INFO (dr)
3008 && TREE_CODE (vec_stmt) == SSA_NAME)
3010 duplicate_ssa_name_ptr_info (vec_stmt, DR_PTR_INFO (dr));
3011 if (offset)
3013 SSA_NAME_PTR_INFO (vec_stmt)->align = 1;
3014 SSA_NAME_PTR_INFO (vec_stmt)->misalign = 0;
3018 if (vect_print_dump_info (REPORT_DETAILS))
3020 fprintf (vect_dump, "created ");
3021 print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
3024 return vec_stmt;
3028 /* Function vect_create_data_ref_ptr.
3030 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3031 location accessed in the loop by STMT, along with the def-use update
3032 chain to appropriately advance the pointer through the loop iterations.
3033 Also set aliasing information for the pointer. This pointer is used by
3034 the callers to this function to create a memory reference expression for
3035 vector load/store access.
3037 Input:
3038 1. STMT: a stmt that references memory. Expected to be of the form
3039 GIMPLE_ASSIGN <name, data-ref> or
3040 GIMPLE_ASSIGN <data-ref, name>.
3041 2. AGGR_TYPE: the type of the reference, which should be either a vector
3042 or an array.
3043 3. AT_LOOP: the loop where the vector memref is to be created.
3044 4. OFFSET (optional): an offset to be added to the initial address accessed
3045 by the data-ref in STMT.
3046 5. BSI: location where the new stmts are to be placed if there is no loop
3047 6. ONLY_INIT: indicate whether ap is to be updated in the loop, or remain
3048 pointing to the initial address.
3050 Output:
3051 1. Declare a new ptr to vector_type, and have it point to the base of the
3052 data reference (the initial address accessed by the data reference).
3053 For example, for vector of type V8HI, the following code is generated:
3055 v8hi *ap;
3056 ap = (v8hi *)initial_address;
3058 if OFFSET is not supplied:
3059 initial_address = &a[init];
3060 if OFFSET is supplied:
3061 initial_address = &a[init + OFFSET];
3063 Return the initial_address in INITIAL_ADDRESS.
3065 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
3066 update the pointer in each iteration of the loop.
3068 Return the increment stmt that updates the pointer in PTR_INCR.
3070 3. Set INV_P to true if the access pattern of the data reference in the
3071 vectorized loop is invariant. Set it to false otherwise.
3073 4. Return the pointer. */
3075 tree
3076 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
3077 tree offset, tree *initial_address,
3078 gimple_stmt_iterator *gsi, gimple *ptr_incr,
3079 bool only_init, bool *inv_p)
3081 tree base_name;
3082 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3083 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3084 struct loop *loop = NULL;
3085 bool nested_in_vect_loop = false;
3086 struct loop *containing_loop = NULL;
3087 tree aggr_ptr_type;
3088 tree aggr_ptr;
3089 tree new_temp;
3090 gimple vec_stmt;
3091 gimple_seq new_stmt_list = NULL;
3092 edge pe = NULL;
3093 basic_block new_bb;
3094 tree aggr_ptr_init;
3095 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3096 tree aptr;
3097 gimple_stmt_iterator incr_gsi;
3098 bool insert_after;
3099 bool negative;
3100 tree indx_before_incr, indx_after_incr;
3101 gimple incr;
3102 tree step;
3103 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3104 tree base;
3106 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
3107 || TREE_CODE (aggr_type) == VECTOR_TYPE);
3109 if (loop_vinfo)
3111 loop = LOOP_VINFO_LOOP (loop_vinfo);
3112 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3113 containing_loop = (gimple_bb (stmt))->loop_father;
3114 pe = loop_preheader_edge (loop);
3116 else
3118 gcc_assert (bb_vinfo);
3119 only_init = true;
3120 *ptr_incr = NULL;
3123 /* Check the step (evolution) of the load in LOOP, and record
3124 whether it's invariant. */
3125 if (nested_in_vect_loop)
3126 step = STMT_VINFO_DR_STEP (stmt_info);
3127 else
3128 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
3130 if (tree_int_cst_compare (step, size_zero_node) == 0)
3131 *inv_p = true;
3132 else
3133 *inv_p = false;
3134 negative = tree_int_cst_compare (step, size_zero_node) < 0;
3136 /* Create an expression for the first address accessed by this load
3137 in LOOP. */
3138 base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
3140 if (vect_print_dump_info (REPORT_DETAILS))
3142 tree data_ref_base = base_name;
3143 fprintf (vect_dump, "create %s-pointer variable to type: ",
3144 tree_code_name[(int) TREE_CODE (aggr_type)]);
3145 print_generic_expr (vect_dump, aggr_type, TDF_SLIM);
3146 if (TREE_CODE (data_ref_base) == VAR_DECL
3147 || TREE_CODE (data_ref_base) == ARRAY_REF)
3148 fprintf (vect_dump, " vectorizing an array ref: ");
3149 else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
3150 fprintf (vect_dump, " vectorizing a record based array ref: ");
3151 else if (TREE_CODE (data_ref_base) == SSA_NAME)
3152 fprintf (vect_dump, " vectorizing a pointer ref: ");
3153 print_generic_expr (vect_dump, base_name, TDF_SLIM);
3156 /* (1) Create the new aggregate-pointer variable. */
3157 aggr_ptr_type = build_pointer_type (aggr_type);
3158 base = get_base_address (DR_REF (dr));
3159 if (base
3160 && TREE_CODE (base) == MEM_REF)
3161 aggr_ptr_type
3162 = build_qualified_type (aggr_ptr_type,
3163 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
3164 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3165 get_name (base_name));
3167 /* Vector and array types inherit the alias set of their component
3168 type by default, so we need to use a ref-all pointer if the data
3169 reference does not conflict with the created aggregate data
3170 reference because it is not addressable. */
3171 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
3172 get_alias_set (DR_REF (dr))))
3174 aggr_ptr_type
3175 = build_pointer_type_for_mode (aggr_type,
3176 TYPE_MODE (aggr_ptr_type), true);
3177 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3178 get_name (base_name));
3181 /* Likewise for any of the data references in the stmt group. */
3182 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
3184 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
3187 tree lhs = gimple_assign_lhs (orig_stmt);
3188 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr),
3189 get_alias_set (lhs)))
3191 aggr_ptr_type
3192 = build_pointer_type_for_mode (aggr_type,
3193 TYPE_MODE (aggr_ptr_type), true);
3194 aggr_ptr
3195 = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var,
3196 get_name (base_name));
3197 break;
3200 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo_for_stmt (orig_stmt));
3202 while (orig_stmt);
3205 add_referenced_var (aggr_ptr);
3207 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
3208 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
3209 def-use update cycles for the pointer: one relative to the outer-loop
3210 (LOOP), which is what steps (3) and (4) below do. The other is relative
3211 to the inner-loop (which is the inner-most loop containing the dataref),
3212 and this is done by step (5) below.
3214 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
3215 inner-most loop, and so steps (3),(4) work the same, and step (5) is
3216 redundant. Steps (3),(4) create the following:
3218 vp0 = &base_addr;
3219 LOOP: vp1 = phi(vp0,vp2)
3222 vp2 = vp1 + step
3223 goto LOOP
3225 If there is an inner-loop nested in loop, then step (5) will also be
3226 applied, and an additional update in the inner-loop will be created:
3228 vp0 = &base_addr;
3229 LOOP: vp1 = phi(vp0,vp2)
3231 inner: vp3 = phi(vp1,vp4)
3232 vp4 = vp3 + inner_step
3233 if () goto inner
3235 vp2 = vp1 + step
3236 if () goto LOOP */
3238 /* (2) Calculate the initial address of the aggregate-pointer, and set
3239 the aggregate-pointer to point to it before the loop. */
3241 /* Create: &(base[init_val+offset]) in the loop preheader. */
3243 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
3244 offset, loop);
3245 if (new_stmt_list)
3247 if (pe)
3249 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
3250 gcc_assert (!new_bb);
3252 else
3253 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
3256 *initial_address = new_temp;
3258 /* Create: p = (aggr_type *) initial_base */
3259 if (TREE_CODE (new_temp) != SSA_NAME
3260 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
3262 vec_stmt = gimple_build_assign (aggr_ptr,
3263 fold_convert (aggr_ptr_type, new_temp));
3264 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
3265 /* Copy the points-to information if it exists. */
3266 if (DR_PTR_INFO (dr))
3267 duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
3268 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
3269 if (pe)
3271 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
3272 gcc_assert (!new_bb);
3274 else
3275 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
3277 else
3278 aggr_ptr_init = new_temp;
3280 /* (3) Handle the updating of the aggregate-pointer inside the loop.
3281 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
3282 inner-loop nested in LOOP (during outer-loop vectorization). */
3284 /* No update in loop is required. */
3285 if (only_init && (!loop_vinfo || at_loop == loop))
3286 aptr = aggr_ptr_init;
3287 else
3289 /* The step of the aggregate pointer is the type size. */
3290 tree step = TYPE_SIZE_UNIT (aggr_type);
3291 /* One exception to the above is when the scalar step of the load in
3292 LOOP is zero. In this case the step here is also zero. */
3293 if (*inv_p)
3294 step = size_zero_node;
3295 else if (negative)
3296 step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
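/* For illustration: with a V4SI aggregate type the pointer is bumped by
   TYPE_SIZE_UNIT = 16 bytes per vector iteration, by -16 bytes when the
   scalar access runs backwards (NEGATIVE), or by 0 when the access is
   invariant in the loop.  */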
3298 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
3300 create_iv (aggr_ptr_init,
3301 fold_convert (aggr_ptr_type, step),
3302 aggr_ptr, loop, &incr_gsi, insert_after,
3303 &indx_before_incr, &indx_after_incr);
3304 incr = gsi_stmt (incr_gsi);
3305 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3307 /* Copy the points-to information if it exists. */
3308 if (DR_PTR_INFO (dr))
3310 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3311 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3313 if (ptr_incr)
3314 *ptr_incr = incr;
3316 aptr = indx_before_incr;
3319 if (!nested_in_vect_loop || only_init)
3320 return aptr;
3323 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
3324 nested in LOOP, if it exists. */
3326 gcc_assert (nested_in_vect_loop);
3327 if (!only_init)
3329 standard_iv_increment_position (containing_loop, &incr_gsi,
3330 &insert_after);
3331 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
3332 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
3333 &indx_after_incr);
3334 incr = gsi_stmt (incr_gsi);
3335 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3337 /* Copy the points-to information if it exists. */
3338 if (DR_PTR_INFO (dr))
3340 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3341 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3343 if (ptr_incr)
3344 *ptr_incr = incr;
3346 return indx_before_incr;
3348 else
3349 gcc_unreachable ();
3353 /* Function bump_vector_ptr
3355 Increment a pointer (to a vector type) by vector-size. If requested,
3356 i.e. if PTR_INCR is given, then also connect the new increment stmt
3357 to the existing def-use update-chain of the pointer, by modifying
3358 the PTR_INCR as illustrated below:
3360 The pointer def-use update-chain before this function:
3361 DATAREF_PTR = phi (p_0, p_2)
3362 ....
3363 PTR_INCR: p_2 = DATAREF_PTR + step
3365 The pointer def-use update-chain after this function:
3366 DATAREF_PTR = phi (p_0, p_2)
3367 ....
3368 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
3369 ....
3370 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
3372 Input:
3373 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
3374 in the loop.
3375 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
3376 the loop. The increment amount across iterations is expected
3377 to be vector_size.
3378 BSI - location where the new update stmt is to be placed.
3379 STMT - the original scalar memory-access stmt that is being vectorized.
3380 BUMP - optional. The offset by which to bump the pointer. If not given,
3381 the offset is assumed to be vector_size.
3383 Output: Return NEW_DATAREF_PTR as illustrated above.
3387 tree
3388 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
3389 gimple stmt, tree bump)
3391 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3392 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3393 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3394 tree ptr_var = SSA_NAME_VAR (dataref_ptr);
3395 tree update = TYPE_SIZE_UNIT (vectype);
3396 gimple incr_stmt;
3397 ssa_op_iter iter;
3398 use_operand_p use_p;
3399 tree new_dataref_ptr;
3401 if (bump)
3402 update = bump;
3404 incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
3405 dataref_ptr, update);
3406 new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
3407 gimple_assign_set_lhs (incr_stmt, new_dataref_ptr);
3408 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
3410 /* Copy the points-to information if it exists. */
3411 if (DR_PTR_INFO (dr))
3413 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
3414 SSA_NAME_PTR_INFO (new_dataref_ptr)->align = 1;
3415 SSA_NAME_PTR_INFO (new_dataref_ptr)->misalign = 0;
3418 if (!ptr_incr)
3419 return new_dataref_ptr;
3421 /* Update the vector-pointer's cross-iteration increment. */
3422 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
3424 tree use = USE_FROM_PTR (use_p);
3426 if (use == dataref_ptr)
3427 SET_USE (use_p, new_dataref_ptr);
3428 else
3429 gcc_assert (tree_int_cst_compare (use, update) == 0);
3432 return new_dataref_ptr;
3436 /* Function vect_create_destination_var.
3438 Create a new temporary of type VECTYPE. */
3440 tree
3441 vect_create_destination_var (tree scalar_dest, tree vectype)
3443 tree vec_dest;
3444 const char *new_name;
3445 tree type;
3446 enum vect_var_kind kind;
3448 kind = vectype ? vect_simple_var : vect_scalar_var;
3449 type = vectype ? vectype : TREE_TYPE (scalar_dest);
3451 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
3453 new_name = get_name (scalar_dest);
3454 if (!new_name)
3455 new_name = "var_";
3456 vec_dest = vect_get_new_vect_var (type, kind, new_name);
3457 add_referenced_var (vec_dest);
3459 return vec_dest;
3462 /* Function vect_strided_store_supported.
3464 Returns TRUE if INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
3465 and FALSE otherwise. */
3467 bool
3468 vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
3470 optab ih_optab, il_optab;
3471 enum machine_mode mode;
3473 mode = TYPE_MODE (vectype);
3475 /* vect_permute_store_chain requires the group size to be a power of two. */
3476 if (exact_log2 (count) == -1)
3478 if (vect_print_dump_info (REPORT_DETAILS))
3479 fprintf (vect_dump, "the size of the group of strided accesses"
3480 " is not a power of 2");
3481 return false;
3484 /* Check that the operation is supported. */
3485 ih_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
3486 vectype, optab_default);
3487 il_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
3488 vectype, optab_default);
3489 if (il_optab && ih_optab
3490 && optab_handler (ih_optab, mode) != CODE_FOR_nothing
3491 && optab_handler (il_optab, mode) != CODE_FOR_nothing)
3492 return true;
3494 if (can_vec_perm_for_code_p (VEC_INTERLEAVE_HIGH_EXPR, mode, NULL)
3495 && can_vec_perm_for_code_p (VEC_INTERLEAVE_LOW_EXPR, mode, NULL))
3496 return true;
3498 if (vect_print_dump_info (REPORT_DETAILS))
3499 fprintf (vect_dump, "interleave op not supported by target.");
3500 return false;
3504 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
3505 type VECTYPE. */
3507 bool
3508 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
3510 return vect_lanes_optab_supported_p ("vec_store_lanes",
3511 vec_store_lanes_optab,
3512 vectype, count);
3516 /* Function vect_permute_store_chain.
3518 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
3519 a power of 2, generate interleave_high/low stmts to reorder the data
3520 correctly for the stores. Return the final references for stores in
3521 RESULT_CHAIN.
3523 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3524 The input is 4 vectors each containing 8 elements. We assign a number to
3525 each element, the input sequence is:
3527 1st vec: 0 1 2 3 4 5 6 7
3528 2nd vec: 8 9 10 11 12 13 14 15
3529 3rd vec: 16 17 18 19 20 21 22 23
3530 4th vec: 24 25 26 27 28 29 30 31
3532 The output sequence should be:
3534 1st vec: 0 8 16 24 1 9 17 25
3535 2nd vec: 2 10 18 26 3 11 19 27
3536 3rd vec: 4 12 20 28 5 13 21 29
3537 4th vec: 6 14 22 30 7 15 23 31
3539 i.e., we interleave the contents of the four vectors in their order.
3541 We use interleave_high/low instructions to create such output. The input of
3542 each interleave_high/low operation is two vectors:
3543 1st vec 2nd vec
3544 0 1 2 3 4 5 6 7
3545 the even elements of the result vector are obtained left-to-right from the
3546 high/low elements of the first vector. The odd elements of the result are
3547 obtained left-to-right from the high/low elements of the second vector.
3548 The output of interleave_high will be: 0 4 1 5
3549 and of interleave_low: 2 6 3 7
3552 The permutation is done in log LENGTH stages. In each stage interleave_high
3553 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
3554 where the first argument is taken from the first half of DR_CHAIN and the
3555 second argument from its second half.
3556 In our example,
3558 I1: interleave_high (1st vec, 3rd vec)
3559 I2: interleave_low (1st vec, 3rd vec)
3560 I3: interleave_high (2nd vec, 4th vec)
3561 I4: interleave_low (2nd vec, 4th vec)
3563 The output for the first stage is:
3565 I1: 0 16 1 17 2 18 3 19
3566 I2: 4 20 5 21 6 22 7 23
3567 I3: 8 24 9 25 10 26 11 27
3568 I4: 12 28 13 29 14 30 15 31
3570 The output of the second stage, i.e. the final result is:
3572 I1: 0 8 16 24 1 9 17 25
3573 I2: 2 10 18 26 3 11 19 27
3574 I3: 4 12 20 28 5 13 21 29
3575 I4: 6 14 22 30 7 15 23 31. */
3577 void
3578 vect_permute_store_chain (VEC(tree,heap) *dr_chain,
3579 unsigned int length,
3580 gimple stmt,
3581 gimple_stmt_iterator *gsi,
3582 VEC(tree,heap) **result_chain)
3584 tree perm_dest, vect1, vect2, high, low;
3585 gimple perm_stmt;
3586 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3587 int i;
3588 unsigned int j;
3589 enum tree_code high_code, low_code;
3591 gcc_assert (vect_strided_store_supported (vectype, length));
3593 *result_chain = VEC_copy (tree, heap, dr_chain);
3595 for (i = 0; i < exact_log2 (length); i++)
3597 for (j = 0; j < length/2; j++)
3599 vect1 = VEC_index (tree, dr_chain, j);
3600 vect2 = VEC_index (tree, dr_chain, j+length/2);
3602 /* Create interleaving stmt:
3603 in the case of big endian:
3604 high = interleave_high (vect1, vect2)
3605 and in the case of little endian:
3606 high = interleave_low (vect1, vect2). */
3607 perm_dest = create_tmp_var (vectype, "vect_inter_high");
3608 DECL_GIMPLE_REG_P (perm_dest) = 1;
3609 add_referenced_var (perm_dest);
3610 if (BYTES_BIG_ENDIAN)
3612 high_code = VEC_INTERLEAVE_HIGH_EXPR;
3613 low_code = VEC_INTERLEAVE_LOW_EXPR;
3615 else
3617 low_code = VEC_INTERLEAVE_HIGH_EXPR;
3618 high_code = VEC_INTERLEAVE_LOW_EXPR;
3620 perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest,
3621 vect1, vect2);
3622 high = make_ssa_name (perm_dest, perm_stmt);
3623 gimple_assign_set_lhs (perm_stmt, high);
3624 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3625 VEC_replace (tree, *result_chain, 2*j, high);
3627 /* Create interleaving stmt:
3628 in the case of big endian:
3629 low = interleave_low (vect1, vect2)
3630 and in the case of little endian:
3631 low = interleave_high (vect1, vect2). */
3632 perm_dest = create_tmp_var (vectype, "vect_inter_low");
3633 DECL_GIMPLE_REG_P (perm_dest) = 1;
3634 add_referenced_var (perm_dest);
3635 perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest,
3636 vect1, vect2);
3637 low = make_ssa_name (perm_dest, perm_stmt);
3638 gimple_assign_set_lhs (perm_stmt, low);
3639 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3640 VEC_replace (tree, *result_chain, 2*j+1, low);
3642 dr_chain = VEC_copy (tree, heap, *result_chain);
3646 /* Function vect_setup_realignment
3648 This function is called when vectorizing an unaligned load using
3649 the dr_explicit_realign[_optimized] scheme.
3650 This function generates the following code at the loop prolog:
3652 p = initial_addr;
3653 x msq_init = *(floor(p)); # prolog load
3654 realignment_token = call target_builtin;
3655 loop:
3656 x msq = phi (msq_init, ---)
3658 The stmts marked with x are generated only for the case of
3659 dr_explicit_realign_optimized.
3661 The code above sets up a new (vector) pointer, pointing to the first
3662 location accessed by STMT, and a "floor-aligned" load using that pointer.
3663 It also generates code to compute the "realignment-token" (if the relevant
3664 target hook was defined), and creates a phi-node at the loop-header bb
3665 whose arguments are the result of the prolog-load (created by this
3666 function) and the result of a load that takes place in the loop (to be
3667 created by the caller to this function).
3669 For the case of dr_explicit_realign_optimized:
3670 The caller to this function uses the phi-result (msq) to create the
3671 realignment code inside the loop, and sets up the missing phi argument,
3672 as follows:
3673 loop:
3674 msq = phi (msq_init, lsq)
3675 lsq = *(floor(p')); # load in loop
3676 result = realign_load (msq, lsq, realignment_token);
3678 For the case of dr_explicit_realign:
3679 loop:
3680 msq = *(floor(p)); # load in loop
3681 p' = p + (VS-1);
3682 lsq = *(floor(p')); # load in loop
3683 result = realign_load (msq, lsq, realignment_token);
3685 Input:
3686 STMT - (scalar) load stmt to be vectorized. This load accesses
3687 a memory location that may be unaligned.
3688 GSI - place where new code is to be inserted.
3689 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
3690 is used.
3692 Output:
3693 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
3694 target hook, if defined.
3695 Return value - the result of the loop-header phi node. */
3697 tree
3698 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
3699 tree *realignment_token,
3700 enum dr_alignment_support alignment_support_scheme,
3701 tree init_addr,
3702 struct loop **at_loop)
3704 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3705 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3706 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3707 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3708 struct loop *loop = NULL;
3709 edge pe = NULL;
3710 tree scalar_dest = gimple_assign_lhs (stmt);
3711 tree vec_dest;
3712 gimple inc;
3713 tree ptr;
3714 tree data_ref;
3715 gimple new_stmt;
3716 basic_block new_bb;
3717 tree msq_init = NULL_TREE;
3718 tree new_temp;
3719 gimple phi_stmt;
3720 tree msq = NULL_TREE;
3721 gimple_seq stmts = NULL;
3722 bool inv_p;
3723 bool compute_in_loop = false;
3724 bool nested_in_vect_loop = false;
3725 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3726 struct loop *loop_for_initial_load = NULL;
3728 if (loop_vinfo)
3730 loop = LOOP_VINFO_LOOP (loop_vinfo);
3731 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3734 gcc_assert (alignment_support_scheme == dr_explicit_realign
3735 || alignment_support_scheme == dr_explicit_realign_optimized);
3737 /* We need to generate three things:
3738 1. the misalignment computation
3739 2. the extra vector load (for the optimized realignment scheme).
3740 3. the phi node for the two vectors from which the realignment is
3741 done (for the optimized realignment scheme). */
3743 /* 1. Determine where to generate the misalignment computation.
3745 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
3746 calculation will be generated by this function, outside the loop (in the
3747 preheader). Otherwise, INIT_ADDR has already been computed for us by the
3748 caller, inside the loop.
3750 Background: If the misalignment remains fixed throughout the iterations of
3751 the loop, then both realignment schemes are applicable, and also the
3752 misalignment computation can be done outside LOOP. This is because we are
3753 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
3754 are a multiple of VS (the Vector Size), and therefore the misalignment in
3755 different vectorized LOOP iterations is always the same.
3756 The problem arises only if the memory access is in an inner-loop nested
3757 inside LOOP, which is now being vectorized using outer-loop vectorization.
3758 This is the only case when the misalignment of the memory access may not
3759 remain fixed throughout the iterations of the inner-loop (as explained in
3760 detail in vect_supportable_dr_alignment). In this case, not only is the
3761 optimized realignment scheme not applicable, but also the misalignment
3762 computation (and generation of the realignment token that is passed to
3763 REALIGN_LOAD) have to be done inside the loop.
3765 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
3766 or not, which in turn determines if the misalignment is computed inside
3767 the inner-loop, or outside LOOP. */
3769 if (init_addr != NULL_TREE || !loop_vinfo)
3771 compute_in_loop = true;
3772 gcc_assert (alignment_support_scheme == dr_explicit_realign);
3776 /* 2. Determine where to generate the extra vector load.
3778 For the optimized realignment scheme, instead of generating two vector
3779 loads in each iteration, we generate a single extra vector load in the
3780 preheader of the loop, and in each iteration reuse the result of the
3781 vector load from the previous iteration. In case the memory access is in
3782 an inner-loop nested inside LOOP, which is now being vectorized using
3783 outer-loop vectorization, we need to determine whether this initial vector
3784 load should be generated at the preheader of the inner-loop, or can be
3785 generated at the preheader of LOOP. If the memory access has no evolution
3786 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
3787 to be generated inside LOOP (in the preheader of the inner-loop). */
3789 if (nested_in_vect_loop)
3791 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
3792 bool invariant_in_outerloop =
3793 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
3794 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
3796 else
3797 loop_for_initial_load = loop;
3798 if (at_loop)
3799 *at_loop = loop_for_initial_load;
3801 if (loop_for_initial_load)
3802 pe = loop_preheader_edge (loop_for_initial_load);
3804 /* 3. For the case of the optimized realignment, create the first vector
3805 load at the loop preheader. */
3807 if (alignment_support_scheme == dr_explicit_realign_optimized)
3809 /* Create msq_init = *(floor(p1)) in the loop preheader */
3811 gcc_assert (!compute_in_loop);
3812 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3813 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
3814 NULL_TREE, &init_addr, NULL, &inc,
3815 true, &inv_p);
3816 new_stmt = gimple_build_assign_with_ops
3817 (BIT_AND_EXPR, NULL_TREE, ptr,
3818 build_int_cst (TREE_TYPE (ptr),
3819 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3820 new_temp = make_ssa_name (SSA_NAME_VAR (ptr), new_stmt);
3821 gimple_assign_set_lhs (new_stmt, new_temp);
3822 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3823 gcc_assert (!new_bb);
3824 data_ref
3825 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
3826 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
3827 new_stmt = gimple_build_assign (vec_dest, data_ref);
3828 new_temp = make_ssa_name (vec_dest, new_stmt);
3829 gimple_assign_set_lhs (new_stmt, new_temp);
3830 mark_symbols_for_renaming (new_stmt);
3831 if (pe)
3833 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3834 gcc_assert (!new_bb);
3836 else
3837 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
3839 msq_init = gimple_assign_lhs (new_stmt);
3842 /* 4. Create realignment token using a target builtin, if available.
3843 It is done either inside the containing loop, or before LOOP (as
3844 determined above). */
3846 if (targetm.vectorize.builtin_mask_for_load)
3848 tree builtin_decl;
3850 /* Compute INIT_ADDR - the initial address accessed by this memref. */
3851 if (!init_addr)
3853 /* Generate the INIT_ADDR computation outside LOOP. */
3854 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
3855 NULL_TREE, loop);
3856 if (loop)
3858 pe = loop_preheader_edge (loop);
3859 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3860 gcc_assert (!new_bb);
3862 else
3863 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
3866 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
3867 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
3868 vec_dest =
3869 vect_create_destination_var (scalar_dest,
3870 gimple_call_return_type (new_stmt));
3871 new_temp = make_ssa_name (vec_dest, new_stmt);
3872 gimple_call_set_lhs (new_stmt, new_temp);
3874 if (compute_in_loop)
3875 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
3876 else
3878 /* Generate the misalignment computation outside LOOP. */
3879 pe = loop_preheader_edge (loop);
3880 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3881 gcc_assert (!new_bb);
3884 *realignment_token = gimple_call_lhs (new_stmt);
3886 /* The result of the CALL_EXPR to this builtin is determined from
3887 the value of the parameter and no global variables are touched
3888 which makes the builtin a "const" function. Requiring the
3889 builtin to have the "const" attribute makes it unnecessary
3890 to call mark_call_clobbered. */
3891 gcc_assert (TREE_READONLY (builtin_decl));
3894 if (alignment_support_scheme == dr_explicit_realign)
3895 return msq;
3897 gcc_assert (!compute_in_loop);
3898 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
3901 /* 5. Create msq = phi <msq_init, lsq> in loop */
3903 pe = loop_preheader_edge (containing_loop);
3904 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3905 msq = make_ssa_name (vec_dest, NULL);
3906 phi_stmt = create_phi_node (msq, containing_loop->header);
3907 SSA_NAME_DEF_STMT (msq) = phi_stmt;
3908 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
3910 return msq;
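/* For reference, the REALIGN_LOAD later emitted by the caller
   conceptually selects the misaligned vector that straddles the two
   aligned loads MSQ and LSQ.  A byte-level sketch (illustration only;
   the exact interpretation of the realignment token is target-defined,
   e.g. a permute mask on Altivec-like targets):

     off = byte misalignment encoded by REALIGNMENT_TOKEN;
     for (i = 0; i < VS; i++)
       result[i] = (i + off < VS) ? msq_bytes[i + off]
                                  : lsq_bytes[i + off - VS];  */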
3914 /* Function vect_strided_load_supported.
3916 Returns TRUE if EXTRACT_EVEN and EXTRACT_ODD operations are supported,
3917 and FALSE otherwise. */
3919 bool
3920 vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
3922 optab ee_optab, eo_optab;
3923 enum machine_mode mode;
3925 mode = TYPE_MODE (vectype);
3927 /* vect_permute_load_chain requires the group size to be a power of two. */
3928 if (exact_log2 (count) == -1)
3930 if (vect_print_dump_info (REPORT_DETAILS))
3931 fprintf (vect_dump, "the size of the group of strided accesses"
3932 " is not a power of 2");
3933 return false;
3936 ee_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR,
3937 vectype, optab_default);
3938 eo_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR,
3939 vectype, optab_default);
3940 if (ee_optab && eo_optab
3941 && optab_handler (ee_optab, mode) != CODE_FOR_nothing
3942 && optab_handler (eo_optab, mode) != CODE_FOR_nothing)
3943 return true;
3945 if (can_vec_perm_for_code_p (VEC_EXTRACT_EVEN_EXPR, mode, NULL)
3946 && can_vec_perm_for_code_p (VEC_EXTRACT_ODD_EXPR, mode, NULL))
3947 return true;
3949 if (vect_print_dump_info (REPORT_DETAILS))
3950 fprintf (vect_dump, "extract even/odd not supported by target");
3951 return false;
3954 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
3955 type VECTYPE. */
3957 bool
3958 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
3960 return vect_lanes_optab_supported_p ("vec_load_lanes",
3961 vec_load_lanes_optab,
3962 vectype, count);
3965 /* Function vect_permute_load_chain.
3967 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
3968 a power of 2, generate extract_even/odd stmts to reorder the input data
3969 correctly. Return the final references for loads in RESULT_CHAIN.
3971 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3972 The input is 4 vectors each containing 8 elements. We assign a number to each
3973 element; the input sequence is:
3975 1st vec: 0 1 2 3 4 5 6 7
3976 2nd vec: 8 9 10 11 12 13 14 15
3977 3rd vec: 16 17 18 19 20 21 22 23
3978 4th vec: 24 25 26 27 28 29 30 31
3980 The output sequence should be:
3982 1st vec: 0 4 8 12 16 20 24 28
3983 2nd vec: 1 5 9 13 17 21 25 29
3984 3rd vec: 2 6 10 14 18 22 26 30
3985 4th vec: 3 7 11 15 19 23 27 31
3987 i.e., the first output vector should contain the first elements of each
3988 interleaving group, etc.
3990 We use extract_even/odd instructions to create such output. The input of
3991 each extract_even/odd operation is two vectors
3992 1st vec 2nd vec
3993 0 1 2 3 4 5 6 7
3995 and the output is the vector of extracted even/odd elements. The output of
3996 extract_even will be: 0 2 4 6
3997 and of extract_odd: 1 3 5 7
4000 The permutation is done in log2(LENGTH) stages. In each stage extract_even
4001 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
4002 their order. In our example,
4004 E1: extract_even (1st vec, 2nd vec)
4005 E2: extract_odd (1st vec, 2nd vec)
4006 E3: extract_even (3rd vec, 4th vec)
4007 E4: extract_odd (3rd vec, 4th vec)
4009 The output for the first stage will be:
4011 E1: 0 2 4 6 8 10 12 14
4012 E2: 1 3 5 7 9 11 13 15
4013 E3: 16 18 20 22 24 26 28 30
4014 E4: 17 19 21 23 25 27 29 31
4016 In order to proceed and create the correct sequence for the next stage (or
4017 for the correct output, if the second stage is the last one, as in our
4018 example), we first put the output of extract_even operation and then the
4019 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
4020 The input for the second stage is:
4022 1st vec (E1): 0 2 4 6 8 10 12 14
4023 2nd vec (E3): 16 18 20 22 24 26 28 30
4024 3rd vec (E2): 1 3 5 7 9 11 13 15
4025 4th vec (E4): 17 19 21 23 25 27 29 31
4027 The output of the second stage:
4029 E1: 0 4 8 12 16 20 24 28
4030 E2: 2 6 10 14 18 22 26 30
4031 E3: 1 5 9 13 17 21 25 29
4032 E4: 3 7 11 15 19 23 27 31
4034 And RESULT_CHAIN after reordering:
4036 1st vec (E1): 0 4 8 12 16 20 24 28
4037 2nd vec (E3): 1 5 9 13 17 21 25 29
4038 3rd vec (E2): 2 6 10 14 18 22 26 30
4039 4th vec (E4): 3 7 11 15 19 23 27 31. */
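/* As in the store case, the net effect can be written as one index
   mapping over the flat sequence of LENGTH * NELT scalar elements
   (illustrative sketch only; LENGTH = 4 and NELT = 8 in the example):

     for (i = 0; i < LENGTH * NELT; i++)
       out[i] = in[(i % NELT) * LENGTH + i / NELT];

   which is exactly the inverse of the mapping performed by
   vect_permute_store_chain, i.e. it de-interleaves the group.  */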
4041 static void
4042 vect_permute_load_chain (VEC(tree,heap) *dr_chain,
4043 unsigned int length,
4044 gimple stmt,
4045 gimple_stmt_iterator *gsi,
4046 VEC(tree,heap) **result_chain)
4048 tree perm_dest, data_ref, first_vect, second_vect;
4049 gimple perm_stmt;
4050 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4051 int i;
4052 unsigned int j;
4054 gcc_assert (vect_strided_load_supported (vectype, length));
4056 *result_chain = VEC_copy (tree, heap, dr_chain);
4057 for (i = 0; i < exact_log2 (length); i++)
4059 for (j = 0; j < length; j +=2)
4061 first_vect = VEC_index (tree, dr_chain, j);
4062 second_vect = VEC_index (tree, dr_chain, j+1);
4064 /* data_ref = permute_even (first_data_ref, second_data_ref); */
4065 perm_dest = create_tmp_var (vectype, "vect_perm_even");
4066 DECL_GIMPLE_REG_P (perm_dest) = 1;
4067 add_referenced_var (perm_dest);
4069 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
4070 perm_dest, first_vect,
4071 second_vect);
4073 data_ref = make_ssa_name (perm_dest, perm_stmt);
4074 gimple_assign_set_lhs (perm_stmt, data_ref);
4075 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4076 mark_symbols_for_renaming (perm_stmt);
4078 VEC_replace (tree, *result_chain, j/2, data_ref);
4080 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
4081 perm_dest = create_tmp_var (vectype, "vect_perm_odd");
4082 DECL_GIMPLE_REG_P (perm_dest) = 1;
4083 add_referenced_var (perm_dest);
4085 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
4086 perm_dest, first_vect,
4087 second_vect);
4088 data_ref = make_ssa_name (perm_dest, perm_stmt);
4089 gimple_assign_set_lhs (perm_stmt, data_ref);
4090 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4091 mark_symbols_for_renaming (perm_stmt);
4093 VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
4095 dr_chain = VEC_copy (tree, heap, *result_chain);
4100 /* Function vect_transform_strided_load.
4102 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
4103 to perform their permutation and ascribe the resulting vectorized statements to
4104 the scalar statements.
4107 void
4108 vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
4109 gimple_stmt_iterator *gsi)
4111 VEC(tree,heap) *result_chain = NULL;
4113 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
4114 RESULT_CHAIN is the output of vect_permute_load_chain; it contains the permuted
4115 vectors, which are ready for vector computation. */
4116 result_chain = VEC_alloc (tree, heap, size);
4117 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
4118 vect_record_strided_load_vectors (stmt, result_chain);
4119 VEC_free (tree, heap, result_chain);
4122 /* RESULT_CHAIN contains the output of a group of strided loads that were
4123 generated as part of the vectorization of STMT. Assign the statement
4124 for each vector to the associated scalar statement. */
4126 void
4127 vect_record_strided_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
4129 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
4130 gimple next_stmt, new_stmt;
4131 unsigned int i, gap_count;
4132 tree tmp_data_ref;
4134 /* Put a permuted data-ref in the VECTORIZED_STMT field.
4135 Since we scan the chain starting from its first node, their order
4136 corresponds to the order of data-refs in RESULT_CHAIN. */
4137 next_stmt = first_stmt;
4138 gap_count = 1;
4139 FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
4141 if (!next_stmt)
4142 break;
4144 /* Skip the gaps. Loads created for the gaps will be removed by the dead
4145 code elimination pass later. No need to check for the first stmt in
4146 the group, since it always exists.
4147 GROUP_GAP is the number of steps in elements from the previous
4148 access (if there is no gap GROUP_GAP is 1). We skip loads that
4149 correspond to the gaps. */
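/* For instance (illustration only): if the scalar loop reads only
   a[4*i] and a[4*i+2] from a group with interleaving factor 4,
   RESULT_CHAIN holds four permuted vectors but only the 1st and 3rd
   are recorded here; GROUP_GAP of the second scalar load is 2, so one
   chain entry is skipped before it, and the loads created for the
   gaps are removed as dead code later.  */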
4150 if (next_stmt != first_stmt
4151 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
4153 gap_count++;
4154 continue;
4157 while (next_stmt)
4159 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
4160 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
4161 copies, and we put the new vector statement in the first available
4162 RELATED_STMT. */
4163 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
4164 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
4165 else
4167 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4169 gimple prev_stmt =
4170 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
4171 gimple rel_stmt =
4172 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
4173 while (rel_stmt)
4175 prev_stmt = rel_stmt;
4176 rel_stmt =
4177 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
4180 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
4181 new_stmt;
4185 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
4186 gap_count = 1;
4187 /* If NEXT_STMT accesses the same DR as the previous statement,
4188 put the same TMP_DATA_REF as its vectorized statement; otherwise
4189 get the next data-ref from RESULT_CHAIN. */
4190 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4191 break;
4196 /* Function vect_can_force_dr_alignment_p.
4198 Returns whether the alignment of a DECL can be forced to be aligned
4199 on an ALIGNMENT-bit boundary. */
4201 bool
4202 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
4204 if (TREE_CODE (decl) != VAR_DECL)
4205 return false;
4207 if (DECL_EXTERNAL (decl))
4208 return false;
4210 if (TREE_ASM_WRITTEN (decl))
4211 return false;
4213 if (TREE_STATIC (decl))
4214 return (alignment <= MAX_OFILE_ALIGNMENT);
4215 else
4216 return (alignment <= MAX_STACK_ALIGNMENT);
4220 /* Return whether the data reference DR is supported with respect to its
4221 alignment.
4222 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
4223 if it is aligned, i.e., check if it is possible to vectorize it with different
4224 alignment. */
4226 enum dr_alignment_support
4227 vect_supportable_dr_alignment (struct data_reference *dr,
4228 bool check_aligned_accesses)
4230 gimple stmt = DR_STMT (dr);
4231 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4232 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4233 enum machine_mode mode = TYPE_MODE (vectype);
4234 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4235 struct loop *vect_loop = NULL;
4236 bool nested_in_vect_loop = false;
4238 if (aligned_access_p (dr) && !check_aligned_accesses)
4239 return dr_aligned;
4241 if (loop_vinfo)
4243 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
4244 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
4247 /* Possibly unaligned access. */
4249 /* We can choose between using the implicit realignment scheme (generating
4250 a misaligned_move stmt) and the explicit realignment scheme (generating
4251 aligned loads with a REALIGN_LOAD). There are two variants to the
4252 explicit realignment scheme: optimized, and unoptimized.
4253 We can optimize the realignment only if the step between consecutive
4254 vector loads is equal to the vector size. Since the vector memory
4255 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
4256 is guaranteed that the misalignment amount remains the same throughout the
4257 execution of the vectorized loop. Therefore, we can create the
4258 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
4259 at the loop preheader.
4261 However, in the case of outer-loop vectorization, when vectorizing a
4262 memory access in the inner-loop nested within the LOOP that is now being
4263 vectorized, while it is guaranteed that the misalignment of the
4264 vectorized memory access will remain the same in different outer-loop
4265 iterations, it is *not* guaranteed that it will remain the same throughout
4266 the execution of the inner-loop. This is because the inner-loop advances
4267 with the original scalar step (and not in steps of VS). If the inner-loop
4268 step happens to be a multiple of VS, then the misalignment remains fixed
4269 and we can use the optimized realignment scheme. For example:
4271 for (i=0; i<N; i++)
4272 for (j=0; j<M; j++)
4273 s += a[i+j];
4275 When vectorizing the i-loop in the above example, the step between
4276 consecutive vector loads is 1, and so the misalignment does not remain
4277 fixed across the execution of the inner-loop, and the realignment cannot
4278 be optimized (as illustrated in the following pseudo vectorized loop):
4280 for (i=0; i<N; i+=4)
4281 for (j=0; j<M; j++){
4282 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
4283 // when j is {0,1,2,3,4,5,6,7,...} respectively.
4284 // (assuming that we start from an aligned address).
4287 We therefore have to use the unoptimized realignment scheme:
4289 for (i=0; i<N; i+=4)
4290 for (j=k; j<M; j+=4)
4291 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
4292 // that the misalignment of the initial address is
4293 // 0).
4295 The loop can then be vectorized as follows:
4297 for (k=0; k<4; k++){
4298 rt = get_realignment_token (&vp[k]);
4299 for (i=0; i<N; i+=4){
4300 v1 = vp[i+k];
4301 for (j=k; j<M; j+=4){
4302 v2 = vp[i+j+VS-1];
4303 va = REALIGN_LOAD <v1,v2,rt>;
4304 vs += va;
4305 v1 = v2;
4308 } */
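/* Summary of the decision made by this function (descriptive only):

     aligned access (and CHECK_ALIGNED_ACCESSES not set)
                                          -> dr_aligned
     read, vec_realign_load available:
       misalignment may vary (inner-loop step != VS, or no loop info)
                                          -> dr_explicit_realign
       misalignment fixed                 -> dr_explicit_realign_optimized
     target supports misaligned access    -> dr_unaligned_supported
     otherwise                            -> dr_unaligned_unsupported  */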
4310 if (DR_IS_READ (dr))
4312 bool is_packed = false;
4313 tree type = (TREE_TYPE (DR_REF (dr)));
4315 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
4316 && (!targetm.vectorize.builtin_mask_for_load
4317 || targetm.vectorize.builtin_mask_for_load ()))
4319 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4320 if ((nested_in_vect_loop
4321 && (TREE_INT_CST_LOW (DR_STEP (dr))
4322 != GET_MODE_SIZE (TYPE_MODE (vectype))))
4323 || !loop_vinfo)
4324 return dr_explicit_realign;
4325 else
4326 return dr_explicit_realign_optimized;
4328 if (!known_alignment_for_access_p (dr))
4330 tree ba = DR_BASE_OBJECT (dr);
4332 if (ba)
4333 is_packed = contains_packed_reference (ba);
4336 if (targetm.vectorize.
4337 support_vector_misalignment (mode, type,
4338 DR_MISALIGNMENT (dr), is_packed))
4339 /* Can't software pipeline the loads, but can at least do them. */
4340 return dr_unaligned_supported;
4342 else
4344 bool is_packed = false;
4345 tree type = (TREE_TYPE (DR_REF (dr)));
4347 if (!known_alignment_for_access_p (dr))
4349 tree ba = DR_BASE_OBJECT (dr);
4351 if (ba)
4352 is_packed = contains_packed_reference (ba);
4355 if (targetm.vectorize.
4356 support_vector_misalignment (mode, type,
4357 DR_MISALIGNMENT (dr), is_packed))
4358 return dr_unaligned_supported;
4361 /* Unsupported. */
4362 return dr_unaligned_unsupported;