/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"

/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "expr.h"
#include "optabs.h"

/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different from the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines the
   VF.  If the smallest datatype in the loop is present only as the rhs of a
   promotion operation - we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */
tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}

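/* Illustrative example (not from the original sources): for the widening
   multiplication 'int_x = (int) short_y * (int) short_z', the result type
   is 'int' (4 bytes) while the first rhs operand is 'short' (2 bytes), so
   the function above returns the 'short' type and sets *LHS_SIZE_UNIT to 4
   and *RHS_SIZE_UNIT to 2; the VF is then derived from the smaller type.  */
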
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
    return -1;

  while (next_stmt && next_stmt != stmt)
    {
      result++;
      next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
    }

  if (next_stmt)
    return result;
  else
    return -1;
}

/* Function vect_insert_into_interleaving_chain.

   Insert DRA into the interleaving chain of DRB according to DRA's INIT.  */

static void
vect_insert_into_interleaving_chain (struct data_reference *dra,
                                     struct data_reference *drb)
{
  gimple prev, next;
  tree next_init;
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));

  prev = DR_GROUP_FIRST_DR (stmtinfo_b);
  next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
  while (next)
    {
      next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
      if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
        {
          /* Insert here.  */
          DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra);
          DR_GROUP_NEXT_DR (stmtinfo_a) = next;
          return;
        }
      prev = next;
      next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
    }

  /* We got to the end of the list.  Insert here.  */
  DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra);
  DR_GROUP_NEXT_DR (stmtinfo_a) = NULL;
}

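/* Illustrative example: if the chain of DRB holds data-refs with DR_INIT
   values {0, 8, 12} and DRA has DR_INIT 4, DRA is linked in between the
   members with inits 0 and 8, keeping the chain sorted by increasing
   init.  */
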
/* Function vect_update_interleaving_chain.

   For two data-refs DRA and DRB that are a part of a chain of interleaved
   data accesses, update the interleaving chain.  DRB's INIT is smaller than
   DRA's.

   There are four possible cases:
   1. New stmts - both DRA and DRB are not a part of any chain:
      FIRST_DR = DRB
      NEXT_DR (DRB) = DRA
   2. DRB is a part of a chain and DRA is not:
      no need to update FIRST_DR
      no need to insert DRB
      insert DRA according to init
   3. DRA is a part of a chain and DRB is not:
      if (init of FIRST_DR > init of DRB)
          FIRST_DR = DRB
          NEXT (FIRST_DR) = previous FIRST_DR
      insert DRB according to its init
   4. both DRA and DRB are in some interleaving chains:
      choose the chain with the smallest init of FIRST_DR
      insert the nodes of the second chain into the first one.  */
static void
vect_update_interleaving_chain (struct data_reference *drb,
                                struct data_reference *dra)
{
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  tree next_init, init_dra_chain, init_drb_chain;
  gimple first_a, first_b;
  tree node_init;
  gimple node, prev, next, first_stmt;

  /* 1. New stmts - both DRA and DRB are not a part of any chain.  */
  if (!DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
    {
      DR_GROUP_FIRST_DR (stmtinfo_a) = DR_STMT (drb);
      DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
      DR_GROUP_NEXT_DR (stmtinfo_b) = DR_STMT (dra);
      return;
    }

  /* 2. DRB is a part of a chain and DRA is not.  */
  if (!DR_GROUP_FIRST_DR (stmtinfo_a) && DR_GROUP_FIRST_DR (stmtinfo_b))
    {
      DR_GROUP_FIRST_DR (stmtinfo_a) = DR_GROUP_FIRST_DR (stmtinfo_b);
      /* Insert DRA into the chain of DRB.  */
      vect_insert_into_interleaving_chain (dra, drb);
      return;
    }

  /* 3. DRA is a part of a chain and DRB is not.  */
  if (DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
    {
      gimple old_first_stmt = DR_GROUP_FIRST_DR (stmtinfo_a);
      tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
                                                            old_first_stmt)));
      gimple tmp;

      if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
        {
          /* DRB's init is smaller than the init of the stmt previously marked
             as the first stmt of the interleaving chain of DRA.  Therefore, we
             update FIRST_STMT and put DRB in the head of the list.  */
          DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
          DR_GROUP_NEXT_DR (stmtinfo_b) = old_first_stmt;

          /* Update all the stmts in the list to point to the new FIRST_STMT.  */
          tmp = old_first_stmt;
          while (tmp)
            {
              DR_GROUP_FIRST_DR (vinfo_for_stmt (tmp)) = DR_STMT (drb);
              tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (tmp));
            }
        }
      else
        {
          /* Insert DRB in the list of DRA.  */
          vect_insert_into_interleaving_chain (drb, dra);
          DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
        }
      return;
    }

  /* 4. both DRA and DRB are in some interleaving chains.  */
  first_a = DR_GROUP_FIRST_DR (stmtinfo_a);
  first_b = DR_GROUP_FIRST_DR (stmtinfo_b);
  if (first_a == first_b)
    return;
  init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
  init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b)));

  if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
    {
      /* Insert the nodes of DRA chain into the DRB chain.
         After inserting a node, continue from this node of the DRB chain
         (don't start from the beginning).  */
      node = DR_GROUP_FIRST_DR (stmtinfo_a);
      prev = DR_GROUP_FIRST_DR (stmtinfo_b);
      first_stmt = first_b;
    }
  else
    {
      /* Insert the nodes of DRB chain into the DRA chain.
         After inserting a node, continue from this node of the DRA chain
         (don't start from the beginning).  */
      node = DR_GROUP_FIRST_DR (stmtinfo_b);
      prev = DR_GROUP_FIRST_DR (stmtinfo_a);
      first_stmt = first_a;
    }

  while (node)
    {
      node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
      next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
      while (next)
        {
          next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
          if (tree_int_cst_compare (next_init, node_init) > 0)
            {
              /* Insert here.  */
              DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
              DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = next;
              prev = node;
              break;
            }
          prev = next;
          next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
        }
      if (!next)
        {
          /* We got to the end of the list.  Insert here.  */
          DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
          DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = NULL;
          prev = node;
        }
      DR_GROUP_FIRST_DR (vinfo_for_stmt (node)) = first_stmt;
      node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
    }
}

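/* Illustrative example for case 4 above: if the chain of DRA has inits
   {4, 12} and the chain of DRB has inits {0, 8}, the first stmt of DRB's
   chain has the smaller init, so the nodes of DRA's chain are inserted
   into DRB's chain, yielding a single chain with inits {0, 4, 8, 12}
   whose FIRST_DR is the stmt with init 0.  */
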
/* Function vect_equal_offsets.

   Check if OFFSET1 and OFFSET2 are identical expressions.  */

static bool
vect_equal_offsets (tree offset1, tree offset2)
{
  bool res;

  STRIP_NOPS (offset1);
  STRIP_NOPS (offset2);

  if (offset1 == offset2)
    return true;

  if (TREE_CODE (offset1) != TREE_CODE (offset2)
      || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
    return false;

  res = vect_equal_offsets (TREE_OPERAND (offset1, 0),
                            TREE_OPERAND (offset2, 0));

  if (!res || !BINARY_CLASS_P (offset1))
    return res;

  res = vect_equal_offsets (TREE_OPERAND (offset1, 1),
                            TREE_OPERAND (offset2, 1));

  return res;
}

/* Function vect_check_interleaving.

   Check if DRA and DRB are a part of interleaving.  In case they are, insert
   DRA and DRB in an interleaving chain.  */

static bool
vect_check_interleaving (struct data_reference *dra,
                         struct data_reference *drb)
{
  HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b;

  /* Check that the data-refs have same first location (except init) and they
     are both either store or load (not load and store).  */
  if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb)
       && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
           || TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR
           || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
              != TREE_OPERAND (DR_BASE_ADDRESS (drb), 0)))
      || !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb))
      || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
      || DR_IS_READ (dra) != DR_IS_READ (drb))
    return false;

  /* Check:
     1. data-refs are of the same type
     2. their steps are equal
     3. the step (if greater than zero) is greater than the difference between
        data-refs' inits.  */
  type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
  type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));

  if (type_size_a != type_size_b
      || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
      || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
                              TREE_TYPE (DR_REF (drb))))
    return false;

  init_a = TREE_INT_CST_LOW (DR_INIT (dra));
  init_b = TREE_INT_CST_LOW (DR_INIT (drb));
  step = TREE_INT_CST_LOW (DR_STEP (dra));

  if (init_a > init_b)
    {
      /* If init_a == init_b + the size of the type * k, we have an
         interleaving, and DRB is accessed before DRA.  */
      diff_mod_size = (init_a - init_b) % type_size_a;

      if (step && (init_a - init_b) > step)
        return false;

      if (diff_mod_size == 0)
        {
          vect_update_interleaving_chain (drb, dra);
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "Detected interleaving ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }
          return true;
        }
    }
  else
    {
      /* If init_b == init_a + the size of the type * k, we have an
         interleaving, and DRA is accessed before DRB.  */
      diff_mod_size = (init_b - init_a) % type_size_a;

      if (step && (init_b - init_a) > step)
        return false;

      if (diff_mod_size == 0)
        {
          vect_update_interleaving_chain (dra, drb);
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "Detected interleaving ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }
          return true;
        }
    }

  return false;
}

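/* Illustrative example: with 4-byte elements, the accesses a[2*i] and
   a[2*i+1] both have step 8, with inits 0 and 4.  Their difference 4 is
   a multiple of the type size (diff_mod_size == 0) and does not exceed
   the step, so the two refs are chained into an interleaving group of
   size 8 / 4 == 2.  */
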
/* Check if the data references pointed to by DR_I and DR_J are the same or
   belong to the same interleaving group.  Return FALSE if the drs are
   different, otherwise return TRUE.  */

static bool
vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
{
  gimple stmt_i = DR_STMT (dr_i);
  gimple stmt_j = DR_STMT (dr_j);

  if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
      || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i))
          && DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j))
          && (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i))
              == DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j)))))
    return true;
  else
    return false;
}

/* If the address ranges represented by DDR_I and DDR_J are equal,
   return TRUE, otherwise return FALSE.  */

static bool
vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
{
  if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
       && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
      || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
          && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
    return true;
  else
    return false;
}

/* Insert DDR into LOOP_VINFO's list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return FALSE if versioning is not supported.  */

static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (vect_print_dump_info (REPORT_DR_DETAILS))
    {
      fprintf (vect_dump, "mark for run-time aliasing test between ");
      print_generic_expr (vect_dump, DR_REF (DDR_A (ddr)), TDF_SLIM);
      fprintf (vect_dump, " and ");
      print_generic_expr (vect_dump, DR_REF (DDR_B (ddr)), TDF_SLIM);
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump, "versioning not supported when optimizing for size.");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump, "versioning not yet supported for outer-loops.");
      return false;
    }

  VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
  return true;
}

/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
   the data dependence.  */

static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = NULL;
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* Don't bother to analyze statements marked as unvectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    return false;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    {
      /* Independent data accesses.  */
      vect_check_interleaving (dra, drb);
      return false;
    }

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
    return false;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "versioning for alias required: "
                                  "can't determine dependence between ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }

          /* Add to list of ddrs that need to be tested at run-time.  */
          return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
        }

      /* When vectorizing a basic block an unknown dependence can still mean
         strided access.  */
      if (vect_check_interleaving (dra, drb))
        return false;

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump, "can't determine dependence between ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }

      /* Mark the statements as unvectorizable.  */
      STMT_VINFO_VECTORIZABLE (stmtinfo_a) = false;
      STMT_VINFO_VECTORIZABLE (stmtinfo_b) = false;

      return false;
    }

  /* Versioning for alias is not yet supported for basic block SLP, and
     dependence distance is inapplicable, hence, in case of known data
     dependence, basic block vectorization is impossible for now.  */
  if (!loop_vinfo)
    {
      if (dra != drb && vect_check_interleaving (dra, drb))
        return false;

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump, "determined dependence between ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }

      return true;
    }

  /* Loop-based vectorization and known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump, "versioning for alias required: bad dist vector for ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }
      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++)
    {
      int dist = dist_v[loop_depth];

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump, "dependence distance = %d.", dist);

      if (dist == 0)
        {
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "dependence distance == 0 between ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }

          /* For interleaving, mark that there is a read-write dependency if
             necessary.  We checked earlier that one of the data-refs is a
             store.  */
          if (DR_IS_READ (dra))
            DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
          else if (DR_IS_READ (drb))
            DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            fprintf (vect_dump, "dependence distance negative.");
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            fprintf (vect_dump, "adjusting maximal vectorization factor to %i",
                     *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            fprintf (vect_dump, "dependence distance >= VF.");
          continue;
        }

      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized, possible dependence "
                              "between data-refs ");
          print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
          fprintf (vect_dump, " and ");
          print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
        }

      return true;
    }

  return false;
}

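/* Illustrative example: for the loop 'for (i = 0; i < N; i++) a[i+3] = a[i];'
   the dependence distance is 3.  If *MAX_VF was 16 on entry it is reduced
   to 3, after which abs (dist) >= *max_vf holds and the dependence no
   longer constrains vectorization with a factor of at most 3.  */
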
/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */

bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
                                   bb_vec_info bb_vinfo, int *max_vf)
{
  unsigned int i;
  VEC (ddr_p, heap) *ddrs = NULL;
  struct data_dependence_relation *ddr;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_dependences ===");

  if (loop_vinfo)
    ddrs = LOOP_VINFO_DDRS (loop_vinfo);
  else
    ddrs = BB_VINFO_DDRS (bb_vinfo);

  for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}

/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data
      reference cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed.  Misalignment is calculated
   only for trivial cases.  TODO.  */

static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign;
  tree aligned_to, alignment;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_compute_data_ref_alignment:");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop evenly
     divides by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "inner step divides the vector-size.");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "inner step doesn't divide the vector-size.");
          misalign = NULL_TREE;
        }
    }

  base = build_fold_indirect_ref (base_addr);
  alignment = ssize_int (TYPE_ALIGN (vectype) / BITS_PER_UNIT);

  if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (vect_print_dump_info (REPORT_ALIGNMENT))
        {
          fprintf (vect_dump, "Unknown alignment for access: ");
          print_generic_expr (vect_dump, base, TDF_SLIM);
        }
      return true;
    }

  if ((DECL_P (base)
       && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
                                alignment) >= 0)
      || (TREE_CODE (base_addr) == SSA_NAME
          && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
                                                      TREE_TYPE (base_addr)))),
                                   alignment) >= 0))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Do not change the alignment of global variables if
         flag_section_anchors is enabled.  */
      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
          || (TREE_STATIC (base) && flag_section_anchors))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "can't force alignment of ref: ");
              print_generic_expr (vect_dump, ref, TDF_SLIM);
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "force alignment");
      DECL_ALIGN (base) = TYPE_ALIGN (vectype);
      DECL_USER_ALIGN (base) = 1;
    }

  /* At this point we assume that the base is aligned.  */
  gcc_assert (base_aligned
              || (TREE_CODE (base) == VAR_DECL
                  && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));

  /* Modulo alignment.  */
  misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);

  if (!host_integerp (misalign, 1))
    {
      /* Negative or overflowed misalignment value.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected misalign value");
      return false;
    }

  SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      print_generic_expr (vect_dump, ref, TDF_SLIM);
    }

  return true;
}

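/* Illustrative example: for a 16-byte vector type and a data-ref whose
   DR_INIT is 20 relative to a base that is (or has been forced to be)
   16-byte aligned, the modulo computation yields 20 mod 16 == 4, so
   DR_MISALIGNMENT (dr) is set to 4 bytes.  */
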
/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}

/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */

static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  VEC(dr_p,heap) *same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info)));
  if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
    dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  for (i = 0; VEC_iterate (dr_p, same_align_drs, i, current_dr); i++)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += npeel * dr_size;
      misal %= GET_MODE_SIZE (TYPE_MODE (vectype));
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "Setting misalignment to -1.");
  SET_DR_MISALIGNMENT (dr, -1);
}

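/* Illustrative example: peeling NPEEL == 3 iterations for a 4-byte
   data-ref with known misalignment 8 and a 16-byte vector type gives
   (8 + 3 * 4) % 16 == 4, so the misalignment of DR in the vector loop
   becomes 4 bytes.  */
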
/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */

static bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
           && DR_GROUP_FIRST_DR (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr);
      if (!supportable_dr_alignment)
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            {
              if (DR_IS_READ (dr))
                fprintf (vect_dump,
                         "not vectorized: unsupported unaligned load.");
              else
                fprintf (vect_dump,
                         "not vectorized: unsupported unaligned store.");

              print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
            }
          return false;
        }
      if (supportable_dr_alignment != dr_aligned
          && vect_print_dump_info (REPORT_ALIGNMENT))
        fprintf (vect_dump, "Vectorizing an unaligned access.");
    }
  return true;
}

/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */

static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if the number of iterations in
         the prolog loop ({VF - misalignment}) is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % DR_GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr));
        }
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = (TREE_TYPE (DR_REF (dr)));
      tree ba = DR_BASE_OBJECT (dr);
      bool is_packed = false;

      if (ba)
        is_packed = contains_packed_reference (ba);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unknown misalignment, is_packed = %d", is_packed);
      if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}

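/* Illustrative example: with V4SI (nelements == 4), a group of size 2
   and a misalignment of one element, the prolog loop would need
   4 - 1 == 3 iterations; 3 is not a multiple of the group size 2, so
   alignment is not reachable by peeling for that access.  */
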
/* Function vect_enhance_data_refs_alignment

   This pass will use loop versioning and loop peeling in order to enhance
   the alignment of data references in the loop.

   FOR NOW: we assume that whatever versioning/peeling takes place, only the
   original loop is to be vectorized; any other loops that are created by
   the transformations performed in this pass - are not supposed to be
   vectorized.  This restriction will be relaxed.

   This pass will require a cost model to guide it whether to apply peeling
   or versioning or a combination of the two.  For example, the scheme that
   intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') whose alignment you want to force by doing
   peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
   other accesses are not necessarily aligned, or (2) use loop versioning to
   generate one loop in which all accesses are aligned, and another loop in
   which only 'p' is necessarily aligned.

   ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
   Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

   Devising a cost model is the most critical aspect of this work.  It will
   guide us on which access to peel for, whether to use loop versioning, how
   many versions to create, etc.  The cost model will probably consist of
   generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).

   Here are the general steps involved in alignment enhancements:

     -- original loop, before alignment analysis:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = unknown
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- After vect_compute_data_refs_alignment:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- Possibility 1: we do loop versioning:
     if (p is aligned) {
        for (i=0; i<N; i++){            # loop 1A
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i=0; i<N; i++){            # loop 1B
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     -- Possibility 2: we do loop peeling:
     for (i = 0; i < 3; i++){           # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     for (i = 3; i < N; i++){           # loop 2A
        x = q[i];                       # DR_MISALIGNMENT(q) = 0
        p[i] = y;                       # DR_MISALIGNMENT(p) = unknown
     }

     -- Possibility 3: combination of loop peeling and versioning:
     for (i = 0; i < 3; i++){           # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     if (p is aligned) {
        for (i = 3; i<N; i++){          # loop 3A
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i = 3; i<N; i++){          # loop 3B
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     These loops are later passed to loop_transform to be vectorized.  The
     vectorizer will use the alignment information to guide the transformation
     (whether to generate regular loads/stores, or with special handling for
     misalignment).  */
static bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
  VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum dr_alignment_support supportable_dr_alignment;
  struct data_reference *dr0 = NULL;
  struct data_reference *dr;
  unsigned int i;
  bool do_peeling = false;
  bool do_versioning = false;
  bool stat;
  gimple stmt;
  stmt_vec_info stmt_info;
  int vect_versioning_for_alias_required;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_enhance_data_refs_alignment ===");

  /* While cost model enhancements are expected in the future, the high level
     view of the code at this time is as follows:

     A) If there is a misaligned access then see if peeling to align
        this access can make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures
        as needed and return true.

     B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy vect_supportable_dr_alignment
        then see if loop versioning checks can be used to make all data
        references satisfy vect_supportable_dr_alignment.  If so, update
        data structures as needed and return true.

     C) If neither peeling nor versioning were successful then return false if
        any data reference does not satisfy vect_supportable_dr_alignment.

     D) Return true (all data references satisfy vect_supportable_dr_alignment).

     Note, Possibility 3 above (which is peeling and versioning together) is not
     being done at this time.  */

  /* (1) Peeling to force alignment.  */

  /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
     Considerations:
     + How many accesses will become aligned due to the peeling
     - How many accesses will become unaligned due to the peeling,
       and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).

     The scheme we use FORNOW: peel to force the alignment of the first
     unsupported misaligned access in the loop.

     TODO: Use a cost model.  */

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);

      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
          && DR_GROUP_FIRST_DR (stmt_info) != stmt)
        continue;

      if (!DR_IS_READ (dr) && !aligned_access_p (dr))
        {
          do_peeling = vector_alignment_reachable_p (dr);
          if (do_peeling)
            dr0 = dr;
          if (!do_peeling && vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "vector alignment may not be reachable");
          break;
        }
    }

  vect_versioning_for_alias_required
    = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);

  /* Temporarily, if versioning for alias is required, we disable peeling
     until we support peeling and versioning.  Often peeling for alignment
     will require peeling for loop-bound, which in turn requires that we
     know how to adjust the loop ivs after the loop.  */
  if (vect_versioning_for_alias_required
      || !vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;

  if (do_peeling)
    {
      int mis;
      int npeel = 0;
      gimple stmt = DR_STMT (dr0);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      if (known_alignment_for_access_p (dr0))
        {
          /* Since it's known at compile time, compute the number of iterations
             in the peeled loop (the peeling factor) for use in updating
             DR_MISALIGNMENT values.  The peeling factor is the vectorization
             factor minus the misalignment as an element count.  */
          mis = DR_MISALIGNMENT (dr0);
          mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
          npeel = nelements - mis;

          /* For interleaved data access every iteration accesses all the
             members of the group, therefore we divide the number of iterations
             by the group size.  */
          stmt_info = vinfo_for_stmt (DR_STMT (dr0));
          if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
            npeel /= DR_GROUP_SIZE (stmt_info);

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Try peeling by %d", npeel);
        }

      /* Ensure that all data refs can be vectorized after the peel.  */
      for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
        {
          int save_misalignment;

          if (dr == dr0)
            continue;

          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);
          /* For interleaving, only the alignment of the first access
             matters.  */
          if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
              && DR_GROUP_FIRST_DR (stmt_info) != stmt)
            continue;

          save_misalignment = DR_MISALIGNMENT (dr);
          vect_update_misalignment_for_peel (dr, dr0, npeel);
          supportable_dr_alignment = vect_supportable_dr_alignment (dr);
          SET_DR_MISALIGNMENT (dr, save_misalignment);

          if (!supportable_dr_alignment)
            {
              do_peeling = false;
              break;
            }
        }

      if (do_peeling)
        {
          /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
             If the misalignment of DR_i is identical to that of dr0 then set
             DR_MISALIGNMENT (DR_i) to zero.  If the misalignment of DR_i and
             dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
             by the peeling factor times the element size of DR_i (MOD the
             vectorization factor times the size).  Otherwise, the
             misalignment of DR_i must be set to unknown.  */
          for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
            if (dr != dr0)
              vect_update_misalignment_for_peel (dr, dr0, npeel);

          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
          LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
          SET_DR_MISALIGNMENT (dr0, 0);
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "Alignment of access forced using peeling.");

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Peeling for alignment will be applied.");

          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          gcc_assert (stat);
          return stat;
        }
    }

  /* (2) Versioning to force alignment.  */

  /* Try versioning if:
     1) flag_tree_vect_loop_version is TRUE
     2) optimize loop for speed
     3) there is at least one unsupported misaligned data ref with an unknown
        misalignment, and
     4) all misaligned data refs with a known misalignment are supported, and
     5) the number of runtime alignment checks is within reason.  */

  do_versioning =
        flag_tree_vect_loop_version
        && optimize_loop_nest_for_speed_p (loop)
        && (!loop->inner); /* FORNOW */

  if (do_versioning)
    {
      for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
        {
          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);

          /* For interleaving, only the alignment of the first access
             matters.  */
          if (aligned_access_p (dr)
              || (STMT_VINFO_STRIDED_ACCESS (stmt_info)
                  && DR_GROUP_FIRST_DR (stmt_info) != stmt))
            continue;

          supportable_dr_alignment = vect_supportable_dr_alignment (dr);

          if (!supportable_dr_alignment)
            {
              gimple stmt;
              int mask;
              tree vectype;

              if (known_alignment_for_access_p (dr)
                  || VEC_length (gimple,
                                 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
                {
                  do_versioning = false;
                  break;
                }

              stmt = DR_STMT (dr);
              vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
              gcc_assert (vectype);

              /* The rightmost bits of an aligned address must be zeros.
                 Construct the mask needed for this test.  For example,
                 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
                 mask must be 15 = 0xf.  */
              mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;

              /* FORNOW: use the same mask to test all potentially unaligned
                 references in the loop.  The vectorizer currently supports
                 a single vector size, see the reference to
                 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
                 vectorization factor is computed.  */
              gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
                          || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
              LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
              VEC_safe_push (gimple, heap,
                             LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
                             DR_STMT (dr));
            }
        }

      /* Versioning requires at least one misaligned data reference.  */
      if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
        do_versioning = false;
      else if (!do_versioning)
        VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
    }

  if (do_versioning)
    {
      VEC(gimple,heap) *may_misalign_stmts
        = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
      gimple stmt;

      /* It can now be assumed that the data references in the statements
         in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
         of the loop being vectorized.  */
      for (i = 0; VEC_iterate (gimple, may_misalign_stmts, i, stmt); i++)
        {
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          dr = STMT_VINFO_DATA_REF (stmt_info);
          SET_DR_MISALIGNMENT (dr, 0);
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "Alignment of access forced using versioning.");
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Versioning for alignment will be applied.");

      /* Peeling and versioning can't be done together at this time.  */
      gcc_assert (! (do_peeling && do_versioning));

      stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
      gcc_assert (stat);
      return stat;
    }

  /* This point is reached if neither peeling nor versioning is being done.  */
  gcc_assert (! (do_peeling || do_versioning));

  stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
  return stat;
}

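/* Illustrative example of the peeling factor above: with V4SI
   (nelements == 4) and DR0 being a 4-byte access with known misalignment
   of 8 bytes, mis == 8 / 4 == 2 elements and npeel == 4 - 2 == 2, i.e.,
   two scalar iterations are peeled so that DR0 enters the vector loop
   aligned.  */
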
/* Function vect_find_same_alignment_drs.

   Update group and alignment relations according to the chosen
   vectorization factor.  */

static void
vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
                              loop_vec_info loop_vinfo)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
  int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
  lambda_vector dist_v;
  unsigned int loop_depth;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return;

  if ((DR_IS_READ (dra) && DR_IS_READ (drb)) || dra == drb)
    return;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return;

  /* Loop-based vectorization and known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    return;

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  for (i = 0; VEC_iterate (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v); i++)
    {
      int dist = dist_v[loop_depth];

      if (vect_print_dump_info (REPORT_DR_DETAILS))
        fprintf (vect_dump, "dependence distance = %d.", dist);

      /* Same loop iteration.  */
      if (dist == 0
          || (dist % vectorization_factor == 0 && dra_size == drb_size))
        {
          /* Two references with distance zero have the same alignment.  */
          VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
          VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "accesses have the same alignment.");
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "dependence distance modulo vf == 0 between ");
              print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
              fprintf (vect_dump, " and ");
              print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
            }
        }
    }
}

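/* Illustrative example: with a vectorization factor of 4, the accesses
   a[i] and a[i+8] have dependence distance 8; 8 % 4 == 0 and the element
   sizes match, so the two data-refs are recorded as having the same
   alignment.  */
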
/* Function vect_analyze_data_refs_alignment

   Analyze the alignment of the data-references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_data_refs_alignment ===");

  /* Mark groups of data references with same alignment using
     data dependence information.  */
  if (loop_vinfo)
    {
      VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
      struct data_dependence_relation *ddr;
      unsigned int i;

      for (i = 0; VEC_iterate (ddr_p, ddrs, i, ddr); i++)
        vect_find_same_alignment_drs (ddr, loop_vinfo);
    }

  if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: can't calculate alignment for data ref.");
      return false;
    }

  return true;
}

/* Analyze groups of strided accesses: check that DR belongs to a group of
   strided accesses of legal size, step, etc.  Detect gaps, single element
   interleaving, and other special cases.  Set strided access info.
   Collect groups of strided stores for further use in SLP analysis.  */

static bool
vect_analyze_group_access (struct data_reference *dr)
{
  tree step = DR_STEP (dr);
  tree scalar_type = TREE_TYPE (DR_REF (dr));
  HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
  HOST_WIDE_INT stride;
  bool slp_impossible = false;

  /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of
     the interleaving group (including gaps).  */
  stride = dr_step / type_size;

  /* Not consecutive access is possible only if it is a part of interleaving.  */
  if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
    {
      /* Check if this DR is a part of interleaving, and is a single
         element of the group that is accessed in the loop.  */

      /* Gaps are supported only for loads.  STEP must be a multiple of the
         type size.  The size of the group must be a power of 2.  */
      if (DR_IS_READ (dr)
          && (dr_step % type_size) == 0
          && stride > 0
          && exact_log2 (stride) != -1)
        {
          DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = stmt;
          DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
          if (vect_print_dump_info (REPORT_DR_DETAILS))
            {
              fprintf (vect_dump, "Detected single element interleaving ");
              print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
              fprintf (vect_dump, " step ");
              print_generic_expr (vect_dump, step, TDF_SLIM);
            }
          return true;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "not consecutive access ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      if (bb_vinfo)
        {
          /* Mark the statement as unvectorizable.  */
          STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
          return true;
        }

      return false;
    }

  if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt)
    {
      /* First stmt in the interleaving chain.  Check the chain.  */
      gimple next = DR_GROUP_NEXT_DR (vinfo_for_stmt (stmt));
      struct data_reference *data_ref = dr;
      unsigned int count = 1;
      tree next_step;
      tree prev_init = DR_INIT (data_ref);
      gimple prev = stmt;
      HOST_WIDE_INT diff, count_in_bytes, gaps = 0;

      while (next)
        {
          /* Skip same data-refs.  In case that two or more stmts share a
             data-ref (supported only for loads), we vectorize only the first
             stmt, and the rest get their vectorized loads from the first
             one.  */
          if (!tree_int_cst_compare (DR_INIT (data_ref),
                                     DR_INIT (STMT_VINFO_DATA_REF (
                                                   vinfo_for_stmt (next)))))
            {
              if (!DR_IS_READ (data_ref))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Two store stmts share the same dr.");
                  return false;
                }

              /* Check that there are no load-store dependencies for these
                 loads to prevent a case of load-store-load to the same
                 location.  */
              if (DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
                  || DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "READ_WRITE dependence in interleaving.");
                  return false;
                }

              /* For load use the same data-ref load.  */
              DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;

              prev = next;
              next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
              continue;
            }
          prev = next;

          /* Check that all the accesses have the same STEP.  */
          next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
          if (tree_int_cst_compare (step, next_step))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "not consecutive access in interleaving");
              return false;
            }

          data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
          /* Check that the distance between two accesses is equal to the type
             size.  Otherwise, we have gaps.  */
          diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
                  - TREE_INT_CST_LOW (prev_init)) / type_size;
          if (diff != 1)
            {
              /* FORNOW: SLP of accesses with gaps is not supported.  */
              slp_impossible = true;
              if (!DR_IS_READ (data_ref))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "interleaved store with gaps");
                  return false;
                }

              gaps += diff - 1;
            }

          /* Store the gap from the previous member of the group.  If there is
             no gap in the access, DR_GROUP_GAP is always 1.  */
          DR_GROUP_GAP (vinfo_for_stmt (next)) = diff;

          prev_init = DR_INIT (data_ref);
          next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
          /* Count the number of data-refs in the chain.  */
          count++;
        }

      /* COUNT is the number of accesses found, we multiply it by the size of
         the type to get COUNT_IN_BYTES.  */
      count_in_bytes = type_size * count;

      /* Check that the size of the interleaving (including gaps) is not
         greater than STEP.  */
      if (dr_step && dr_step < count_in_bytes + gaps * type_size)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "interleaving size is greater than step for ");
              print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
            }
          return false;
        }

      /* Check that the size of the interleaving is equal to STEP for stores,
         i.e., that there are no gaps.  */
      if (dr_step && dr_step != count_in_bytes)
        {
          if (DR_IS_READ (dr))
            {
              slp_impossible = true;
              /* There is a gap after the last load in the group.  This gap is
                 a difference between the stride and the number of elements.
                 When there is no gap, this difference should be 0.  */
              DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
            }
          else
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "interleaved store with gaps");
              return false;
            }
        }

      /* Check that STEP is a multiple of type size.  */
      if (dr_step && (dr_step % type_size) != 0)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "step is not a multiple of type size: step ");
              print_generic_expr (vect_dump, step, TDF_SLIM);
              fprintf (vect_dump, " size ");
              print_generic_expr (vect_dump, TYPE_SIZE_UNIT (scalar_type),
                                  TDF_SLIM);
            }
          return false;
        }

      /* FORNOW: we handle only interleaving that is a power of 2.
         We don't fail here if it may be still possible to vectorize the
         group using SLP.  If not, the size of the group will be checked in
         vect_analyze_operations, and the vectorization will fail.  */
      if (exact_log2 (stride) == -1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "interleaving is not a power of 2");

          if (slp_impossible)
            return false;
        }

      if (stride == 0)
        stride = count;

      DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);

      /* SLP: create an SLP data structure for every interleaving group of
         stores for further analysis in vect_analyze_slp.  */
      if (!DR_IS_READ (dr) && !slp_impossible)
        {
          if (loop_vinfo)
            VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
                           stmt);
          if (bb_vinfo)
            VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
                           stmt);
        }
    }

  return true;
}

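/* Illustrative example: with 4-byte elements, the loads a[4*i], a[4*i+1]
   and a[4*i+3] all have step 16, so stride == 16 / 4 == 4.  The chain has
   count == 3 members with one element (a[4*i+2]) missing; gaps are
   allowed for loads, so this is accepted as an interleaving group of
   size 4, but SLP of the group is marked impossible.  */
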
/* Analyze the access pattern of the data-reference DR.
   In case of non-consecutive accesses call vect_analyze_group_access() to
   analyze groups of strided accesses.  */

static bool
vect_analyze_data_ref_access (struct data_reference *dr)
{
  tree step = DR_STEP (dr);
  tree scalar_type = TREE_TYPE (DR_REF (dr));
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (loop_vinfo && !step)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data-ref access in loop");
      return false;
    }

  /* Don't allow invariant accesses in loops.  */
  if (loop_vinfo && dr_step == 0)
    return false;

  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      /* Interleaved accesses are not yet supported within outer-loop
         vectorization for references in the inner-loop.  */
      DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL;

      /* For the rest of the analysis we use the outer-loop step.  */
      step = STMT_VINFO_DR_STEP (stmt_info);
      dr_step = TREE_INT_CST_LOW (step);

      if (dr_step == 0)
        {
          if (vect_print_dump_info (REPORT_ALIGNMENT))
            fprintf (vect_dump, "zero step in outer loop.");
          if (DR_IS_READ (dr))
            return true;
          else
            return false;
        }
    }

  if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)))
    {
      /* Mark that it is not interleaving.  */
      DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL;
      return true;
    }

  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      if (vect_print_dump_info (REPORT_ALIGNMENT))
        fprintf (vect_dump, "strided access in outer loop.");
      return false;
    }

  /* Not consecutive access - check if it's a part of interleaving group.  */
  return vect_analyze_group_access (dr);
}

/* Function vect_analyze_data_ref_accesses.

   Analyze the access pattern of all the data references in the loop.

   FORNOW: the only access pattern that is considered vectorizable is a
           simple step 1 (consecutive) access.

   FORNOW: handle only arrays and pointer accesses.  */

bool
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_data_ref_accesses ===");

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_analyze_data_ref_access (dr))
      {
        if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
          fprintf (vect_dump, "not vectorized: complicated access pattern.");

        if (bb_vinfo)
          {
            /* Mark the statement as not vectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}

/* Function vect_prune_runtime_alias_test_list.

   Prune a list of ddrs to be tested at run-time by versioning for alias.
   Return FALSE if the resulting list of ddrs is longer than allowed by
   PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE.  */

bool
vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
{
  VEC (ddr_p, heap) * ddrs =
    LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
  unsigned i, j;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_prune_runtime_alias_test_list ===");

  for (i = 0; i < VEC_length (ddr_p, ddrs); )
    {
      bool found;
      ddr_p ddr_i;

      ddr_i = VEC_index (ddr_p, ddrs, i);
      found = false;

      for (j = 0; j < i; j++)
        {
          ddr_p ddr_j = VEC_index (ddr_p, ddrs, j);

          if (vect_vfa_range_equal (ddr_i, ddr_j))
            {
              if (vect_print_dump_info (REPORT_DR_DETAILS))
                {
                  fprintf (vect_dump, "found equal ranges ");
                  print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_i)), TDF_SLIM);
                  fprintf (vect_dump, ", ");
                  print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_i)), TDF_SLIM);
                  fprintf (vect_dump, " and ");
                  print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_j)), TDF_SLIM);
                  fprintf (vect_dump, ", ");
                  print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_j)), TDF_SLIM);
                }
              found = true;
              break;
            }
        }

      if (found)
        {
          VEC_ordered_remove (ddr_p, ddrs, i);
          continue;
        }
      i++;
    }

  if (VEC_length (ddr_p, ddrs) >
       (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
    {
      if (vect_print_dump_info (REPORT_DR_DETAILS))
        {
          fprintf (vect_dump,
                   "disable versioning for alias - max number of generated "
                   "checks exceeded.");
        }

      VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0);

      return false;
    }

  return true;
}
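
/* Illustrative sketch (not part of GCC, kept out of the build): the shape
   of the run-time test that versioning for alias emits for each ddr that
   survives the pruning above.  Two segments of SEGMENT_LENGTH bytes do not
   overlap iff one ends no later than the other begins; the names here are
   hypothetical, not the vectorizer's internals.  */
#if 0
static int
segments_do_not_alias (const char *addr_a, const char *addr_b,
                       unsigned long segment_length)
{
  return (addr_a + segment_length <= addr_b
          || addr_b + segment_length <= addr_a);
}
#endif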
/* Function vect_analyze_data_refs.

   Find all the data references in the loop or basic block.

   The general structure of the analysis of data refs in the vectorizer is as
   follows:
   1- vect_analyze_data_refs(loop/bb): call
      compute_data_dependences_for_loop/bb to find and analyze all data-refs
      in the loop/bb and their dependences.
   2- vect_analyze_dependences(): apply dependence testing using ddrs.
   3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
   4- vect_analyze_drs_access(): check that ref_stmt.step is ok.  */

bool
vect_analyze_data_refs (loop_vec_info loop_vinfo,
                        bb_vec_info bb_vinfo,
                        int *min_vf)
{
  struct loop *loop = NULL;
  basic_block bb = NULL;
  unsigned int i;
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  tree scalar_type;
  bool res;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      res = compute_data_dependences_for_loop
        (loop, true, &LOOP_VINFO_DATAREFS (loop_vinfo),
         &LOOP_VINFO_DDRS (loop_vinfo));

      if (!res)
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump, "not vectorized: loop contains function calls"
                     " or data references that cannot be analyzed");
          return false;
        }

      datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
    }
  else
    {
      bb = BB_VINFO_BB (bb_vinfo);
      res = compute_data_dependences_for_bb (bb, true,
                                             &BB_VINFO_DATAREFS (bb_vinfo),
                                             &BB_VINFO_DDRS (bb_vinfo));
      if (!res)
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump, "not vectorized: basic block contains function"
                     " calls or data references that cannot be analyzed");
          return false;
        }

      datarefs = BB_VINFO_DATAREFS (bb_vinfo);
    }

  /* Go through the data-refs, check that the analysis succeeded.  Update
     pointer from stmt_vec_info struct to DR and vectype.  */

  for (i = 0; VEC_iterate (data_reference_p, datarefs, i, dr); i++)
    {
      gimple stmt;
      stmt_vec_info stmt_info;
      tree base, offset, init;
      int vf;

      if (!dr || !DR_REF (dr))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump, "not vectorized: unhandled data-ref ");
          return false;
        }

      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);

      /* Check that analysis of the data-ref succeeded.  */
      if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
          || !DR_STEP (dr))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            {
              fprintf (vect_dump, "not vectorized: data ref analysis failed ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (bb_vinfo)
            {
              /* Mark the statement as not vectorizable.  */
              STMT_VINFO_VECTORIZABLE (stmt_info) = false;
              continue;
            }
          else
            return false;
        }

      if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump, "not vectorized: base addr of dr is a "
                     "constant");

          if (bb_vinfo)
            {
              /* Mark the statement as not vectorizable.  */
              STMT_VINFO_VECTORIZABLE (stmt_info) = false;
              continue;
            }
          else
            return false;
        }

      base = unshare_expr (DR_BASE_ADDRESS (dr));
      offset = unshare_expr (DR_OFFSET (dr));
      init = unshare_expr (DR_INIT (dr));

      /* Update DR field in stmt_vec_info struct.  */

      /* If the dataref is in an inner-loop of the loop that is considered
         for vectorization, we also want to analyze the access relative to
         the outer-loop (DR contains information only relative to the
         inner-most enclosing loop).  We do that by building a reference to
         the first location accessed by the inner-loop, and analyze it
         relative to the outer-loop.  */
      if (loop && nested_in_vect_loop_p (loop, stmt))
        {
          tree outer_step, outer_base, outer_init;
          HOST_WIDE_INT pbitsize, pbitpos;
          tree poffset;
          enum machine_mode pmode;
          int punsignedp, pvolatilep;
          affine_iv base_iv, offset_iv;
          tree dinit;

          /* Build a reference to the first location accessed by the
             inner-loop: *(BASE+INIT).  (The first location is actually
             BASE+INIT+OFFSET, but we add OFFSET separately later).  */
          tree inner_base = build_fold_indirect_ref
                                (fold_build2 (POINTER_PLUS_EXPR,
                                              TREE_TYPE (base), base,
                                              fold_convert (sizetype, init)));

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "analyze in outer-loop: ");
              print_generic_expr (vect_dump, inner_base, TDF_SLIM);
            }

          outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
                          &poffset, &pmode, &punsignedp, &pvolatilep, false);
          gcc_assert (outer_base != NULL_TREE);

          if (pbitpos % BITS_PER_UNIT != 0)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "failed: bit offset alignment.\n");
              return false;
            }

          outer_base = build_fold_addr_expr (outer_base);
          if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
                          &base_iv, false))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "failed: evolution of base is not affine.\n");
              return false;
            }

          if (offset)
            {
              if (poffset)
                poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
                                       poffset);
              else
                poffset = offset;
            }

          if (!poffset)
            {
              offset_iv.base = ssize_int (0);
              offset_iv.step = ssize_int (0);
            }
          else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
                               &offset_iv, false))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "evolution of offset is not affine.\n");
              return false;
            }

          outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
          split_constant_offset (base_iv.base, &base_iv.base, &dinit);
          outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
          split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
          outer_init = size_binop (PLUS_EXPR, outer_init, dinit);

          outer_step = size_binop (PLUS_EXPR,
                                   fold_convert (ssizetype, base_iv.step),
                                   fold_convert (ssizetype, offset_iv.step));

          STMT_VINFO_DR_STEP (stmt_info) = outer_step;
          /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
          STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
          STMT_VINFO_DR_INIT (stmt_info) = outer_init;
          STMT_VINFO_DR_OFFSET (stmt_info) =
                                fold_convert (ssizetype, offset_iv.base);
          STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
                                size_int (highest_pow2_factor (offset_iv.base));

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "\touter base_address: ");
              print_generic_expr (vect_dump,
                                  STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
                                  TDF_SLIM);
              fprintf (vect_dump, "\n\touter offset from base address: ");
              print_generic_expr (vect_dump, STMT_VINFO_DR_OFFSET (stmt_info),
                                  TDF_SLIM);
              fprintf (vect_dump, "\n\touter constant offset from base address: ");
              print_generic_expr (vect_dump, STMT_VINFO_DR_INIT (stmt_info),
                                  TDF_SLIM);
              fprintf (vect_dump, "\n\touter step: ");
              print_generic_expr (vect_dump, STMT_VINFO_DR_STEP (stmt_info),
                                  TDF_SLIM);
              fprintf (vect_dump, "\n\touter aligned to: ");
              print_generic_expr (vect_dump,
                                  STMT_VINFO_DR_ALIGNED_TO (stmt_info),
                                  TDF_SLIM);
            }
        }

      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            {
              fprintf (vect_dump,
                       "not vectorized: more than one data ref in stmt: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (bb_vinfo)
            {
              /* Mark the statement as not vectorizable.  */
              STMT_VINFO_VECTORIZABLE (stmt_info) = false;
              continue;
            }
          else
            return false;
        }

      STMT_VINFO_DATA_REF (stmt_info) = dr;

      /* Set vectype for STMT.  */
      scalar_type = TREE_TYPE (DR_REF (dr));
      STMT_VINFO_VECTYPE (stmt_info) =
                get_vectype_for_scalar_type (scalar_type);
      if (!STMT_VINFO_VECTYPE (stmt_info))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            {
              fprintf (vect_dump,
                       "not vectorized: no vectype for stmt: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
              fprintf (vect_dump, " scalar_type: ");
              print_generic_expr (vect_dump, scalar_type, TDF_DETAILS);
            }

          if (bb_vinfo)
            {
              /* Mark the statement as not vectorizable.  */
              STMT_VINFO_VECTORIZABLE (stmt_info) = false;
              continue;
            }
          else
            return false;
        }

      /* Adjust the minimal vectorization factor according to the
         vector type.  */
      vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
      if (vf > *min_vf)
        *min_vf = vf;
    }

  return true;
}
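
/* Illustrative sketch (not part of GCC, kept out of the build): the
   "analyze relative to the outer-loop" step above.  For a nested access
   such as in[i+j], the first location the inner j-loop touches is &in[i],
   i.e. an affine function of the outer induction variable -- which is what
   the simple_iv checks on *(BASE+INIT) verify.  */
#if 0
#include <assert.h>

static void
outer_loop_view (short *in, int N, int M)
{
  int i;
  (void) M;
  for (i = 0; i < N; i++)
    {
      short *first = &in[i];    /* first inner-loop access: in[i+0] */
      /* Outer-loop view: base 'in', init 0, step sizeof (short).  */
      assert ((char *) first == (char *) in + i * (long) sizeof (short));
    }
}
#endif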
/* Function vect_get_new_vect_var.

   Returns a name for a new variable.  The current naming scheme appends the
   prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
   the name of vectorizer generated variables, and appends that to NAME if
   provided.  */

tree
vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
{
  const char *prefix;
  tree new_vect_var;

  switch (var_kind)
  {
  case vect_simple_var:
    prefix = "vect_";
    break;
  case vect_scalar_var:
    prefix = "stmp_";
    break;
  case vect_pointer_var:
    prefix = "vect_p";
    break;
  default:
    gcc_unreachable ();
  }

  if (name)
    {
      char* tmp = concat (prefix, name, NULL);
      new_vect_var = create_tmp_var (type, tmp);
      free (tmp);
    }
  else
    new_vect_var = create_tmp_var (type, prefix);

  /* Mark vector typed variable as a gimple register variable.  */
  if (TREE_CODE (type) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (new_vect_var) = true;

  return new_vect_var;
}
/* Function vect_create_addr_base_for_vector_ref.

   Create an expression that computes the address of the first memory location
   that will be accessed for a data reference.

   Input:
   STMT: The statement containing the data reference.
   NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
   OFFSET: Optional.  If supplied, it is added to the initial address.
   LOOP:    Specify relative to which loop-nest should the address be computed.
            For example, when the dataref is in an inner-loop nested in an
            outer-loop that is now being vectorized, LOOP can be either the
            outer-loop, or the inner-loop.  The first memory location accessed
            by the following dataref ('in' points to short):

                for (i=0; i<N; i++)
                   for (j=0; j<M; j++)
                     s += in[i+j]

            is as follows:
            if LOOP=i_loop:     &in             (relative to i_loop)
            if LOOP=j_loop:     &in+i*2B        (relative to j_loop)

   Output:
   1. Return an SSA_NAME whose value is the address of the memory location of
      the first vector of the data reference.
   2. If new_stmt_list is not NULL_TREE after return then the caller must insert
      these statement(s) which define the returned SSA_NAME.

   FORNOW: We are only handling array accesses with step 1.  */

tree
vect_create_addr_base_for_vector_ref (gimple stmt,
                                      gimple_seq *new_stmt_list,
                                      tree offset,
                                      struct loop *loop)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
  tree base_name;
  tree data_ref_base_var;
  tree vec_stmt;
  tree addr_base, addr_expr;
  tree dest;
  gimple_seq seq = NULL;
  tree base_offset = unshare_expr (DR_OFFSET (dr));
  tree init = unshare_expr (DR_INIT (dr));
  tree vect_ptr_type;
  tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree base;

  if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
    {
      struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);

      gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));

      data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
      base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
      init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
    }

  if (loop_vinfo)
    base_name = build_fold_indirect_ref (data_ref_base);
  else
    {
      base_offset = ssize_int (0);
      init = ssize_int (0);
      base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
    }

  data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
  add_referenced_var (data_ref_base_var);
  data_ref_base = force_gimple_operand (data_ref_base, &seq, true,
                                        data_ref_base_var);
  gimple_seq_add_seq (new_stmt_list, seq);

  /* Create base_offset */
  base_offset = size_binop (PLUS_EXPR,
                            fold_convert (sizetype, base_offset),
                            fold_convert (sizetype, init));
  dest = create_tmp_var (sizetype, "base_off");
  add_referenced_var (dest);
  base_offset = force_gimple_operand (base_offset, &seq, true, dest);
  gimple_seq_add_seq (new_stmt_list, seq);

  if (offset)
    {
      tree tmp = create_tmp_var (sizetype, "offset");

      add_referenced_var (tmp);
      offset = fold_build2 (MULT_EXPR, sizetype,
                            fold_convert (sizetype, offset), step);
      base_offset = fold_build2 (PLUS_EXPR, sizetype,
                                 base_offset, offset);
      base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
      gimple_seq_add_seq (new_stmt_list, seq);
    }

  /* base + base_offset */
  if (loop_vinfo)
    addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base),
                             data_ref_base, base_offset);
  else
    {
      if (TREE_CODE (DR_REF (dr)) == INDIRECT_REF)
        addr_base = unshare_expr (TREE_OPERAND (DR_REF (dr), 0));
      else
        addr_base = build1 (ADDR_EXPR,
                            build_pointer_type (TREE_TYPE (DR_REF (dr))),
                            unshare_expr (DR_REF (dr)));
    }

  vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
  base = get_base_address (DR_REF (dr));
  if (base
      && INDIRECT_REF_P (base))
    vect_ptr_type
      = build_qualified_type (vect_ptr_type,
                              TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));

  vec_stmt = fold_convert (vect_ptr_type, addr_base);
  addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                     get_name (base_name));
  add_referenced_var (addr_expr);
  vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr);
  gimple_seq_add_seq (new_stmt_list, seq);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created ");
      print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
    }

  return vec_stmt;
}
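
/* Illustrative sketch (not part of GCC, kept out of the build): the
   address the function above builds, in scalar terms.  For a dataref with
   base address BASE, constant offset INIT, variable offset DR_OFF, and an
   extra OFFSET given in elements of STEP bytes, the returned value is
   BASE + (DR_OFF + INIT) + OFFSET*STEP.  */
#if 0
static char *
addr_base_sketch (char *base, unsigned long dr_off, unsigned long init,
                  unsigned long offset, unsigned long step)
{
  unsigned long base_off = dr_off + init;   /* the "base_off" temporary  */
  base_off += offset * step;                /* the optional OFFSET part  */
  return base + base_off;                   /* base + base_offset        */
}
#endif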
/* Function vect_create_data_ref_ptr.

   Create a new pointer to vector type (vp), that points to the first location
   accessed in the loop by STMT, along with the def-use update chain to
   appropriately advance the pointer through the loop iterations.  Also set
   aliasing information for the pointer.  This vector pointer is used by the
   callers to this function to create a memory reference expression for vector
   load/store access.

   Input:
   1. STMT: a stmt that references memory.  Expected to be of the form
         GIMPLE_ASSIGN <name, data-ref> or
         GIMPLE_ASSIGN <data-ref, name>.
   2. AT_LOOP: the loop where the vector memref is to be created.
   3. OFFSET (optional): an offset to be added to the initial address accessed
        by the data-ref in STMT.
   4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
        pointing to the initial address.
   5. TYPE: if not NULL indicates the required type of the data-ref.

   Output:
   1. Declare a new ptr to vector_type, and have it point to the base of the
      data reference (initial addressed accessed by the data reference).
      For example, for vector of type V8HI, the following code is generated:

      v8hi *vp;
      vp = (v8hi *)initial_address;

      if OFFSET is not supplied:
         initial_address = &a[init];
      if OFFSET is supplied:
         initial_address = &a[init + OFFSET];

      Return the initial_address in INITIAL_ADDRESS.

   2. If ONLY_INIT is true, just return the initial pointer.  Otherwise, also
      update the pointer in each iteration of the loop.

      Return the increment stmt that updates the pointer in PTR_INCR.

   3. Set INV_P to true if the access pattern of the data reference in the
      vectorized loop is invariant.  Set it to false otherwise.

   4. Return the pointer.  */

tree
vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
                          tree offset, tree *initial_address, gimple *ptr_incr,
                          bool only_init, bool *inv_p)
{
  tree base_name;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  bool nested_in_vect_loop = false;
  struct loop *containing_loop = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vect_ptr_type;
  tree vect_ptr;
  tree new_temp;
  gimple vec_stmt;
  gimple_seq new_stmt_list = NULL;
  edge pe = NULL;
  basic_block new_bb;
  tree vect_ptr_init;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vptr;
  gimple_stmt_iterator incr_gsi;
  bool insert_after;
  tree indx_before_incr, indx_after_incr;
  gimple incr;
  tree step;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  tree base;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      containing_loop = (gimple_bb (stmt))->loop_father;
      pe = loop_preheader_edge (loop);
    }
  else
    {
      gcc_assert (bb_vinfo);
      only_init = true;
      *ptr_incr = NULL;
    }

  /* Check the step (evolution) of the load in LOOP, and record
     whether it's invariant.  */
  if (nested_in_vect_loop)
    step = STMT_VINFO_DR_STEP (stmt_info);
  else
    step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));

  if (tree_int_cst_compare (step, size_zero_node) == 0)
    *inv_p = true;
  else
    *inv_p = false;

  /* Create an expression for the first address accessed by this load
     in LOOP.  */
  base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      tree data_ref_base = base_name;
      fprintf (vect_dump, "create vector-pointer variable to type: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
      if (TREE_CODE (data_ref_base) == VAR_DECL
          || TREE_CODE (data_ref_base) == ARRAY_REF)
        fprintf (vect_dump, "  vectorizing an array ref: ");
      else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
        fprintf (vect_dump, "  vectorizing a record based array ref: ");
      else if (TREE_CODE (data_ref_base) == SSA_NAME)
        fprintf (vect_dump, "  vectorizing a pointer ref: ");
      print_generic_expr (vect_dump, base_name, TDF_SLIM);
    }

  /** (1) Create the new vector-pointer variable:  **/
  vect_ptr_type = build_pointer_type (vectype);
  base = get_base_address (DR_REF (dr));
  if (base
      && INDIRECT_REF_P (base))
    vect_ptr_type
      = build_qualified_type (vect_ptr_type,
                              TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
  vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                    get_name (base_name));

  /* Vector types inherit the alias set of their component type by default so
     we need to use a ref-all pointer if the data reference does not conflict
     with the created vector data reference because it is not addressable.  */
  if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
                              get_alias_set (DR_REF (dr))))
    {
      vect_ptr_type
        = build_pointer_type_for_mode (vectype,
                                       TYPE_MODE (vect_ptr_type), true);
      vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                        get_name (base_name));
    }

  /* Likewise for any of the data references in the stmt group.  */
  else if (STMT_VINFO_DR_GROUP_SIZE (stmt_info) > 1)
    {
      gimple orig_stmt = STMT_VINFO_DR_GROUP_FIRST_DR (stmt_info);
      do
        {
          tree lhs = gimple_assign_lhs (orig_stmt);
          if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
                                      get_alias_set (lhs)))
            {
              vect_ptr_type
                = build_pointer_type_for_mode (vectype,
                                               TYPE_MODE (vect_ptr_type), true);
              vect_ptr
                = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
                                         get_name (base_name));
              break;
            }

          orig_stmt = STMT_VINFO_DR_GROUP_NEXT_DR (vinfo_for_stmt (orig_stmt));
        }
      while (orig_stmt);
    }

  add_referenced_var (vect_ptr);

  /** Note: If the dataref is in an inner-loop nested in LOOP, and we are
      vectorizing LOOP (i.e. outer-loop vectorization), we need to create two
      def-use update cycles for the pointer: one relative to the outer-loop
      (LOOP), which is what steps (3) and (4) below do.  The other is relative
      to the inner-loop (which is the inner-most loop containing the dataref),
      and this is done be step (5) below.

      When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
      inner-most loop, and so steps (3),(4) work the same, and step (5) is
      redundant.  Steps (3),(4) create the following:

        vp0 = &base_addr;
        LOOP:   vp1 = phi(vp0,vp2)
                ...
                ...
                vp2 = vp1 + step
                goto LOOP

      If there is an inner-loop nested in loop, then step (5) will also be
      applied, and an additional update in the inner-loop will be created:

        vp0 = &base_addr;
        LOOP:   vp1 = phi(vp0,vp2)
                ...
        inner:     vp3 = phi(vp1,vp4)
                   vp4 = vp3 + inner_step
                   if () goto inner
                ...
                vp2 = vp1 + step
                if () goto LOOP   */

  /** (3) Calculate the initial address the vector-pointer, and set
          the vector-pointer to point to it before the loop:  **/

  /* Create: (&(base[init_val+offset]) in the loop preheader.  */

  new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
                                                   offset, loop);
  if (new_stmt_list)
    {
      if (pe)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
          gcc_assert (!new_bb);
        }
      else
        gsi_insert_seq_before (&gsi, new_stmt_list, GSI_SAME_STMT);
    }

  *initial_address = new_temp;

  /* Create: p = (vectype *) initial_base  */
  vec_stmt = gimple_build_assign (vect_ptr,
                                  fold_convert (vect_ptr_type, new_temp));
  vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt);
  gimple_assign_set_lhs (vec_stmt, vect_ptr_init);
  if (pe)
    {
      new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
      gcc_assert (!new_bb);
    }
  else
    gsi_insert_before (&gsi, vec_stmt, GSI_SAME_STMT);

  /** (4) Handle the updating of the vector-pointer inside the loop.
          This is needed when ONLY_INIT is false, and also when AT_LOOP
          is the inner-loop nested in LOOP (during outer-loop
          vectorization).  **/

  /* No update in loop is required.  */
  if (only_init && (!loop_vinfo || at_loop == loop))
    {
      /* Copy the points-to information if it exists.  */
      if (DR_PTR_INFO (dr))
        duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr));
      vptr = vect_ptr_init;
    }
  else
    {
      /* The step of the vector pointer is the Vector Size.  */
      tree step = TYPE_SIZE_UNIT (vectype);
      /* One exception to the above is when the scalar step of the load in
         LOOP is zero.  In this case the step here is also zero.  */
      if (*inv_p)
        step = size_zero_node;

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (vect_ptr_init,
                 fold_convert (vect_ptr_type, step),
                 vect_ptr, loop, &incr_gsi, insert_after,
                 &indx_before_incr, &indx_after_incr);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      /* Copy the points-to information if it exists.  */
      if (DR_PTR_INFO (dr))
        {
          duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
          duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
        }
      if (ptr_incr)
        *ptr_incr = incr;

      vptr = indx_before_incr;
    }

  if (!nested_in_vect_loop || only_init)
    return vptr;

  /** (5) Handle the updating of the vector-pointer inside the inner-loop
          nested in LOOP, if exists: **/

  gcc_assert (nested_in_vect_loop);
  if (!only_init)
    {
      standard_iv_increment_position (containing_loop, &incr_gsi,
                                      &insert_after);
      create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
                 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
                 &indx_after_incr);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      /* Copy the points-to information if it exists.  */
      if (DR_PTR_INFO (dr))
        {
          duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
          duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
        }
      if (ptr_incr)
        *ptr_incr = incr;

      return indx_before_incr;
    }
  else
    gcc_unreachable ();
}
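
/* Illustrative sketch (not part of GCC, kept out of the build): the
   def-use cycle that steps (3),(4) above materialize, written as plain C
   for 16-byte V8HI vectors over short data.  */
#if 0
typedef short v8hi __attribute__ ((vector_size (16)));

static void
vp_update_cycle (short *initial_address, int vec_iters)
{
  v8hi *vp = (v8hi *) initial_address;  /* vp0 = &base_addr        */
  int it;
  for (it = 0; it < vec_iters; it++)
    {
      v8hi v = *vp;                     /* use of vp1              */
      (void) v;
      vp = vp + 1;                      /* vp2 = vp1 + step (VS)   */
    }
}
#endif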
/* Function bump_vector_ptr

   Increment a pointer (to a vector type) by vector-size.  If requested,
   i.e. if PTR-INCR is given, then also connect the new increment stmt
   to the existing def-use update-chain of the pointer, by modifying
   the PTR_INCR as illustrated below:

   The pointer def-use update-chain before this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
        PTR_INCR:       p_2 = DATAREF_PTR + step

   The pointer def-use update-chain after this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
                        NEW_DATAREF_PTR = DATAREF_PTR + BUMP
                        ....
        PTR_INCR:       p_2 = NEW_DATAREF_PTR + step

   Input:
   DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
                 in the loop.
   PTR_INCR - optional.  The stmt that updates the pointer in each iteration of
              the loop.  The increment amount across iterations is expected
              to be vector_size.
   BSI - location where the new update stmt is to be placed.
   STMT - the original scalar memory-access stmt that is being vectorized.
   BUMP - optional.  The offset by which to bump the pointer.  If not given,
          the offset is assumed to be vector_size.

   Output: Return NEW_DATAREF_PTR as illustrated above.  */

tree
bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
                 gimple stmt, tree bump)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree ptr_var = SSA_NAME_VAR (dataref_ptr);
  tree update = TYPE_SIZE_UNIT (vectype);
  gimple incr_stmt;
  ssa_op_iter iter;
  use_operand_p use_p;
  tree new_dataref_ptr;

  if (bump)
    update = bump;

  incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
                                            dataref_ptr, update);
  new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
  gimple_assign_set_lhs (incr_stmt, new_dataref_ptr);
  vect_finish_stmt_generation (stmt, incr_stmt, gsi);

  /* Copy the points-to information if it exists.  */
  if (DR_PTR_INFO (dr))
    duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));

  if (!ptr_incr)
    return new_dataref_ptr;

  /* Update the vector-pointer's cross-iteration increment.  */
  FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (use == dataref_ptr)
        SET_USE (use_p, new_dataref_ptr);
      else
        gcc_assert (tree_int_cst_compare (use, update) == 0);
    }

  return new_dataref_ptr;
}
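
/* Illustrative sketch (not part of GCC, kept out of the build): the effect
   of bump_vector_ptr when a group needs two vector loads per iteration.
   The pointer is bumped by one vector mid-iteration, and the existing
   cross-iteration increment is redirected to start from the bumped value,
   keeping the overall advance at 2*VS per iteration.  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

static void
bumped_accesses (v4si *p, v4si *out, int vec_iters)
{
  int it;
  for (it = 0; it < vec_iters; it++)
    {
      v4si a = *p;
      v4si *q = p + 1;          /* NEW_DATAREF_PTR = DATAREF_PTR + BUMP */
      v4si b = *q;
      out[2 * it] = a;
      out[2 * it + 1] = b;
      p = q + 1;                /* PTR_INCR now advances from the bump  */
    }
}
#endif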
/* Function vect_create_destination_var.

   Create a new temporary of type VECTYPE.  */

tree
vect_create_destination_var (tree scalar_dest, tree vectype)
{
  tree vec_dest;
  const char *new_name;
  tree type;
  enum vect_var_kind kind;

  kind = vectype ? vect_simple_var : vect_scalar_var;
  type = vectype ? vectype : TREE_TYPE (scalar_dest);

  gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);

  new_name = get_name (scalar_dest);
  if (!new_name)
    new_name = "var_";
  vec_dest = vect_get_new_vect_var (type, kind, new_name);
  add_referenced_var (vec_dest);

  return vec_dest;
}
/* Function vect_strided_store_supported.

   Returns TRUE if INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
   and FALSE otherwise.  */

bool
vect_strided_store_supported (tree vectype)
{
  optab interleave_high_optab, interleave_low_optab;
  int mode;

  mode = (int) TYPE_MODE (vectype);

  /* Check that the operation is supported.  */
  interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
                                               vectype, optab_default);
  interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
                                              vectype, optab_default);
  if (!interleave_high_optab || !interleave_low_optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab for interleave.");
      return false;
    }

  if (optab_handler (interleave_high_optab, mode)->insn_code
      == CODE_FOR_nothing
      || optab_handler (interleave_low_optab, mode)->insn_code
      == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "interleave op not supported by target.");
      return false;
    }

  return true;
}
/* Function vect_permute_store_chain.

   Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
   a power of 2, generate interleave_high/low stmts to reorder the data
   correctly for the stores.  Return the final references for stores in
   RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements.  We assign a number to
   each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:  0  8 16 24  1  9 17 25
   2nd vec:  2 10 18 26  3 11 19 27
   3rd vec:  4 12 20 28  5 13 21 29
   4th vec:  6 14 22 30  7 15 23 31

   i.e., we interleave the contents of the four vectors in their order.

   We use interleave_high/low instructions to create such output.  The input
   of each interleave_high/low operation is two vectors:
   1st vec    2nd vec
   0 1 2 3    4 5 6 7
   the even elements of the result vector are obtained left-to-right from the
   high/low elements of the first vector.  The odd elements of the result are
   obtained left-to-right from the high/low elements of the second vector.
   The output of interleave_high will be:   0 4 1 5
   and of interleave_low:                   2 6 3 7

   The permutation is done in log LENGTH stages.  In each stage
   interleave_high and interleave_low stmts are created for each pair of
   vectors in DR_CHAIN, where the first argument is taken from the first half
   of DR_CHAIN and the second argument from its second half.
   In our example,

   I1: interleave_high (1st vec, 3rd vec)
   I2: interleave_low (1st vec, 3rd vec)
   I3: interleave_high (2nd vec, 4th vec)
   I4: interleave_low (2nd vec, 4th vec)

   The output for the first stage is:

   I1:  0 16  1 17  2 18  3 19
   I2:  4 20  5 21  6 22  7 23
   I3:  8 24  9 25 10 26 11 27
   I4: 12 28 13 29 14 30 15 31

   The output of the second stage, i.e. the final result is:

   I1:  0  8 16 24  1  9 17 25
   I2:  2 10 18 26  3 11 19 27
   I3:  4 12 20 28  5 13 21 29
   I4:  6 14 22 30  7 15 23 31.  */

bool
vect_permute_store_chain (VEC(tree,heap) *dr_chain,
                          unsigned int length,
                          gimple stmt,
                          gimple_stmt_iterator *gsi,
                          VEC(tree,heap) **result_chain)
{
  tree perm_dest, vect1, vect2, high, low;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  int i;
  unsigned int j;
  enum tree_code high_code, low_code;

  /* Check that the operation is supported.  */
  if (!vect_strided_store_supported (vectype))
    return false;

  *result_chain = VEC_copy (tree, heap, dr_chain);

  for (i = 0; i < exact_log2 (length); i++)
    {
      for (j = 0; j < length/2; j++)
        {
          vect1 = VEC_index (tree, dr_chain, j);
          vect2 = VEC_index (tree, dr_chain, j+length/2);

          /* Create interleaving stmt:
             in the case of big endian:
                                high = interleave_high (vect1, vect2)
             and in the case of little endian:
                                high = interleave_low (vect1, vect2).  */
          perm_dest = create_tmp_var (vectype, "vect_inter_high");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);
          if (BYTES_BIG_ENDIAN)
            {
              high_code = VEC_INTERLEAVE_HIGH_EXPR;
              low_code = VEC_INTERLEAVE_LOW_EXPR;
            }
          else
            {
              low_code = VEC_INTERLEAVE_HIGH_EXPR;
              high_code = VEC_INTERLEAVE_LOW_EXPR;
            }
          perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest,
                                                    vect1, vect2);
          high = make_ssa_name (perm_dest, perm_stmt);
          gimple_assign_set_lhs (perm_stmt, high);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          VEC_replace (tree, *result_chain, 2*j, high);

          /* Create interleaving stmt:
             in the case of big endian:
                               low = interleave_low (vect1, vect2)
             and in the case of little endian:
                               low = interleave_high (vect1, vect2).  */
          perm_dest = create_tmp_var (vectype, "vect_inter_low");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);
          perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest,
                                                    vect1, vect2);
          low = make_ssa_name (perm_dest, perm_stmt);
          gimple_assign_set_lhs (perm_stmt, low);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          VEC_replace (tree, *result_chain, 2*j+1, low);
        }
      dr_chain = VEC_copy (tree, heap, *result_chain);
    }
  return true;
}
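
/* Illustrative sketch (not part of GCC, kept out of the build): a scalar
   model of one interleave_high/low stage described above, for vectors of
   NELT elements held in plain arrays.  For inputs {0 1 2 3} and {4 5 6 7}
   it produces {0 4 1 5} and {2 6 3 7}, matching the comment.  */
#if 0
#define NELT 4

static void
interleave_high (const int *a, const int *b, int *out)
{
  int k;
  for (k = 0; k < NELT / 2; k++)
    {
      out[2 * k] = a[k];                /* high (first) half of 1st vec  */
      out[2 * k + 1] = b[k];            /* high (first) half of 2nd vec  */
    }
}

static void
interleave_low (const int *a, const int *b, int *out)
{
  int k;
  for (k = 0; k < NELT / 2; k++)
    {
      out[2 * k] = a[NELT / 2 + k];     /* low (second) half of 1st vec  */
      out[2 * k + 1] = b[NELT / 2 + k]; /* low (second) half of 2nd vec  */
    }
}
#endif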
/* Function vect_setup_realignment

   This function is called when vectorizing an unaligned load using
   the dr_explicit_realign[_optimized] scheme.
   This function generates the following code at the loop prolog:

      p = initial_addr;
   x  msq_init = *(floor(p));   # prolog load
      realignment_token = call target_builtin;
    loop:
   x  msq = phi (msq_init, ---)

   The stmts marked with x are generated only for the case of
   dr_explicit_realign_optimized.

   The code above sets up a new (vector) pointer, pointing to the first
   location accessed by STMT, and a "floor-aligned" load using that pointer.
   It also generates code to compute the "realignment-token" (if the relevant
   target hook was defined), and creates a phi-node at the loop-header bb
   whose arguments are the result of the prolog-load (created by this
   function) and the result of a load that takes place in the loop (to be
   created by the caller to this function).

   For the case of dr_explicit_realign_optimized:
   The caller to this function uses the phi-result (msq) to create the
   realignment code inside the loop, and sets up the missing phi argument,
   as follows:
    loop:
      msq = phi (msq_init, lsq)
      lsq = *(floor(p'));        # load in loop
      result = realign_load (msq, lsq, realignment_token);

   For the case of dr_explicit_realign:
    loop:
      msq = *(floor(p));        # load in loop
      p' = p + (VS-1);
      lsq = *(floor(p'));       # load in loop
      result = realign_load (msq, lsq, realignment_token);

   Input:
   STMT - (scalar) load stmt to be vectorized.  This load accesses
          a memory location that may be unaligned.
   BSI - place where new code is to be inserted.
   ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
                              is used.

   Output:
   REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
                       target hook, if defined.
   Return value - the result of the loop-header phi node.  */

tree
vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
                        tree *realignment_token,
                        enum dr_alignment_support alignment_support_scheme,
                        tree init_addr,
                        struct loop **at_loop)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  edge pe;
  tree scalar_dest = gimple_assign_lhs (stmt);
  tree vec_dest;
  gimple inc;
  tree ptr;
  tree data_ref;
  gimple new_stmt;
  basic_block new_bb;
  tree msq_init = NULL_TREE;
  tree new_temp;
  gimple phi_stmt;
  tree msq = NULL_TREE;
  gimple_seq stmts = NULL;
  bool inv_p;
  bool compute_in_loop = false;
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  struct loop *loop_for_initial_load;

  gcc_assert (alignment_support_scheme == dr_explicit_realign
              || alignment_support_scheme == dr_explicit_realign_optimized);

  /* We need to generate three things:
     1. the misalignment computation
     2. the extra vector load (for the optimized realignment scheme).
     3. the phi node for the two vectors from which the realignment is
      done (for the optimized realignment scheme).  */

  /* 1. Determine where to generate the misalignment computation.

     If INIT_ADDR is NULL_TREE, this indicates that the misalignment
     calculation will be generated by this function, outside the loop (in the
     preheader).  Otherwise, INIT_ADDR had already been computed for us by the
     caller, inside the loop.

     Background: If the misalignment remains fixed throughout the iterations
     of the loop, then both realignment schemes are applicable, and also the
     misalignment computation can be done outside LOOP.  This is because we
     are vectorizing LOOP, and so the memory accesses in LOOP advance in
     steps that are a multiple of VS (the Vector Size), and therefore the
     misalignment in different vectorized LOOP iterations is always the same.
     The problem arises only if the memory access is in an inner-loop nested
     inside LOOP, which is now being vectorized using outer-loop
     vectorization.  This is the only case when the misalignment of the
     memory access may not remain fixed throughout the iterations of the
     inner-loop (as explained in detail in vect_supportable_dr_alignment).
     In this case, not only is the optimized realignment scheme not
     applicable, but also the misalignment computation (and generation of the
     realignment token that is passed to REALIGN_LOAD) have to be done inside
     the loop.

     In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
     or not, which in turn determines if the misalignment is computed inside
     the inner-loop, or outside LOOP.  */

  if (init_addr != NULL_TREE)
    {
      compute_in_loop = true;
      gcc_assert (alignment_support_scheme == dr_explicit_realign);
    }

  /* 2. Determine where to generate the extra vector load.

     For the optimized realignment scheme, instead of generating two vector
     loads in each iteration, we generate a single extra vector load in the
     preheader of the loop, and in each iteration reuse the result of the
     vector load from the previous iteration.  In case the memory access is in
     an inner-loop nested inside LOOP, which is now being vectorized using
     outer-loop vectorization, we need to determine whether this initial
     vector load should be generated at the preheader of the inner-loop, or
     can be generated at the preheader of LOOP.  If the memory access has no
     evolution in LOOP, it can be generated in the preheader of LOOP.
     Otherwise, it has to be generated inside LOOP (in the preheader of the
     inner-loop).  */

  if (nested_in_vect_loop)
    {
      tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
      bool invariant_in_outerloop =
        (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
      loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
    }
  else
    loop_for_initial_load = loop;
  if (at_loop)
    *at_loop = loop_for_initial_load;

  /* 3. For the case of the optimized realignment, create the first vector
      load at the loop preheader.  */

  if (alignment_support_scheme == dr_explicit_realign_optimized)
    {
      /* Create msq_init = *(floor(p1)) in the loop preheader  */

      gcc_assert (!compute_in_loop);
      pe = loop_preheader_edge (loop_for_initial_load);
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE,
                                      &init_addr, &inc, true, &inv_p);
      data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
      new_stmt = gimple_build_assign (vec_dest, data_ref);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      mark_symbols_for_renaming (new_stmt);
      new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
      gcc_assert (!new_bb);
      msq_init = gimple_assign_lhs (new_stmt);
    }

  /* 4. Create realignment token using a target builtin, if available.
      It is done either inside the containing loop, or before LOOP (as
      determined above).  */

  if (targetm.vectorize.builtin_mask_for_load)
    {
      tree builtin_decl;

      /* Compute INIT_ADDR - the initial addressed accessed by this memref.  */
      if (compute_in_loop)
        gcc_assert (init_addr); /* already computed by the caller.  */
      else
        {
          /* Generate the INIT_ADDR computation outside LOOP.  */
          init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
                                                            NULL_TREE, loop);
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      builtin_decl = targetm.vectorize.builtin_mask_for_load ();
      new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
      vec_dest =
        vect_create_destination_var (scalar_dest,
                                     gimple_call_return_type (new_stmt));
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);

      if (compute_in_loop)
        gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      else
        {
          /* Generate the misalignment computation outside LOOP.  */
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }

      *realignment_token = gimple_call_lhs (new_stmt);

      /* The result of the CALL_EXPR to this builtin is determined from
         the value of the parameter and no global variables are touched
         which makes the builtin a "const" function.  Requiring the
         builtin to have the "const" attribute makes it unnecessary
         to call mark_call_clobbered.  */
      gcc_assert (TREE_READONLY (builtin_decl));
    }

  if (alignment_support_scheme == dr_explicit_realign)
    return msq;

  gcc_assert (!compute_in_loop);
  gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);

  /* 5. Create msq = phi <msq_init, lsq> in loop  */

  pe = loop_preheader_edge (containing_loop);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  msq = make_ssa_name (vec_dest, NULL);
  phi_stmt = create_phi_node (msq, containing_loop->header);
  SSA_NAME_DEF_STMT (msq) = phi_stmt;
  add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);

  return msq;
}
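
/* Illustrative sketch (not part of GCC, kept out of the build): the
   explicit realignment scheme in scalar terms.  Two aligned loads that
   straddle the (possibly misaligned) address are combined; RT plays the
   role of the realignment token and is simply the byte misalignment.  */
#if 0
static void
realign_load_sketch (const char *p /* possibly unaligned */, char *out,
                     int vs /* vector size in bytes, power of 2 */)
{
  unsigned long rt = (unsigned long) p & (unsigned long) (vs - 1);
  const char *msq = p - rt;     /* msq = *(floor(p));       aligned */
  const char *lsq = msq + vs;   /* lsq = *(floor(p + vs));  aligned */
  int k;
  for (k = 0; k < vs; k++)      /* result = realign_load (msq, lsq, rt) */
    out[k] = (k + (int) rt < vs) ? msq[k + rt] : lsq[k + (int) rt - vs];
}
#endif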
/* Function vect_strided_load_supported.

   Returns TRUE if EXTRACT_EVEN and EXTRACT_ODD operations are supported,
   and FALSE otherwise.  */

bool
vect_strided_load_supported (tree vectype)
{
  optab perm_even_optab, perm_odd_optab;
  int mode;

  mode = (int) TYPE_MODE (vectype);

  perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype,
                                         optab_default);
  if (!perm_even_optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab for perm_even.");
      return false;
    }

  if (optab_handler (perm_even_optab, mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "perm_even op not supported by target.");
      return false;
    }

  perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype,
                                        optab_default);
  if (!perm_odd_optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab for perm_odd.");
      return false;
    }

  if (optab_handler (perm_odd_optab, mode)->insn_code == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "perm_odd op not supported by target.");
      return false;
    }

  return true;
}
/* Function vect_permute_load_chain.

   Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
   a power of 2, generate extract_even/odd stmts to reorder the input data
   correctly.  Return the final references for loads in RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements.  We assign a number to
   each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:  0 4  8 12 16 20 24 28
   2nd vec:  1 5  9 13 17 21 25 29
   3rd vec:  2 6 10 14 18 22 26 30
   4th vec:  3 7 11 15 19 23 27 31

   i.e., the first output vector should contain the first elements of each
   interleaving group, etc.

   We use extract_even/odd instructions to create such output.  The input of
   each extract_even/odd operation is two vectors
   1st vec    2nd vec
   0 1 2 3    4 5 6 7

   and the output is the vector of extracted even/odd elements.  The output
   of extract_even will be:   0 2 4 6
   and of extract_odd:        1 3 5 7

   The permutation is done in log LENGTH stages.  In each stage extract_even
   and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
   their order.  In our example,

   E1: extract_even (1st vec, 2nd vec)
   E2: extract_odd (1st vec, 2nd vec)
   E3: extract_even (3rd vec, 4th vec)
   E4: extract_odd (3rd vec, 4th vec)

   The output for the first stage will be:

   E1:  0  2  4  6  8 10 12 14
   E2:  1  3  5  7  9 11 13 15
   E3: 16 18 20 22 24 26 28 30
   E4: 17 19 21 23 25 27 29 31

   In order to proceed and create the correct sequence for the next stage (or
   for the correct output, if the second stage is the last one, as in our
   example), we first put the output of extract_even operation and then the
   output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
   The input for the second stage is:

   1st vec (E1):  0  2  4  6  8 10 12 14
   2nd vec (E3): 16 18 20 22 24 26 28 30
   3rd vec (E2):  1  3  5  7  9 11 13 15
   4th vec (E4): 17 19 21 23 25 27 29 31

   The output of the second stage:

   E1: 0 4  8 12 16 20 24 28
   E2: 2 6 10 14 18 22 26 30
   E3: 1 5  9 13 17 21 25 29
   E4: 3 7 11 15 19 23 27 31

   And RESULT_CHAIN after reordering:

   1st vec (E1):  0 4  8 12 16 20 24 28
   2nd vec (E3):  1 5  9 13 17 21 25 29
   3rd vec (E2):  2 6 10 14 18 22 26 30
   4th vec (E4):  3 7 11 15 19 23 27 31.  */

bool
vect_permute_load_chain (VEC(tree,heap) *dr_chain,
                         unsigned int length,
                         gimple stmt,
                         gimple_stmt_iterator *gsi,
                         VEC(tree,heap) **result_chain)
{
  tree perm_dest, data_ref, first_vect, second_vect;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  int i;
  unsigned int j;

  /* Check that the operation is supported.  */
  if (!vect_strided_load_supported (vectype))
    return false;

  *result_chain = VEC_copy (tree, heap, dr_chain);
  for (i = 0; i < exact_log2 (length); i++)
    {
      for (j = 0; j < length; j +=2)
        {
          first_vect = VEC_index (tree, dr_chain, j);
          second_vect = VEC_index (tree, dr_chain, j+1);

          /* data_ref = permute_even (first_data_ref, second_data_ref);  */
          perm_dest = create_tmp_var (vectype, "vect_perm_even");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);

          perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
                                                    perm_dest, first_vect,
                                                    second_vect);

          data_ref = make_ssa_name (perm_dest, perm_stmt);
          gimple_assign_set_lhs (perm_stmt, data_ref);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          mark_symbols_for_renaming (perm_stmt);

          VEC_replace (tree, *result_chain, j/2, data_ref);

          /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
          perm_dest = create_tmp_var (vectype, "vect_perm_odd");
          DECL_GIMPLE_REG_P (perm_dest) = 1;
          add_referenced_var (perm_dest);

          perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
                                                    perm_dest, first_vect,
                                                    second_vect);
          data_ref = make_ssa_name (perm_dest, perm_stmt);
          gimple_assign_set_lhs (perm_stmt, data_ref);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
          mark_symbols_for_renaming (perm_stmt);

          VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
        }
      dr_chain = VEC_copy (tree, heap, *result_chain);
    }
  return true;
}
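
/* Illustrative sketch (not part of GCC, kept out of the build): a scalar
   model of one extract_even/odd stage described above, for vectors of
   NELT elements held in plain arrays.  For inputs {0 1 2 3} and {4 5 6 7}
   it produces {0 2 4 6} and {1 3 5 7}, matching the comment.  */
#if 0
#define NELT 4

static void
extract_even (const int *a, const int *b, int *out)
{
  int k;
  for (k = 0; k < NELT / 2; k++)
    {
      out[k] = a[2 * k];                /* even elements of 1st vec */
      out[NELT / 2 + k] = b[2 * k];     /* even elements of 2nd vec */
    }
}

static void
extract_odd (const int *a, const int *b, int *out)
{
  int k;
  for (k = 0; k < NELT / 2; k++)
    {
      out[k] = a[2 * k + 1];            /* odd elements of 1st vec  */
      out[NELT / 2 + k] = b[2 * k + 1]; /* odd elements of 2nd vec  */
    }
}
#endif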
/* Function vect_transform_strided_load.

   Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
   to perform their permutation and ascribe the result vectorized statements to
   the scalar statements.  */

bool
vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  gimple next_stmt, new_stmt;
  VEC(tree,heap) *result_chain = NULL;
  unsigned int i, gap_count;
  tree tmp_data_ref;

  /* DR_CHAIN contains input data-refs that are a part of the interleaving.
     RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
     vectors, that are ready for vector computation.  */
  result_chain = VEC_alloc (tree, heap, size);
  /* Permute.  */
  if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain))
    return false;

  /* Put a permuted data-ref in the VECTORIZED_STMT field.
     Since we scan the chain starting from its first node, their order
     corresponds the order of data-refs in RESULT_CHAIN.  */
  next_stmt = first_stmt;
  gap_count = 1;
  for (i = 0; VEC_iterate (tree, result_chain, i, tmp_data_ref); i++)
    {
      if (!next_stmt)
        break;

      /* Skip the gaps.  Loads created for the gaps will be removed by dead
         code elimination pass later.  No need to check for the first stmt in
         the group, since it always exists.
         DR_GROUP_GAP is the number of steps in elements from the previous
         access (if there is no gap DR_GROUP_GAP is 1).  We skip loads that
         correspond to the gaps.  */
      if (next_stmt != first_stmt
          && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
        {
          gap_count++;
          continue;
        }

      while (next_stmt)
        {
          new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
          /* We assume that if VEC_STMT is not NULL, this is a case of multiple
             copies, and we put the new vector statement in the first available
             RELATED_STMT.  */
          if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
            STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
          else
            {
              if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
                {
                  gimple prev_stmt =
                    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
                  gimple rel_stmt =
                    STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
                  while (rel_stmt)
                    {
                      prev_stmt = rel_stmt;
                      rel_stmt =
                        STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
                    }

                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
                    new_stmt;
                }
            }

          next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
          gap_count = 1;
          /* If NEXT_STMT accesses the same DR as the previous statement,
             put the same TMP_DATA_REF as its vectorized statement; otherwise
             get the next data-ref from RESULT_CHAIN.  */
          if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
            break;
        }
    }

  VEC_free (tree, heap, result_chain);
  return true;
}
/* Function vect_can_force_dr_alignment_p.

   Returns whether the alignment of a DECL can be forced to be aligned
   on ALIGNMENT bit boundary.  */

bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  if (DECL_EXTERNAL (decl))
    return false;

  if (TREE_ASM_WRITTEN (decl))
    return false;

  if (TREE_STATIC (decl))
    return (alignment <= MAX_OFILE_ALIGNMENT);
  else
    return (alignment <= MAX_STACK_ALIGNMENT);
}
/* Function vect_supportable_dr_alignment

   Return whether the data reference DR is supported with respect to its
   alignment.  */

enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  enum machine_mode mode = TYPE_MODE (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  bool nested_in_vect_loop = false;

  if (aligned_access_p (dr))
    return dr_aligned;

  if (!loop_vinfo)
    /* FORNOW: Misaligned accesses are supported only in loops.  */
    return dr_unaligned_unsupported;

  vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
  nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);

  /* Possibly unaligned access.  */

  /* We can choose between using the implicit realignment scheme (generating
     a misaligned_move stmt) and the explicit realignment scheme (generating
     aligned loads with a REALIGN_LOAD).  There are two variants to the
     explicit realignment scheme: optimized, and unoptimized.
     We can optimize the realignment only if the step between consecutive
     vector loads is equal to the vector size.  Since the vector memory
     accesses advance in steps of VS (Vector Size) in the vectorized loop, it
     is guaranteed that the misalignment amount remains the same throughout
     the execution of the vectorized loop.  Therefore, we can create the
     "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
     at the loop preheader.

     However, in the case of outer-loop vectorization, when vectorizing a
     memory access in the inner-loop nested within the LOOP that is now being
     vectorized, while it is guaranteed that the misalignment of the
     vectorized memory access will remain the same in different outer-loop
     iterations, it is *not* guaranteed that it will remain the same
     throughout the execution of the inner-loop.  This is because the
     inner-loop advances with the original scalar step (and not in steps of
     VS).  If the inner-loop step happens to be a multiple of VS, then the
     misalignment remains fixed and we can use the optimized realignment
     scheme.  For example:

      for (i=0; i<N; i++)
        for (j=0; j<M; j++)
          s += a[i+j];

     When vectorizing the i-loop in the above example, the step between
     consecutive vector loads is 1, and so the misalignment does not remain
     fixed across the execution of the inner-loop, and the realignment cannot
     be optimized (as illustrated in the following pseudo vectorized loop):

      for (i=0; i<N; i+=4)
        for (j=0; j<M; j++){
          vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
                         // when j is {0,1,2,3,4,5,6,7,...} respectively.
                         // (assuming that we start from an aligned address).
          }

     We therefore have to use the unoptimized realignment scheme:

      for (i=0; i<N; i+=4)
          for (j=k; j<M; j+=4)
          vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
                         // that the misalignment of the initial address is
                         // 0).

     The loop can then be vectorized as follows:

      for (k=0; k<4; k++){
        rt = get_realignment_token (&vp[k]);
        for (i=0; i<N; i+=4){
          v1 = vp[i+k];
          for (j=k; j<M; j+=4){
            v2 = vp[i+j+VS-1];
            va = REALIGN_LOAD <v1,v2,rt>;
            vs += va;
            v1 = v2;
          }
        }
      } */

  if (DR_IS_READ (dr))
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (optab_handler (vec_realign_load_optab, mode)->insn_code !=
                                                         CODE_FOR_nothing
          && (!targetm.vectorize.builtin_mask_for_load
              || targetm.vectorize.builtin_mask_for_load ()))
        {
          tree vectype = STMT_VINFO_VECTYPE (stmt_info);
          if (nested_in_vect_loop
              && (TREE_INT_CST_LOW (DR_STEP (dr))
                  != GET_MODE_SIZE (TYPE_MODE (vectype))))
            return dr_explicit_realign;
          else
            return dr_explicit_realign_optimized;
        }
      if (!known_alignment_for_access_p (dr))
        {
          tree ba = DR_BASE_OBJECT (dr);

          if (ba)
            is_packed = contains_packed_reference (ba);
        }

      if (targetm.vectorize.
          builtin_support_vector_misalignment (mode, type,
                                               DR_MISALIGNMENT (dr), is_packed))
        /* Can't software pipeline the loads, but can at least do them.  */
        return dr_unaligned_supported;
    }
  else
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (!known_alignment_for_access_p (dr))
        {
          tree ba = DR_BASE_OBJECT (dr);

          if (ba)
            is_packed = contains_packed_reference (ba);
        }

      if (targetm.vectorize.
          builtin_support_vector_misalignment (mode, type,
                                               DR_MISALIGNMENT (dr), is_packed))
        return dr_unaligned_supported;
    }

  /* Unsupported.  */
  return dr_unaligned_unsupported;
}
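
/* Illustrative sketch (not part of GCC, kept out of the build): why the
   optimized realignment scheme requires the scalar step to be a multiple
   of VS.  With an inner-loop step of one element, the misalignment of
   &vp[i+j] cycles through 0,1,2,3,... inside the inner loop, so the
   realignment token cannot be hoisted out of it.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int VS = 4, i, j;
  for (i = 0; i < 8; i += VS)
    for (j = 0; j < 8; j++)
      printf ("%d ", (i + j) % VS);     /* prints 0 1 2 3 0 1 2 3 ...  */
  printf ("\n");
  return 0;
}
#endif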