/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"

/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "expr.h"
#include "optabs.h"
/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */

static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
                              tree vectype, unsigned HOST_WIDE_INT count)
{
  enum machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);

  if (array_mode == BLKmode)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
                         GET_MODE_NAME (mode), count);
      return false;
    }

  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot use %s<%s><%s>", name,
                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "can use %s<%s><%s>", name, GET_MODE_NAME (array_mode),
                     GET_MODE_NAME (mode));

  return true;
}
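/* For example (illustrative numbers only): a query for COUNT == 3 vectors of
   a 128-bit mode such as V4SImode asks for a 3 * 128 = 384-bit integer array
   mode; if neither the target hook nor mode_for_size can provide one,
   array_mode is BLKmode and the optab is reported as unusable.  */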
/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different than the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines the
   VF.  If the smallest datatype in the loop is present only as the rhs of a
   promotion operation - we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */
tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}
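/* Illustrative example: for a widening statement such as
   'int_x = (int) short_y', LHS_SIZE_UNIT is 4 and RHS_SIZE_UNIT is 2, and the
   short type is returned, since the smallest datatype is what determines the
   vectorization factor.  */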
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  while (next_stmt && next_stmt != stmt)
    {
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  if (next_stmt)
    return result;
  else
    return -1;
}
/* Function vect_insert_into_interleaving_chain.

   Insert DRA into the interleaving chain of DRB according to DRA's INIT.  */

static void
vect_insert_into_interleaving_chain (struct data_reference *dra,
                                     struct data_reference *drb)
{
  gimple prev, next;
  tree next_init;
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));

  prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
  while (next)
    {
      next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
      if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
        {
          /* Insert here.  */
          GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
          GROUP_NEXT_ELEMENT (stmtinfo_a) = next;
          return;
        }
      prev = next;
      next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
    }

  /* We got to the end of the list.  Insert here.  */
  GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = DR_STMT (dra);
  GROUP_NEXT_ELEMENT (stmtinfo_a) = NULL;
}
/* Function vect_update_interleaving_chain.

   For two data-refs DRA and DRB that are a part of a chain of interleaved
   data accesses, update the interleaving chain.  DRB's INIT is smaller than
   DRA's.

   There are four possible cases:
   1. New stmts - both DRA and DRB are not a part of any chain:
      FIRST_DR = DRB
      NEXT_DR (DRB) = DRA
   2. DRB is a part of a chain and DRA is not:
      no need to update FIRST_DR
      no need to insert DRB
      insert DRA according to init
   3. DRA is a part of a chain and DRB is not:
      if (init of FIRST_DR > init of DRB)
          FIRST_DR = DRB
          NEXT(FIRST_DR) = previous FIRST_DR
      else
          insert DRB according to its init
   4. both DRA and DRB are in some interleaving chains:
      choose the chain with the smallest init of FIRST_DR
      insert the nodes of the second chain into the first one.  */
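/* Worked example (hypothetical INIT values) for case 4: if DRA's chain has
   inits {0, 8} and DRB's chain has inits {4, 12}, the chain whose first
   element has the smaller init (0) is kept and the nodes of the other chain
   are merged into it in init order, giving {0, 4, 8, 12}.  */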
static void
vect_update_interleaving_chain (struct data_reference *drb,
                                struct data_reference *dra)
{
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  tree next_init, init_dra_chain, init_drb_chain;
  gimple first_a, first_b;
  tree node_init;
  gimple node, prev, next, first_stmt;

  /* 1. New stmts - both DRA and DRB are not a part of any chain.  */
  if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
    {
      GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (drb);
      GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
      GROUP_NEXT_ELEMENT (stmtinfo_b) = DR_STMT (dra);
      return;
    }

  /* 2. DRB is a part of a chain and DRA is not.  */
  if (!GROUP_FIRST_ELEMENT (stmtinfo_a) && GROUP_FIRST_ELEMENT (stmtinfo_b))
    {
      GROUP_FIRST_ELEMENT (stmtinfo_a) = GROUP_FIRST_ELEMENT (stmtinfo_b);
      /* Insert DRA into the chain of DRB.  */
      vect_insert_into_interleaving_chain (dra, drb);
      return;
    }

  /* 3. DRA is a part of a chain and DRB is not.  */
  if (GROUP_FIRST_ELEMENT (stmtinfo_a) && !GROUP_FIRST_ELEMENT (stmtinfo_b))
    {
      gimple old_first_stmt = GROUP_FIRST_ELEMENT (stmtinfo_a);
      tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
                                                        old_first_stmt)));
      gimple tmp;

      if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
        {
          /* DRB's init is smaller than the init of the stmt previously marked
             as the first stmt of the interleaving chain of DRA.  Therefore, we
             update FIRST_STMT and put DRB in the head of the list.  */
          GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (drb);
          GROUP_NEXT_ELEMENT (stmtinfo_b) = old_first_stmt;

          /* Update all the stmts in the list to point to the new FIRST_STMT.  */
          tmp = old_first_stmt;
          while (tmp)
            {
              GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) = DR_STMT (drb);
              tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (tmp));
            }
        }
      else
        {
          /* Insert DRB in the list of DRA.  */
          vect_insert_into_interleaving_chain (drb, dra);
          GROUP_FIRST_ELEMENT (stmtinfo_b) = GROUP_FIRST_ELEMENT (stmtinfo_a);
        }
      return;
    }

  /* 4. Both DRA and DRB are in some interleaving chains.  */
  first_a = GROUP_FIRST_ELEMENT (stmtinfo_a);
  first_b = GROUP_FIRST_ELEMENT (stmtinfo_b);
  if (first_a == first_b)
    return;
  init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
  init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b)));

  if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
    {
      /* Insert the nodes of DRA chain into the DRB chain.
         After inserting a node, continue from this node of the DRB chain
         (don't start from the beginning).  */
      node = GROUP_FIRST_ELEMENT (stmtinfo_a);
      prev = GROUP_FIRST_ELEMENT (stmtinfo_b);
      first_stmt = first_b;
    }
  else
    {
      /* Insert the nodes of DRB chain into the DRA chain.
         After inserting a node, continue from this node of the DRA chain
         (don't start from the beginning).  */
      node = GROUP_FIRST_ELEMENT (stmtinfo_b);
      prev = GROUP_FIRST_ELEMENT (stmtinfo_a);
      first_stmt = first_a;
    }

  while (node)
    {
      node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
      next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
      while (next)
        {
          next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
          if (tree_int_cst_compare (next_init, node_init) > 0)
            {
              /* Insert here.  */
              GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
              GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = next;
              prev = node;
              break;
            }
          prev = next;
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev));
        }
      if (!next)
        {
          /* We got to the end of the list.  Insert here.  */
          GROUP_NEXT_ELEMENT (vinfo_for_stmt (prev)) = node;
          GROUP_NEXT_ELEMENT (vinfo_for_stmt (node)) = NULL;
          prev = node;
        }
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (node)) = first_stmt;
      node = GROUP_NEXT_ELEMENT (vinfo_for_stmt (node));
    }
}
/* Check dependence between DRA and DRB for basic block vectorization.
   If the accesses share same bases and offsets, we can compare their initial
   constant offsets to decide whether they differ or not.  In case of a read-
   write dependence we check that the load is before the store to ensure that
   vectorization will not change the order of the accesses.  */
static bool
vect_drs_dependent_in_basic_block (struct data_reference *dra,
                                   struct data_reference *drb)
{
  HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
  gimple earlier_stmt;

  /* We only call this function for pairs of loads and stores, but we verify
     it here.  */
  if (DR_IS_READ (dra) == DR_IS_READ (drb))
    {
      if (DR_IS_READ (dra))
        return false;
      else
        return true;
    }

  /* Check that the data-refs have same bases and offsets.  If not, we can't
     determine if they are dependent.  */
  if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
      || !dr_equal_offsets_p (dra, drb))
    return true;

  /* Check the types.  */
  type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
  type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));

  if (type_size_a != type_size_b
      || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
                              TREE_TYPE (DR_REF (drb))))
    return true;

  init_a = TREE_INT_CST_LOW (DR_INIT (dra));
  init_b = TREE_INT_CST_LOW (DR_INIT (drb));

  /* Two different locations - no dependence.  */
  if (init_a != init_b)
    return false;

  /* We have a read-write dependence.  Check that the load is before the store.
     When we vectorize basic blocks, vector load can be only before
     corresponding scalar load, and vector store can be only after its
     corresponding scalar store.  So the order of the accesses is preserved in
     case the load is before the store.  */
  earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
  if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
    return false;

  return true;
}
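/* For instance, two accesses with the same base and offset over a 4-byte type
   but constant inits 0 and 4 touch disjoint locations, so no dependence is
   recorded; with equal inits there is a read-write dependence, which is only
   tolerated when the load statement comes before the store.  */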
/* Function vect_check_interleaving.

   Check if DRA and DRB are a part of interleaving.  In case they are, insert
   DRA and DRB in an interleaving chain.  */
static bool
vect_check_interleaving (struct data_reference *dra,
                         struct data_reference *drb)
{
  HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b;

  /* Check that the data-refs have same first location (except init) and they
     are both either store or load (not load and store).  */
  if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
      || !dr_equal_offsets_p (dra, drb)
      || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
      || DR_IS_READ (dra) != DR_IS_READ (drb))
    return false;

  /* Check:
     1. data-refs are of the same type
     2. their steps are equal
     3. the step (if greater than zero) is greater than the difference between
        data-refs' inits.  */
  type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
  type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));

  if (type_size_a != type_size_b
      || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
      || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
                              TREE_TYPE (DR_REF (drb))))
    return false;

  init_a = TREE_INT_CST_LOW (DR_INIT (dra));
  init_b = TREE_INT_CST_LOW (DR_INIT (drb));
  step = TREE_INT_CST_LOW (DR_STEP (dra));

  if (init_a > init_b)
    {
      /* If init_a == init_b + the size of the type * k, we have an
         interleaving, and DRB is accessed before DRA.  */
      diff_mod_size = (init_a - init_b) % type_size_a;
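      /* Illustrative numbers: with init_a == 8, init_b == 0, a 4-byte element
         type and step 16, diff_mod_size is (8 - 0) % 4 == 0 and the 8-byte
         gap does not exceed the step, so the two references interleave and
         DRB is accessed before DRA within each iteration.  */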
      if (step && (init_a - init_b) > step)
        return false;

      if (diff_mod_size == 0)
        {
          vect_update_interleaving_chain (drb, dra);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Detected interleaving ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
            }
          return true;
        }
    }
  else
    {
      /* If init_b == init_a + the size of the type * k, we have an
         interleaving, and DRA is accessed before DRB.  */
      diff_mod_size = (init_b - init_a) % type_size_a;

      if (step && (init_b - init_a) > step)
        return false;

      if (diff_mod_size == 0)
        {
          vect_update_interleaving_chain (dra, drb);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Detected interleaving ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
            }
          return true;
        }
    }

  return false;
}
/* Check if data references pointed by DR_I and DR_J are same or
   belong to same interleaving group.  Return FALSE if drs are
   different, otherwise return TRUE.  */
static bool
vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
{
  gimple stmt_i = DR_STMT (dr_i);
  gimple stmt_j = DR_STMT (dr_j);

  if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
      || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
          && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j))
          && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_i))
              == GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_j)))))
    return true;
  else
    return false;
}
/* If address ranges represented by DDR_I and DDR_J are equal,
   return TRUE, otherwise return FALSE.  */
static bool
vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
{
  if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
       && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
      || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
          && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
    return true;
  else
    return false;
}
/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.  */
static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark for run-time aliasing test between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not supported when optimizing for size.");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for outer-loops.");
      return false;
    }

  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
  if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
      || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for non-constant "
                         "step");
      return false;
    }

  VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
  return true;
}
/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
   the data dependence.  */
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = NULL;
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* Don't bother to analyze statements marked as unvectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    return false;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    {
      /* Independent data accesses.  */
      vect_check_interleaving (dra, drb);
      return false;
    }

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
    return false;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      gimple earlier_stmt;

      if (loop_vinfo)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias required: "
                               "can't determine dependence between ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
            }

          /* Add to list of ddrs that need to be tested at run-time.  */
          return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
        }

      /* When vectorizing a basic block unknown dependence can still mean
         a grouped access.  */
      if (vect_check_interleaving (dra, drb))
        return false;

      /* Read-read is OK (we need this check here, after checking for
         grouped access).  */
      if (DR_IS_READ (dra) && DR_IS_READ (drb))
        return false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
        }

      /* We do not vectorize basic blocks with write-write dependencies.  */
      if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
        return true;

      /* Check that it's not a load-after-store dependence.  */
      earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
        return true;

      return false;
    }

  /* Versioning for alias is not yet supported for basic block SLP, and
     dependence distance is inapplicable, hence, in case of known data
     dependence, basic block vectorization is impossible for now.  */
  if (!loop_vinfo)
    {
      if (dra != drb && vect_check_interleaving (dra, drb))
        return false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "determined dependence between ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_NOTE, " and ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
        }

      /* Do not vectorize basic blocks with write-write dependences.  */
      if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
        return true;

      /* Check if this dependence is allowed in basic block vectorization.  */
      return vect_drs_dependent_in_basic_block (dra, drb);
    }
  /* Loop-based vectorization and known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "bad dist vector for ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
        }
      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "dependence distance = %d.", dist);

      if (dist == 0)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "dependence distance == 0 between ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
            }

          /* For interleaving, mark that there is a read-write dependency if
             necessary.  We check before that one of the data-refs is a
             store.  */
          if (DR_IS_READ (dra))
            GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
          else if (DR_IS_READ (drb))
            GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "dependence distance negative.");
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "adjusting maximal vectorization factor to %i",
                             *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "dependence distance >= VF.");
          continue;
        }
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized, possible dependence "
                           "between data-refs ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_NOTE, " and ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
        }

      return true;
    }

  return false;
}
/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */
bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
                                   bb_vec_info bb_vinfo, int *max_vf)
{
  unsigned int i;
  VEC (ddr_p, heap) *ddrs = NULL;
  struct data_dependence_relation *ddr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_dependences ===");

  if (loop_vinfo)
    ddrs = LOOP_VINFO_DDRS (loop_vinfo);
  else
    ddrs = BB_VINFO_DDRS (bb_vinfo);

  FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}
/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data reference
      cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed.  Misalignment is calculated
   only for trivial cases.  TODO.  */
static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign;
  tree aligned_to, alignment;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_compute_data_ref_alignment:");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  /* Strided loads perform only component accesses, misalignment information
     is irrelevant for them.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    return true;

  misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop evenly
     divides by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "inner step divides the vector-size.");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "inner step doesn't divide the vector-size.");
          misalign = NULL_TREE;
        }
    }

  /* Similarly, if we're doing basic-block vectorization, we can only use
     base and misalignment information relative to an innermost loop if the
     misalignment stays the same throughout the execution of the loop.
     As above, this is the case if the stride of the dataref evenly divides
     by the vector size.  */
  if (!loop)
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "SLP: step doesn't divide the vector-size.");
          misalign = NULL_TREE;
        }
    }

  base = build_fold_indirect_ref (base_addr);
  alignment = ssize_int (TYPE_ALIGN (vectype) / BITS_PER_UNIT);

  if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown alignment for access: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, base);
        }
      return true;
    }

  if ((DECL_P (base)
       && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
                                alignment) >= 0)
      || (TREE_CODE (base_addr) == SSA_NAME
          && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
                                                  TREE_TYPE (base_addr)))),
                                   alignment) >= 0)
      || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Do not change the alignment of global variables here if
         flag_section_anchors is enabled as we already generated
         RTL for other functions.  Most global variables should
         have been aligned during the IPA increase_alignment pass.  */
      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
          || (TREE_STATIC (base) && flag_section_anchors))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "can't force alignment of ref: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
        }

      DECL_ALIGN (base) = TYPE_ALIGN (vectype);
      DECL_USER_ALIGN (base) = 1;
    }

  /* At this point we assume that the base is aligned.  */
  gcc_assert (base_aligned
              || (TREE_CODE (base) == VAR_DECL
                  && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));

  /* If this is a backward running DR then first access in the larger
     vectype actually is N-1 elements before the address in the DR.
     Adjust misalign accordingly.  */
  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
    {
      tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
      /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
         otherwise we wouldn't be here.  */
      offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
      /* PLUS because DR_STEP was negative.  */
      misalign = size_binop (PLUS_EXPR, misalign, offset);
    }

  /* Modulo alignment.  */
  misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);
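  /* Illustrative example: for a 16-byte vector alignment and an access whose
     DR_INIT is 20 bytes past an aligned base, the modulo above records a
     misalignment of 4 bytes.  */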
  if (!host_integerp (misalign, 1))
    {
      /* Negative or overflowed misalignment value.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected misalign value");
      return false;
    }

  SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
    }

  return true;
}
/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */
static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}
/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */
static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  VEC(dr_p,heap) *same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
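      /* Illustrative update (made-up numbers): peeling npeel == 3 iterations
         of a forward 4-byte access that starts out misaligned by 4 against a
         16-byte vector type gives (4 + 3 * 4) & 15 == 0, i.e. the access
         becomes aligned.  */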
      return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.");
  SET_DR_MISALIGNMENT (dr, -1);
}
/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */
bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
           && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      /* Strided loads perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
        {
          if (dump_enabled_p ())
            {
              if (DR_IS_READ (dr))
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned load.");
              else
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned "
                                 "store.");

              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dr));
            }
          return false;
        }

      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Vectorizing an unaligned access.");
    }

  return true;
}
/* Given a memory reference EXP return whether its alignment is less
   than its size.  */

static bool
not_size_aligned (tree exp)
{
  if (!host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1))
    return true;

  return (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp)))
          > get_object_alignment (exp));
}
/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */
static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
         the prolog loop ({VF - misalignment}), is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          dump_printf (MSG_NOTE,
                       ". misalignment = %d. ", DR_MISALIGNMENT (dr));
        }
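      /* A 4-byte element that is misaligned by, say, 6 bytes can never be
         brought to natural alignment by peeling whole iterations, because
         each peeled iteration advances the address by a multiple of the
         element size; the check below rejects such cases.  */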
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = TREE_TYPE (DR_REF (dr));
      bool is_packed = not_size_aligned (DR_REF (dr));
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unknown misalignment, is_packed = %d", is_packed);
      if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}
/* Calculate the cost of the memory access represented by DR.  */
static void
vect_get_data_access_cost (struct data_reference *dr,
                           unsigned int *inside_cost,
                           unsigned int *outside_cost,
                           stmt_vector_for_cost *body_cost_vec)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int ncopies = vf / nunits;

  if (DR_IS_READ (dr))
    vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
                        NULL, body_cost_vec, false);
  else
    vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_get_data_access_cost: inside_cost = %d, "
                     "outside_cost = %d.", *inside_cost, *outside_cost);
}
static hashval_t
vect_peeling_hash (const void *elem)
{
  const struct _vect_peel_info *peel_info;

  peel_info = (const struct _vect_peel_info *) elem;
  return (hashval_t) peel_info->npeel;
}

static int
vect_peeling_hash_eq (const void *elem1, const void *elem2)
{
  const struct _vect_peel_info *a, *b;

  a = (const struct _vect_peel_info *) elem1;
  b = (const struct _vect_peel_info *) elem2;
  return (a->npeel == b->npeel);
}
/* Insert DR into peeling hash table with NPEEL as key.  */
static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
                          int npeel)
{
  struct _vect_peel_info elem, *slot;
  void **new_slot;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  elem.npeel = npeel;
  slot = (vect_peel_info) htab_find (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
                                     &elem);
  if (slot)
    slot->count++;
  else
    {
      slot = XNEW (struct _vect_peel_info);
      slot->npeel = npeel;
      slot->dr = dr;
      slot->count = 1;
      new_slot = htab_find_slot (LOOP_VINFO_PEELING_HTAB (loop_vinfo), slot,
                                 INSERT);
      *new_slot = slot;
    }

  if (!supportable_dr_alignment && !flag_vect_cost_model)
    slot->count += VECT_MAX_COST;
}
/* Traverse peeling hash table to find peeling option that aligns maximum
   number of data accesses.  */
static int
vect_peeling_hash_get_most_frequent (void **slot, void *data)
{
  vect_peel_info elem = (vect_peel_info) *slot;
  vect_peel_extended_info max = (vect_peel_extended_info) data;

  if (elem->count > max->peel_info.count
      || (elem->count == max->peel_info.count
          && max->peel_info.npeel > elem->npeel))
    {
      max->peel_info.npeel = elem->npeel;
      max->peel_info.count = elem->count;
      max->peel_info.dr = elem->dr;
    }

  return 1;
}
/* Traverse peeling hash table and calculate cost for each peeling option.
   Find the one with the lowest cost.  */
static int
vect_peeling_hash_get_lowest_cost (void **slot, void *data)
{
  vect_peel_info elem = (vect_peel_info) *slot;
  vect_peel_extended_info min = (vect_peel_extended_info) data;
  int save_misalignment, dummy;
  unsigned int inside_cost = 0, outside_cost = 0, i;
  gimple stmt = DR_STMT (elem->dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;
  stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
  int single_iter_cost;

  prologue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
  body_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);
  epilogue_cost_vec = VEC_alloc (stmt_info_for_cost, heap, 2);

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);
      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      save_misalignment = DR_MISALIGNMENT (dr);
      vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
      vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
                                 &body_cost_vec);
      SET_DR_MISALIGNMENT (dr, save_misalignment);
    }

  single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
  outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel,
                                               &dummy, single_iter_cost,
                                               &prologue_cost_vec,
                                               &epilogue_cost_vec);

  /* Prologue and epilogue costs are added to the target model later.
     These costs depend only on the scalar iteration cost, the
     number of peeling iterations finally chosen, and the number of
     misaligned statements.  So discard the information found here.  */
  VEC_free (stmt_info_for_cost, heap, prologue_cost_vec);
  VEC_free (stmt_info_for_cost, heap, epilogue_cost_vec);

  if (inside_cost < min->inside_cost
      || (inside_cost == min->inside_cost
          && outside_cost < min->outside_cost))
    {
      min->inside_cost = inside_cost;
      min->outside_cost = outside_cost;
      VEC_free (stmt_info_for_cost, heap, min->body_cost_vec);
      min->body_cost_vec = body_cost_vec;
      min->peel_info.dr = elem->dr;
      min->peel_info.npeel = elem->npeel;
    }
  else
    VEC_free (stmt_info_for_cost, heap, body_cost_vec);

  return 1;
}
/* Choose best peeling option by traversing peeling hash table and either
   choosing an option with the lowest cost (if cost model is enabled) or the
   option that aligns as many accesses as possible.  */
static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
                                       unsigned int *npeel,
                                       stmt_vector_for_cost *body_cost_vec)
{
   struct _vect_peel_extended_info res;

   res.peel_info.dr = NULL;
   res.body_cost_vec = NULL;

   if (flag_vect_cost_model)
     {
       res.inside_cost = INT_MAX;
       res.outside_cost = INT_MAX;
       htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
                      vect_peeling_hash_get_lowest_cost, &res);
     }
   else
     {
       res.peel_info.count = 0;
       htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
                      vect_peeling_hash_get_most_frequent, &res);
     }

   *npeel = res.peel_info.npeel;
   *body_cost_vec = res.body_cost_vec;
   return res.peel_info.dr;
}
/* Function vect_enhance_data_refs_alignment

   This pass will use loop versioning and loop peeling in order to enhance
   the alignment of data references in the loop.

   FOR NOW: we assume that whatever versioning/peeling takes place, only the
   original loop is to be vectorized.  Any other loops that are created by
   the transformations performed in this pass - are not supposed to be
   vectorized.  This restriction will be relaxed.

   This pass will require a cost model to guide it whether to apply peeling
   or versioning or a combination of the two.  For example, the scheme that
   Intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') whose alignment you want to force by doing
   peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
   other accesses are not necessarily aligned, or (2) use loop versioning to
   generate one loop in which all accesses are aligned, and another loop in
   which only 'p' is necessarily aligned.

   ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
   Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

   Devising a cost model is the most critical aspect of this work.  It will
   guide us on which access to peel for, whether to use loop versioning, how
   many versions to create, etc.  The cost model will probably consist of
   generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).

   Here are the general steps involved in alignment enhancements:

     -- original loop, before alignment analysis:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = unknown
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- After vect_compute_data_refs_alignment:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- Possibility 1: we do loop versioning:
     if (p is aligned) {
        for (i=0; i<N; i++){    # loop 1A
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i=0; i<N; i++){    # loop 1B
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     -- Possibility 2: we do loop peeling:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     for (i = 3; i < N; i++){   # loop 2A
        x = q[i];                       # DR_MISALIGNMENT(q) = 0
        p[i] = y;                       # DR_MISALIGNMENT(p) = unknown
     }

     -- Possibility 3: combination of loop peeling and versioning:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     if (p is aligned) {
        for (i = 3; i<N; i++){  # loop 3A
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i = 3; i<N; i++){  # loop 3B
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     These loops are later passed to loop_transform to be vectorized.  The
     vectorizer will use the alignment information to guide the transformation
     (whether to generate regular loads/stores, or with special handling for
     misalignment).  */
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
  VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum dr_alignment_support supportable_dr_alignment;
  struct data_reference *dr0 = NULL, *first_store = NULL;
  struct data_reference *dr;
  unsigned int i, j;
  bool do_peeling = false;
  bool do_versioning = false;
  bool stat;
  gimple stmt;
  stmt_vec_info stmt_info;
  int vect_versioning_for_alias_required;
  unsigned int npeel = 0;
  bool all_misalignments_unknown = true;
  unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned possible_npeel_number = 1;
  tree vectype;
  unsigned int nelements, mis, same_align_drs_max = 0;
  stmt_vector_for_cost body_cost_vec = NULL;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_enhance_data_refs_alignment ===");
  /* While cost model enhancements are expected in the future, the high level
     view of the code at this time is as follows:

     A) If there is a misaligned access then see if peeling to align
        this access can make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures
        as needed and return true.

     B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy vect_supportable_dr_alignment
        then see if loop versioning checks can be used to make all data
        references satisfy vect_supportable_dr_alignment.  If so, update
        data structures as needed and return true.

     C) If neither peeling nor versioning were successful then return false if
        any data reference does not satisfy vect_supportable_dr_alignment.

     D) Return true (all data references satisfy vect_supportable_dr_alignment).

     Note, Possibility 3 above (which is peeling and versioning together) is not
     being done at this time.  */

  /* (1) Peeling to force alignment.  */

  /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
     Considerations:
     + How many accesses will become aligned due to the peeling
     - How many accesses will become unaligned due to the peeling,
       and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).  */
1595 /* (1) Peeling to force alignment. */
1597 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1599 + How many accesses will become aligned due to the peeling
1600 - How many accesses will become unaligned due to the peeling,
1601 and the cost of misaligned accesses.
1602 - The cost of peeling (the extra runtime checks, the increase
1605 FOR_EACH_VEC_ELT (data_reference_p
, datarefs
, i
, dr
)
1607 stmt
= DR_STMT (dr
);
1608 stmt_info
= vinfo_for_stmt (stmt
);
1610 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1613 /* For interleaving, only the alignment of the first access
1615 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
)
1616 && GROUP_FIRST_ELEMENT (stmt_info
) != stmt
)
1619 /* FORNOW: Any strided load prevents peeling. The induction
1620 variable analysis will fail when the prologue loop is generated,
1621 and so we can't generate the new base for the pointer. */
1622 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
1624 if (dump_enabled_p ())
1625 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1626 "strided load prevents peeling");
1631 /* For invariant accesses there is nothing to enhance. */
1632 if (integer_zerop (DR_STEP (dr
)))
1635 /* Strided loads perform only component accesses, alignment is
1636 irrelevant for them. */
1637 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
1640 supportable_dr_alignment
= vect_supportable_dr_alignment (dr
, true);
1641 do_peeling
= vector_alignment_reachable_p (dr
);
1644 if (known_alignment_for_access_p (dr
))
1646 unsigned int npeel_tmp
;
1647 bool negative
= tree_int_cst_compare (DR_STEP (dr
),
1648 size_zero_node
) < 0;
1650 /* Save info about DR in the hash table. */
1651 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo
))
1652 LOOP_VINFO_PEELING_HTAB (loop_vinfo
) =
1653 htab_create (1, vect_peeling_hash
,
1654 vect_peeling_hash_eq
, free
);
1656 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1657 nelements
= TYPE_VECTOR_SUBPARTS (vectype
);
1658 mis
= DR_MISALIGNMENT (dr
) / GET_MODE_SIZE (TYPE_MODE (
1659 TREE_TYPE (DR_REF (dr
))));
1660 npeel_tmp
= (negative
1661 ? (mis
- nelements
) : (nelements
- mis
))
1664 /* For multiple types, it is possible that the bigger type access
1665 will have more than one peeling option. E.g., a loop with two
1666 types: one of size (vector size / 4), and the other one of
1667 size (vector size / 8). Vectorization factor will 8. If both
1668 access are misaligned by 3, the first one needs one scalar
1669 iteration to be aligned, and the second one needs 5. But the
1670 the first one will be aligned also by peeling 5 scalar
1671 iterations, and in that case both accesses will be aligned.
1672 Hence, except for the immediate peeling amount, we also want
1673 to try to add full vector size, while we don't exceed
1674 vectorization factor.
1675 We do this automtically for cost model, since we calculate cost
1676 for every peeling option. */
1677 if (!flag_vect_cost_model
)
1678 possible_npeel_number
= vf
/nelements
;
1680 /* Handle the aligned case. We may decide to align some other
1681 access, making DR unaligned. */
1682 if (DR_MISALIGNMENT (dr
) == 0)
1685 if (!flag_vect_cost_model
)
1686 possible_npeel_number
++;
1689 for (j
= 0; j
< possible_npeel_number
; j
++)
1691 gcc_assert (npeel_tmp
<= vf
);
1692 vect_peeling_hash_insert (loop_vinfo
, dr
, npeel_tmp
);
1693 npeel_tmp
+= nelements
;
1696 all_misalignments_unknown
= false;
1697 /* Data-ref that was chosen for the case that all the
1698 misalignments are unknown is not relevant anymore, since we
1699 have a data-ref with known alignment. */
1704 /* If we don't know all the misalignment values, we prefer
1705 peeling for data-ref that has maximum number of data-refs
1706 with the same alignment, unless the target prefers to align
1707 stores over load. */
1708 if (all_misalignments_unknown
)
1710 if (same_align_drs_max
< VEC_length (dr_p
,
1711 STMT_VINFO_SAME_ALIGN_REFS (stmt_info
))
1714 same_align_drs_max
= VEC_length (dr_p
,
1715 STMT_VINFO_SAME_ALIGN_REFS (stmt_info
));
1719 if (!first_store
&& DR_IS_WRITE (dr
))
1723 /* If there are both known and unknown misaligned accesses in the
1724 loop, we choose peeling amount according to the known
1728 if (!supportable_dr_alignment
)
1731 if (!first_store
&& DR_IS_WRITE (dr
))
1738 if (!aligned_access_p (dr
))
1740 if (dump_enabled_p ())
1741 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1742 "vector alignment may not be reachable");
  vect_versioning_for_alias_required
    = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);

  /* Temporarily, if versioning for alias is required, we disable peeling
     until we support peeling and versioning.  Often peeling for alignment
     will require peeling for loop-bound, which in turn requires that we
     know how to adjust the loop ivs after the loop.  */
  if (vect_versioning_for_alias_required
      || !vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;

  if (do_peeling && all_misalignments_unknown
      && vect_supportable_dr_alignment (dr0, false))
    {
      /* Check if the target requires to prefer stores over loads, i.e., if
         misaligned stores are more expensive than misaligned loads (taking
         drs with same alignment into account).  */
      if (first_store && DR_IS_READ (dr0))
        {
          unsigned int load_inside_cost = 0, load_outside_cost = 0;
          unsigned int store_inside_cost = 0, store_outside_cost = 0;
          unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
          unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
          stmt_vector_for_cost dummy = VEC_alloc (stmt_info_for_cost, heap, 2);

          vect_get_data_access_cost (dr0, &load_inside_cost,
                                     &load_outside_cost, &dummy);
          vect_get_data_access_cost (first_store, &store_inside_cost,
                                     &store_outside_cost, &dummy);

          VEC_free (stmt_info_for_cost, heap, dummy);

          /* Calculate the penalty for leaving FIRST_STORE unaligned (by
             aligning the load DR0).  */
          load_inside_penalty = store_inside_cost;
          load_outside_penalty = store_outside_cost;
          for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
                                   (vinfo_for_stmt (DR_STMT (first_store))),
                                   i, dr);
               i++)
            if (DR_IS_READ (dr))
              {
                load_inside_penalty += load_inside_cost;
                load_outside_penalty += load_outside_cost;
              }
            else
              {
                load_inside_penalty += store_inside_cost;
                load_outside_penalty += store_outside_cost;
              }

          /* Calculate the penalty for leaving DR0 unaligned (by
             aligning the FIRST_STORE).  */
          store_inside_penalty = load_inside_cost;
          store_outside_penalty = load_outside_cost;
          for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
                                   (vinfo_for_stmt (DR_STMT (dr0))),
                                   i, dr);
               i++)
            if (DR_IS_READ (dr))
              {
                store_inside_penalty += load_inside_cost;
                store_outside_penalty += load_outside_cost;
              }
            else
              {
                store_inside_penalty += store_inside_cost;
                store_outside_penalty += store_outside_cost;
              }

          if (load_inside_penalty > store_inside_penalty
              || (load_inside_penalty == store_inside_penalty
                  && load_outside_penalty > store_outside_penalty))
            dr0 = first_store;
        }

      /* In case there are only loads with different unknown misalignments, use
         peeling only if it may help to align other accesses in the loop.  */
      if (!first_store && !VEC_length (dr_p, STMT_VINFO_SAME_ALIGN_REFS
                                       (vinfo_for_stmt (DR_STMT (dr0))))
          && vect_supportable_dr_alignment (dr0, false)
             != dr_unaligned_supported)
        do_peeling = false;
    }
  if (do_peeling && !dr0)
    {
      /* Peeling is possible, but there is no data access that is not supported
         unless aligned.  So we try to choose the best possible peeling.  */

      /* We should get here only if there are drs with known misalignment.  */
      gcc_assert (!all_misalignments_unknown);

      /* Choose the best peeling from the hash table.  */
      dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
                                                   &body_cost_vec);
      if (!dr0 || !npeel)
        do_peeling = false;
    }

  if (do_peeling)
    {
      stmt = DR_STMT (dr0);
      stmt_info = vinfo_for_stmt (stmt);
      vectype = STMT_VINFO_VECTYPE (stmt_info);
      nelements = TYPE_VECTOR_SUBPARTS (vectype);

      if (known_alignment_for_access_p (dr0))
        {
          bool negative = tree_int_cst_compare (DR_STEP (dr0),
                                                size_zero_node) < 0;
          if (!npeel)
            {
              /* Since it's known at compile time, compute the number of
                 iterations in the peeled loop (the peeling factor) for use in
                 updating DR_MISALIGNMENT values.  The peeling factor is the
                 vectorization factor minus the misalignment as an element
                 count.  */
              mis = DR_MISALIGNMENT (dr0);
              mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
              npeel = ((negative ? mis - nelements : nelements - mis)
                       & (nelements - 1));
            }

          /* For interleaved data access every iteration accesses all the
             members of the group, therefore we divide the number of iterations
             by the group size.  */
          stmt_info = vinfo_for_stmt (DR_STMT (dr0));
          if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
            npeel /= GROUP_SIZE (stmt_info);
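          /* Illustrative peeling-factor arithmetic (made-up numbers): with
             4 elements per vector and a known misalignment of 1 element in a
             forward access, npeel is (4 - 1) & 3 == 3 scalar iterations; for
             an interleaving group of size 3 this is then divided by the group
             size.  */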
1881 if (dump_enabled_p ())
1882 dump_printf_loc (MSG_NOTE
, vect_location
,
1883 "Try peeling by %d", npeel
);
      /* Ensure that all data refs can be vectorized after the peel.  */
      FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
        {
          int save_misalignment;

          if (dr == dr0)
            continue;

          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);
          /* For interleaving, only the alignment of the first access
             matters.  */
          if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
              && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
            continue;

          /* Strided loads perform only component accesses, alignment is
             irrelevant for them.  */
          if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
            continue;

          save_misalignment = DR_MISALIGNMENT (dr);
          vect_update_misalignment_for_peel (dr, dr0, npeel);
          supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
          SET_DR_MISALIGNMENT (dr, save_misalignment);

          if (!supportable_dr_alignment)
            {
              do_peeling = false;
              break;
            }
        }
      if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
        {
          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          if (!stat)
            do_peeling = false;
          else
            {
              VEC_free (stmt_info_for_cost, heap, body_cost_vec);
              return stat;
            }
        }
      if (do_peeling)
        {
          stmt_info_for_cost *si;
          void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

          /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
             If the misalignment of DR_i is identical to that of dr0 then set
             DR_MISALIGNMENT (DR_i) to zero.  If the misalignment of DR_i and
             dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
             by the peeling factor times the element size of DR_i (MOD the
             vectorization factor times the size).  Otherwise, the
             misalignment of DR_i must be set to unknown.  */
          FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
            if (dr != dr0)
              vect_update_misalignment_for_peel (dr, dr0, npeel);

          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
          if (npeel)
            LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
          else
            LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
          SET_DR_MISALIGNMENT (dr0, 0);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Alignment of access forced using peeling.");
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Peeling for alignment will be applied.");
            }
          /* We've delayed passing the inside-loop peeling costs to the
             target cost model until we were sure peeling would happen.
             Do so now.  */
          if (body_cost_vec)
            {
              FOR_EACH_VEC_ELT (stmt_info_for_cost, body_cost_vec, i, si)
                {
                  struct _stmt_vec_info *stmt_info
                    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
                  (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                        si->misalign, vect_body);
                }
              VEC_free (stmt_info_for_cost, heap, body_cost_vec);
            }

          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          gcc_assert (stat);
          return stat;
        }
    }

  VEC_free (stmt_info_for_cost, heap, body_cost_vec);
  /* (2) Versioning to force alignment.  */

  /* Try versioning if:
     1) flag_tree_vect_loop_version is TRUE
     2) optimize loop for speed
     3) there is at least one unsupported misaligned data ref with an unknown
        misalignment, and
     4) all misaligned data refs with a known misalignment are supported, and
     5) the number of runtime alignment checks is within reason.  */

  do_versioning =
        flag_tree_vect_loop_version
        && optimize_loop_nest_for_speed_p (loop)
        && (!loop->inner); /* FORNOW */

  if (do_versioning)
    {
      FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
        {
          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);

          /* For interleaving, only the alignment of the first access
             matters.  */
          if (aligned_access_p (dr)
              || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
                  && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
            continue;

          /* Strided loads perform only component accesses, alignment is
             irrelevant for them.  */
          if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
            continue;

          supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);

          if (!supportable_dr_alignment)
            {
              gimple stmt;
              int mask;
              tree vectype;

              if (known_alignment_for_access_p (dr)
                  || VEC_length (gimple,
                                 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
                {
                  do_versioning = false;
                  break;
                }

              stmt = DR_STMT (dr);
              vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
              gcc_assert (vectype);

              /* The rightmost bits of an aligned address must be zeros.
                 Construct the mask needed for this test.  For example,
                 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
                 mask must be 15 = 0xf.  */
              mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
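              /* The runtime guard of the versioned loop then effectively
                 tests (address & mask) == 0 for each recorded reference,
                 e.g. (addr & 0xf) == 0 for V4SI; if any test fails, the
                 scalar version of the loop is executed instead.  */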
              /* FORNOW: use the same mask to test all potentially unaligned
                 references in the loop.  The vectorizer currently supports
                 a single vector size, see the reference to
                 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
                 vectorization factor is computed.  */
              gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
                          || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
              LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
              VEC_safe_push (gimple, heap,
                             LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
                             stmt);
            }
        }

      /* Versioning requires at least one misaligned data reference.  */
      if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
        do_versioning = false;
      else if (!do_versioning)
        VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
    }

  if (do_versioning)
    {
      VEC(gimple,heap) *may_misalign_stmts
        = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
      gimple stmt;

      /* It can now be assumed that the data references in the statements
         in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
         of the loop being vectorized.  */
      FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
        {
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          dr = STMT_VINFO_DATA_REF (stmt_info);
          SET_DR_MISALIGNMENT (dr, 0);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Alignment of access forced using versioning.");
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Versioning for alignment will be applied.");

      /* Peeling and versioning can't be done together at this time.  */
      gcc_assert (! (do_peeling && do_versioning));

      stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
      gcc_assert (stat);
      return stat;
    }

  /* This point is reached if neither peeling nor versioning is being done.  */
  gcc_assert (! (do_peeling || do_versioning));

  stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
  return stat;
}

/* Function vect_find_same_alignment_drs.

   Update group and alignment relations according to the chosen
   vectorization factor.  */
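/* For example, with a vectorization factor of 4 and same-sized accesses
   a[i] and a[i+4], the dependence distance is 4, i.e. 0 modulo the
   vectorization factor, so the two references are recorded as having the
   same alignment.  */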

static void
vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
                              loop_vec_info loop_vinfo)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
  int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
  lambda_vector dist_v;
  unsigned int loop_depth;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return;

  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    return;

  /* Loop-based vectorization and known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    return;

  /* Data-dependence analysis reports a distance vector of zero
     for data-references that overlap only in the first iteration
     but have different sign step (see PR45764).
     So as a sanity check require equal DR_STEP.  */
  if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
    return;

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "dependence distance = %d.", dist);

      /* Same loop iteration.  */
      if (dist == 0
          || (dist % vectorization_factor == 0 && dra_size == drb_size))
        {
          /* Two references with distance zero have the same alignment.  */
          VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
          VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "accesses have the same alignment.");
              dump_printf (MSG_NOTE,
                           "dependence distance modulo vf == 0 between ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
            }
        }
    }
}

/* Function vect_analyze_data_refs_alignment

   Analyze the alignment of the data-references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_refs_alignment ===");

  /* Mark groups of data references with same alignment using
     data dependence information.  */
  if (loop_vinfo)
    {
      VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
      struct data_dependence_relation *ddr;
      unsigned int i;

      FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
        vect_find_same_alignment_drs (ddr, loop_vinfo);
    }

  if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: can't calculate alignment "
                         "for data ref.");
      return false;
    }

  return true;
}

/* Analyze groups of accesses: check that DR belongs to a group of
   accesses of legal size, step, etc.  Detect gaps, single element
   interleaving, and other special cases.  Set grouped access info.
   Collect groups of strided stores for further use in SLP analysis.  */

static bool
vect_analyze_group_access (struct data_reference *dr)
{
  tree step = DR_STEP (dr);
  tree scalar_type = TREE_TYPE (DR_REF (dr));
  HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
  HOST_WIDE_INT groupsize, last_accessed_element = 1;
  bool slp_impossible = false;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
     size of the interleaving group (including gaps).  */
  groupsize = dr_step / type_size;
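  /* For example, two interleaved stores a[2*i] and a[2*i+1] of 4-byte ints
     have DR_STEP == 8 and type_size == 4, giving groupsize == 2.  */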
2241 /* Not consecutive access is possible only if it is a part of interleaving. */
2242 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
2244 /* Check if it this DR is a part of interleaving, and is a single
2245 element of the group that is accessed in the loop. */
2247 /* Gaps are supported only for loads. STEP must be a multiple of the type
2248 size. The size of the group must be a power of 2. */
2250 && (dr_step
% type_size
) == 0
2252 && exact_log2 (groupsize
) != -1)
2254 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = stmt
;
2255 GROUP_SIZE (vinfo_for_stmt (stmt
)) = groupsize
;
2256 if (dump_enabled_p ())
2258 dump_printf_loc (MSG_NOTE
, vect_location
,
2259 "Detected single element interleaving ");
2260 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (dr
));
2261 dump_printf (MSG_NOTE
, " step ");
2262 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, step
);
2267 if (dump_enabled_p ())
2268 dump_printf_loc (MSG_NOTE
, vect_location
,
2269 "Data access with gaps requires scalar "
2273 if (dump_enabled_p ())
2274 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2275 "Peeling for outer loop is not"
2280 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
) = true;
2286 if (dump_enabled_p ())
2288 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2289 "not consecutive access ");
2290 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
2295 /* Mark the statement as unvectorizable. */
2296 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr
))) = false;
2303 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) == stmt
)
2305 /* First stmt in the interleaving chain. Check the chain. */
2306 gimple next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt
));
2307 struct data_reference
*data_ref
= dr
;
2308 unsigned int count
= 1;
2310 tree prev_init
= DR_INIT (data_ref
);
2312 HOST_WIDE_INT diff
, count_in_bytes
, gaps
= 0;
2316 /* Skip same data-refs. In case that two or more stmts share
2317 data-ref (supported only for loads), we vectorize only the first
2318 stmt, and the rest get their vectorized loads from the first
2320 if (!tree_int_cst_compare (DR_INIT (data_ref
),
2321 DR_INIT (STMT_VINFO_DATA_REF (
2322 vinfo_for_stmt (next
)))))
2324 if (DR_IS_WRITE (data_ref
))
2326 if (dump_enabled_p ())
2327 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2328 "Two store stmts share the same dr.");
2332 /* Check that there is no load-store dependencies for this loads
2333 to prevent a case of load-store-load to the same location. */
2334 if (GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next
))
2335 || GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev
)))
2337 if (dump_enabled_p ())
2338 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2339 "READ_WRITE dependence in interleaving.");
2343 /* For load use the same data-ref load. */
2344 GROUP_SAME_DR_STMT (vinfo_for_stmt (next
)) = prev
;
2347 next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next
));
2353 /* Check that all the accesses have the same STEP. */
2354 next_step
= DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next
)));
2355 if (tree_int_cst_compare (step
, next_step
))
2357 if (dump_enabled_p ())
2358 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2359 "not consecutive access in interleaving");
2363 data_ref
= STMT_VINFO_DATA_REF (vinfo_for_stmt (next
));
2364 /* Check that the distance between two accesses is equal to the type
2365 size. Otherwise, we have gaps. */
2366 diff
= (TREE_INT_CST_LOW (DR_INIT (data_ref
))
2367 - TREE_INT_CST_LOW (prev_init
)) / type_size
;
2370 /* FORNOW: SLP of accesses with gaps is not supported. */
2371 slp_impossible
= true;
2372 if (DR_IS_WRITE (data_ref
))
2374 if (dump_enabled_p ())
2375 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2376 "interleaved store with gaps");
2383 last_accessed_element
+= diff
;
2385 /* Store the gap from the previous member of the group. If there is no
2386 gap in the access, GROUP_GAP is always 1. */
2387 GROUP_GAP (vinfo_for_stmt (next
)) = diff
;
2389 prev_init
= DR_INIT (data_ref
);
2390 next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next
));
2391 /* Count the number of data-refs in the chain. */
2395 /* COUNT is the number of accesses found, we multiply it by the size of
2396 the type to get COUNT_IN_BYTES. */
2397 count_in_bytes
= type_size
* count
;
2399 /* Check that the size of the interleaving (including gaps) is not
2400 greater than STEP. */
2401 if (dr_step
&& dr_step
< count_in_bytes
+ gaps
* type_size
)
2403 if (dump_enabled_p ())
2405 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2406 "interleaving size is greater than step for ");
2407 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, DR_REF (dr
));
2412 /* Check that the size of the interleaving is equal to STEP for stores,
2413 i.e., that there are no gaps. */
2414 if (dr_step
&& dr_step
!= count_in_bytes
)
2416 if (DR_IS_READ (dr
))
2418 slp_impossible
= true;
2419 /* There is a gap after the last load in the group. This gap is a
2420 difference between the groupsize and the number of elements.
2421 When there is no gap, this difference should be 0. */
2422 GROUP_GAP (vinfo_for_stmt (stmt
)) = groupsize
- count
;
2426 if (dump_enabled_p ())
2427 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2428 "interleaved store with gaps");
2433 /* Check that STEP is a multiple of type size. */
2434 if (dr_step
&& (dr_step
% type_size
) != 0)
2436 if (dump_enabled_p ())
2438 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2439 "step is not a multiple of type size: step ");
2440 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, step
);
2441 dump_printf (MSG_MISSED_OPTIMIZATION
, " size ");
2442 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
2443 TYPE_SIZE_UNIT (scalar_type
));
2451 GROUP_SIZE (vinfo_for_stmt (stmt
)) = groupsize
;
2452 if (dump_enabled_p ())
2453 dump_printf_loc (MSG_NOTE
, vect_location
,
2454 "Detected interleaving of size %d", (int)groupsize
);
2456 /* SLP: create an SLP data structure for every interleaving group of
2457 stores for further analysis in vect_analyse_slp. */
2458 if (DR_IS_WRITE (dr
) && !slp_impossible
)
2461 VEC_safe_push (gimple
, heap
, LOOP_VINFO_GROUPED_STORES (loop_vinfo
),
2464 VEC_safe_push (gimple
, heap
, BB_VINFO_GROUPED_STORES (bb_vinfo
),
2468 /* There is a gap in the end of the group. */
2469 if (groupsize
- last_accessed_element
> 0 && loop_vinfo
)
2471 if (dump_enabled_p ())
2472 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2473 "Data access with gaps requires scalar "
2477 if (dump_enabled_p ())
2478 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2479 "Peeling for outer loop is not supported");
2483 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
) = true;
2491 /* Analyze the access pattern of the data-reference DR.
2492 In case of non-consecutive accesses call vect_analyze_group_access() to
2493 analyze groups of accesses. */
2496 vect_analyze_data_ref_access (struct data_reference
*dr
)
2498 tree step
= DR_STEP (dr
);
2499 tree scalar_type
= TREE_TYPE (DR_REF (dr
));
2500 gimple stmt
= DR_STMT (dr
);
2501 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2502 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2503 struct loop
*loop
= NULL
;
2506 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2508 if (loop_vinfo
&& !step
)
2510 if (dump_enabled_p ())
2511 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2512 "bad data-ref access in loop");
2516 /* Allow invariant loads in loops. */
2517 if (loop_vinfo
&& integer_zerop (step
))
2519 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = NULL
;
2520 return DR_IS_READ (dr
);
2523 if (loop
&& nested_in_vect_loop_p (loop
, stmt
))
2525 /* Interleaved accesses are not yet supported within outer-loop
2526 vectorization for references in the inner-loop. */
2527 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = NULL
;
2529 /* For the rest of the analysis we use the outer-loop step. */
2530 step
= STMT_VINFO_DR_STEP (stmt_info
);
2531 if (integer_zerop (step
))
2533 if (dump_enabled_p ())
2534 dump_printf_loc (MSG_NOTE
, vect_location
,
2535 "zero step in outer loop.");
2536 if (DR_IS_READ (dr
))
2544 if (TREE_CODE (step
) == INTEGER_CST
)
2546 HOST_WIDE_INT dr_step
= TREE_INT_CST_LOW (step
);
2547 if (!tree_int_cst_compare (step
, TYPE_SIZE_UNIT (scalar_type
))
2549 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type
), -dr_step
)))
2551 /* Mark that it is not interleaving. */
2552 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = NULL
;
2557 if (loop
&& nested_in_vect_loop_p (loop
, stmt
))
2559 if (dump_enabled_p ())
2560 dump_printf_loc (MSG_NOTE
, vect_location
,
2561 "grouped access in outer loop.");
2565 /* Assume this is a DR handled by non-constant strided load case. */
2566 if (TREE_CODE (step
) != INTEGER_CST
)
2567 return STMT_VINFO_STRIDE_LOAD_P (stmt_info
);
2569 /* Not consecutive access - check if it's a part of interleaving group. */
2570 return vect_analyze_group_access (dr
);

/* Function vect_analyze_data_ref_accesses.

   Analyze the access pattern of all the data references in the loop.

   FORNOW: the only access pattern that is considered vectorizable is a
           simple step 1 (consecutive) access.

   FORNOW: handle only arrays and pointer accesses.  */
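/* For example, an access a[i] whose step equals the element size is a
   simple consecutive access; a[2*i] is not, and is handled (if at all)
   by the interleaving analysis in vect_analyze_group_access.  */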

bool
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  VEC (data_reference_p, heap) *datarefs;
  struct data_reference *dr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_ref_accesses ===");

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_analyze_data_ref_access (dr))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: complicated access pattern.");

        if (bb_vinfo)
          {
            /* Mark the statement as not vectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}
2620 /* Function vect_prune_runtime_alias_test_list.
2622 Prune a list of ddrs to be tested at run-time by versioning for alias.
2623 Return FALSE if resulting list of ddrs is longer then allowed by
2624 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2627 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo
)
2629 VEC (ddr_p
, heap
) * ddrs
=
2630 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
);
2633 if (dump_enabled_p ())
2634 dump_printf_loc (MSG_NOTE
, vect_location
,
2635 "=== vect_prune_runtime_alias_test_list ===");
2637 for (i
= 0; i
< VEC_length (ddr_p
, ddrs
); )
2642 ddr_i
= VEC_index (ddr_p
, ddrs
, i
);
2645 for (j
= 0; j
< i
; j
++)
2647 ddr_p ddr_j
= VEC_index (ddr_p
, ddrs
, j
);
2649 if (vect_vfa_range_equal (ddr_i
, ddr_j
))
2651 if (dump_enabled_p ())
2653 dump_printf_loc (MSG_NOTE
, vect_location
,
2654 "found equal ranges ");
2655 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (DDR_A (ddr_i
)));
2656 dump_printf (MSG_NOTE
, ", ");
2657 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (DDR_B (ddr_i
)));
2658 dump_printf (MSG_NOTE
, " and ");
2659 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (DDR_A (ddr_j
)));
2660 dump_printf (MSG_NOTE
, ", ");
2661 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (DDR_B (ddr_j
)));
2670 VEC_ordered_remove (ddr_p
, ddrs
, i
);
2676 if (VEC_length (ddr_p
, ddrs
) >
2677 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS
))
2679 if (dump_enabled_p ())
2681 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2682 "disable versioning for alias - max number of "
2683 "generated checks exceeded.");
2686 VEC_truncate (ddr_p
, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
), 0);
2694 /* Check whether a non-affine read in stmt is suitable for gather load
2695 and if so, return a builtin decl for that operation. */
2698 vect_check_gather (gimple stmt
, loop_vec_info loop_vinfo
, tree
*basep
,
2699 tree
*offp
, int *scalep
)
2701 HOST_WIDE_INT scale
= 1, pbitpos
, pbitsize
;
2702 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2703 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2704 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
2705 tree offtype
= NULL_TREE
;
2706 tree decl
, base
, off
;
2707 enum machine_mode pmode
;
2708 int punsignedp
, pvolatilep
;
  /* The gather builtins need address of the form
     loop_invariant + vector * {1, 2, 4, 8}
     or
     loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
     Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
     of loop invariants/SSA_NAMEs defined in the loop, with casts,
     multiplications and additions in it.  To get a vector, we need
     a single SSA_NAME that will be defined in the loop and will
     contain everything that is not loop invariant and that can be
     vectorized.  The following code attempts to find such a preexisting
     SSA_NAME OFF and put the loop invariants into a tree BASE
     that can be gimplified before the loop.  */
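  /* For example, for an indexed read p[idx[i]] with p loop invariant, BASE
     collects the loop-invariant part (p plus any constant offset), OFF
     becomes the SSA_NAME computing idx[i], and the element size is factored
     into the scale, matching the loop_invariant + vector * scale form.  */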
2722 base
= get_inner_reference (DR_REF (dr
), &pbitsize
, &pbitpos
, &off
,
2723 &pmode
, &punsignedp
, &pvolatilep
, false);
2724 gcc_assert (base
!= NULL_TREE
&& (pbitpos
% BITS_PER_UNIT
) == 0);
2726 if (TREE_CODE (base
) == MEM_REF
)
2728 if (!integer_zerop (TREE_OPERAND (base
, 1)))
2730 if (off
== NULL_TREE
)
2732 double_int moff
= mem_ref_offset (base
);
2733 off
= double_int_to_tree (sizetype
, moff
);
2736 off
= size_binop (PLUS_EXPR
, off
,
2737 fold_convert (sizetype
, TREE_OPERAND (base
, 1)));
2739 base
= TREE_OPERAND (base
, 0);
2742 base
= build_fold_addr_expr (base
);
2744 if (off
== NULL_TREE
)
2745 off
= size_zero_node
;
2747 /* If base is not loop invariant, either off is 0, then we start with just
2748 the constant offset in the loop invariant BASE and continue with base
2749 as OFF, otherwise give up.
2750 We could handle that case by gimplifying the addition of base + off
2751 into some SSA_NAME and use that as off, but for now punt. */
2752 if (!expr_invariant_in_loop_p (loop
, base
))
2754 if (!integer_zerop (off
))
2757 base
= size_int (pbitpos
/ BITS_PER_UNIT
);
2759 /* Otherwise put base + constant offset into the loop invariant BASE
2760 and continue with OFF. */
2763 base
= fold_convert (sizetype
, base
);
2764 base
= size_binop (PLUS_EXPR
, base
, size_int (pbitpos
/ BITS_PER_UNIT
));
2767 /* OFF at this point may be either a SSA_NAME or some tree expression
2768 from get_inner_reference. Try to peel off loop invariants from it
2769 into BASE as long as possible. */
2771 while (offtype
== NULL_TREE
)
2773 enum tree_code code
;
2774 tree op0
, op1
, add
= NULL_TREE
;
2776 if (TREE_CODE (off
) == SSA_NAME
)
2778 gimple def_stmt
= SSA_NAME_DEF_STMT (off
);
2780 if (expr_invariant_in_loop_p (loop
, off
))
2783 if (gimple_code (def_stmt
) != GIMPLE_ASSIGN
)
2786 op0
= gimple_assign_rhs1 (def_stmt
);
2787 code
= gimple_assign_rhs_code (def_stmt
);
2788 op1
= gimple_assign_rhs2 (def_stmt
);
2792 if (get_gimple_rhs_class (TREE_CODE (off
)) == GIMPLE_TERNARY_RHS
)
2794 code
= TREE_CODE (off
);
2795 extract_ops_from_tree (off
, &code
, &op0
, &op1
);
2799 case POINTER_PLUS_EXPR
:
2801 if (expr_invariant_in_loop_p (loop
, op0
))
2806 add
= fold_convert (sizetype
, add
);
2808 add
= size_binop (MULT_EXPR
, add
, size_int (scale
));
2809 base
= size_binop (PLUS_EXPR
, base
, add
);
2812 if (expr_invariant_in_loop_p (loop
, op1
))
2820 if (expr_invariant_in_loop_p (loop
, op1
))
2822 add
= fold_convert (sizetype
, op1
);
2823 add
= size_binop (MINUS_EXPR
, size_zero_node
, add
);
2829 if (scale
== 1 && host_integerp (op1
, 0))
2831 scale
= tree_low_cst (op1
, 0);
2840 if (!POINTER_TYPE_P (TREE_TYPE (op0
))
2841 && !INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
2843 if (TYPE_PRECISION (TREE_TYPE (op0
))
2844 == TYPE_PRECISION (TREE_TYPE (off
)))
2849 if (TYPE_PRECISION (TREE_TYPE (op0
))
2850 < TYPE_PRECISION (TREE_TYPE (off
)))
2853 offtype
= TREE_TYPE (off
);
2864 /* If at the end OFF still isn't a SSA_NAME or isn't
2865 defined in the loop, punt. */
2866 if (TREE_CODE (off
) != SSA_NAME
2867 || expr_invariant_in_loop_p (loop
, off
))
2870 if (offtype
== NULL_TREE
)
2871 offtype
= TREE_TYPE (off
);
2873 decl
= targetm
.vectorize
.builtin_gather (STMT_VINFO_VECTYPE (stmt_info
),
2875 if (decl
== NULL_TREE
)
/* Check whether a non-affine load in STMT (being in the loop referred to
   in LOOP_VINFO) is suitable for handling as strided load.  That is the case
   if its address is a simple induction variable.  If so return the base
   of that induction variable in *BASEP and the (loop-invariant) step
   in *STEPP, both only when that pointer is non-zero.

   This handles ARRAY_REFs (with variant index) and MEM_REFs (with variant
   base pointer) only.  */
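/* For example, a load a[j] where j is advanced by a loop-invariant but
   non-constant stride each iteration (j += stride) has such an address:
   the index is a simple induction variable whose step is the stride, so
   the access can be handled as a strided load.  */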
2897 vect_check_strided_load (gimple stmt
, loop_vec_info loop_vinfo
, tree
*basep
,
2900 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2901 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2902 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
2906 if (!DR_IS_READ (dr
))
2911 if (TREE_CODE (base
) == ARRAY_REF
)
2913 off
= TREE_OPERAND (base
, 1);
2914 base
= TREE_OPERAND (base
, 0);
2916 else if (TREE_CODE (base
) == MEM_REF
)
2918 off
= TREE_OPERAND (base
, 0);
2919 base
= TREE_OPERAND (base
, 1);
2924 if (TREE_CODE (off
) != SSA_NAME
)
2927 if (!expr_invariant_in_loop_p (loop
, base
)
2928 || !simple_iv (loop
, loop_containing_stmt (stmt
), off
, &iv
, true))
2938 /* Function vect_analyze_data_refs.
2940 Find all the data references in the loop or basic block.
2942 The general structure of the analysis of data refs in the vectorizer is as
2944 1- vect_analyze_data_refs(loop/bb): call
2945 compute_data_dependences_for_loop/bb to find and analyze all data-refs
2946 in the loop/bb and their dependences.
2947 2- vect_analyze_dependences(): apply dependence testing using ddrs.
2948 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
2949 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
2954 vect_analyze_data_refs (loop_vec_info loop_vinfo
,
2955 bb_vec_info bb_vinfo
,
2958 struct loop
*loop
= NULL
;
2959 basic_block bb
= NULL
;
2961 VEC (data_reference_p
, heap
) *datarefs
;
2962 struct data_reference
*dr
;
2964 bool res
, stop_bb_analysis
= false;
2966 if (dump_enabled_p ())
2967 dump_printf_loc (MSG_NOTE
, vect_location
,
2968 "=== vect_analyze_data_refs ===\n");
2972 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2973 res
= compute_data_dependences_for_loop
2975 &LOOP_VINFO_LOOP_NEST (loop_vinfo
),
2976 &LOOP_VINFO_DATAREFS (loop_vinfo
),
2977 &LOOP_VINFO_DDRS (loop_vinfo
));
2981 if (dump_enabled_p ())
2982 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2983 "not vectorized: loop contains function calls"
2984 " or data references that cannot be analyzed");
2988 datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
2992 gimple_stmt_iterator gsi
;
2994 bb
= BB_VINFO_BB (bb_vinfo
);
2995 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2997 gimple stmt
= gsi_stmt (gsi
);
2998 if (!find_data_references_in_stmt (NULL
, stmt
,
2999 &BB_VINFO_DATAREFS (bb_vinfo
)))
3001 /* Mark the rest of the basic-block as unvectorizable. */
3002 for (; !gsi_end_p (gsi
); gsi_next (&gsi
))
3004 stmt
= gsi_stmt (gsi
);
3005 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt
)) = false;
3010 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo
),
3011 &BB_VINFO_DDRS (bb_vinfo
), NULL
, true))
3013 if (dump_enabled_p ())
3014 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3015 "not vectorized: basic block contains function"
3016 " calls or data references that cannot be"
3021 datarefs
= BB_VINFO_DATAREFS (bb_vinfo
);
3024 /* Go through the data-refs, check that the analysis succeeded. Update
3025 pointer from stmt_vec_info struct to DR and vectype. */
3027 FOR_EACH_VEC_ELT (data_reference_p
, datarefs
, i
, dr
)
3030 stmt_vec_info stmt_info
;
3031 tree base
, offset
, init
;
3032 bool gather
= false;
3035 if (!dr
|| !DR_REF (dr
))
3037 if (dump_enabled_p ())
3038 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3039 "not vectorized: unhandled data-ref ");
3043 stmt
= DR_STMT (dr
);
3044 stmt_info
= vinfo_for_stmt (stmt
);
3046 if (stop_bb_analysis
)
3048 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3052 /* Check that analysis of the data-ref succeeded. */
3053 if (!DR_BASE_ADDRESS (dr
) || !DR_OFFSET (dr
) || !DR_INIT (dr
)
3056 /* If target supports vector gather loads, see if they can't
3060 && !TREE_THIS_VOLATILE (DR_REF (dr
))
3061 && targetm
.vectorize
.builtin_gather
!= NULL
3062 && !nested_in_vect_loop_p (loop
, stmt
))
3064 struct data_reference
*newdr
3065 = create_data_ref (NULL
, loop_containing_stmt (stmt
),
3066 DR_REF (dr
), stmt
, true);
3067 gcc_assert (newdr
!= NULL
&& DR_REF (newdr
));
3068 if (DR_BASE_ADDRESS (newdr
)
3069 && DR_OFFSET (newdr
)
3072 && integer_zerop (DR_STEP (newdr
)))
3078 free_data_ref (newdr
);
3083 if (dump_enabled_p ())
3085 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3086 "not vectorized: data ref analysis "
3088 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3093 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3094 stop_bb_analysis
= true;
3102 if (TREE_CODE (DR_BASE_ADDRESS (dr
)) == INTEGER_CST
)
3104 if (dump_enabled_p ())
3105 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3106 "not vectorized: base addr of dr is a "
3111 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3112 stop_bb_analysis
= true;
3121 if (TREE_THIS_VOLATILE (DR_REF (dr
)))
3123 if (dump_enabled_p ())
3125 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3126 "not vectorized: volatile type ");
3127 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3132 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3133 stop_bb_analysis
= true;
3140 if (stmt_can_throw_internal (stmt
))
3142 if (dump_enabled_p ())
3144 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3145 "not vectorized: statement can throw an "
3147 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3152 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3153 stop_bb_analysis
= true;
3162 if (TREE_CODE (DR_REF (dr
)) == COMPONENT_REF
3163 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr
), 1)))
3165 if (dump_enabled_p ())
3167 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3168 "not vectorized: statement is bitfield "
3170 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3175 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3176 stop_bb_analysis
= true;
3185 base
= unshare_expr (DR_BASE_ADDRESS (dr
));
3186 offset
= unshare_expr (DR_OFFSET (dr
));
3187 init
= unshare_expr (DR_INIT (dr
));
3189 if (is_gimple_call (stmt
))
3191 if (dump_enabled_p ())
3193 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3194 "not vectorized: dr in a call ");
3195 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3200 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3201 stop_bb_analysis
= true;
3210 /* Update DR field in stmt_vec_info struct. */
3212 /* If the dataref is in an inner-loop of the loop that is considered for
3213 for vectorization, we also want to analyze the access relative to
3214 the outer-loop (DR contains information only relative to the
3215 inner-most enclosing loop). We do that by building a reference to the
3216 first location accessed by the inner-loop, and analyze it relative to
3218 if (loop
&& nested_in_vect_loop_p (loop
, stmt
))
3220 tree outer_step
, outer_base
, outer_init
;
3221 HOST_WIDE_INT pbitsize
, pbitpos
;
3223 enum machine_mode pmode
;
3224 int punsignedp
, pvolatilep
;
3225 affine_iv base_iv
, offset_iv
;
3228 /* Build a reference to the first location accessed by the
3229 inner-loop: *(BASE+INIT). (The first location is actually
3230 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3231 tree inner_base
= build_fold_indirect_ref
3232 (fold_build_pointer_plus (base
, init
));
3234 if (dump_enabled_p ())
3236 dump_printf_loc (MSG_NOTE
, vect_location
,
3237 "analyze in outer-loop: ");
3238 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, inner_base
);
3241 outer_base
= get_inner_reference (inner_base
, &pbitsize
, &pbitpos
,
3242 &poffset
, &pmode
, &punsignedp
, &pvolatilep
, false);
3243 gcc_assert (outer_base
!= NULL_TREE
);
3245 if (pbitpos
% BITS_PER_UNIT
!= 0)
3247 if (dump_enabled_p ())
3248 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3249 "failed: bit offset alignment.\n");
3253 outer_base
= build_fold_addr_expr (outer_base
);
3254 if (!simple_iv (loop
, loop_containing_stmt (stmt
), outer_base
,
3257 if (dump_enabled_p ())
3258 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3259 "failed: evolution of base is not affine.\n");
3266 poffset
= fold_build2 (PLUS_EXPR
, TREE_TYPE (offset
), offset
,
3274 offset_iv
.base
= ssize_int (0);
3275 offset_iv
.step
= ssize_int (0);
3277 else if (!simple_iv (loop
, loop_containing_stmt (stmt
), poffset
,
3280 if (dump_enabled_p ())
3281 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3282 "evolution of offset is not affine.\n");
3286 outer_init
= ssize_int (pbitpos
/ BITS_PER_UNIT
);
3287 split_constant_offset (base_iv
.base
, &base_iv
.base
, &dinit
);
3288 outer_init
= size_binop (PLUS_EXPR
, outer_init
, dinit
);
3289 split_constant_offset (offset_iv
.base
, &offset_iv
.base
, &dinit
);
3290 outer_init
= size_binop (PLUS_EXPR
, outer_init
, dinit
);
3292 outer_step
= size_binop (PLUS_EXPR
,
3293 fold_convert (ssizetype
, base_iv
.step
),
3294 fold_convert (ssizetype
, offset_iv
.step
));
3296 STMT_VINFO_DR_STEP (stmt_info
) = outer_step
;
3297 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3298 STMT_VINFO_DR_BASE_ADDRESS (stmt_info
) = base_iv
.base
;
3299 STMT_VINFO_DR_INIT (stmt_info
) = outer_init
;
3300 STMT_VINFO_DR_OFFSET (stmt_info
) =
3301 fold_convert (ssizetype
, offset_iv
.base
);
3302 STMT_VINFO_DR_ALIGNED_TO (stmt_info
) =
3303 size_int (highest_pow2_factor (offset_iv
.base
));
3305 if (dump_enabled_p ())
3307 dump_printf_loc (MSG_NOTE
, vect_location
,
3308 "\touter base_address: ");
3309 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3310 STMT_VINFO_DR_BASE_ADDRESS (stmt_info
));
3311 dump_printf (MSG_NOTE
, "\n\touter offset from base address: ");
3312 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3313 STMT_VINFO_DR_OFFSET (stmt_info
));
3314 dump_printf (MSG_NOTE
,
3315 "\n\touter constant offset from base address: ");
3316 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3317 STMT_VINFO_DR_INIT (stmt_info
));
3318 dump_printf (MSG_NOTE
, "\n\touter step: ");
3319 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3320 STMT_VINFO_DR_STEP (stmt_info
));
3321 dump_printf (MSG_NOTE
, "\n\touter aligned to: ");
3322 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3323 STMT_VINFO_DR_ALIGNED_TO (stmt_info
));
3327 if (STMT_VINFO_DATA_REF (stmt_info
))
3329 if (dump_enabled_p ())
3331 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3332 "not vectorized: more than one data ref "
3334 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3339 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3340 stop_bb_analysis
= true;
3349 STMT_VINFO_DATA_REF (stmt_info
) = dr
;
3351 /* Set vectype for STMT. */
3352 scalar_type
= TREE_TYPE (DR_REF (dr
));
3353 STMT_VINFO_VECTYPE (stmt_info
) =
3354 get_vectype_for_scalar_type (scalar_type
);
3355 if (!STMT_VINFO_VECTYPE (stmt_info
))
3357 if (dump_enabled_p ())
3359 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3360 "not vectorized: no vectype for stmt: ");
3361 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3362 dump_printf (MSG_MISSED_OPTIMIZATION
, " scalar_type: ");
3363 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_DETAILS
,
3369 /* Mark the statement as not vectorizable. */
3370 STMT_VINFO_VECTORIZABLE (stmt_info
) = false;
3371 stop_bb_analysis
= true;
3377 STMT_VINFO_DATA_REF (stmt_info
) = NULL
;
3383 /* Adjust the minimal vectorization factor according to the
3385 vf
= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info
));
3391 unsigned int j
, k
, n
;
3392 struct data_reference
*olddr
3393 = VEC_index (data_reference_p
, datarefs
, i
);
3394 VEC (ddr_p
, heap
) *ddrs
= LOOP_VINFO_DDRS (loop_vinfo
);
3395 struct data_dependence_relation
*ddr
, *newddr
;
3398 VEC (loop_p
, heap
) *nest
= LOOP_VINFO_LOOP_NEST (loop_vinfo
);
3400 gather
= 0 != vect_check_gather (stmt
, loop_vinfo
, NULL
, &off
, NULL
);
3402 && get_vectype_for_scalar_type (TREE_TYPE (off
)) == NULL_TREE
)
3406 STMT_VINFO_DATA_REF (stmt_info
) = NULL
;
3408 if (dump_enabled_p ())
3410 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3411 "not vectorized: not suitable for gather "
3413 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3418 n
= VEC_length (data_reference_p
, datarefs
) - 1;
3419 for (j
= 0, k
= i
- 1; j
< i
; j
++)
3421 ddr
= VEC_index (ddr_p
, ddrs
, k
);
3422 gcc_assert (DDR_B (ddr
) == olddr
);
3423 newddr
= initialize_data_dependence_relation (DDR_A (ddr
), dr
,
3425 VEC_replace (ddr_p
, ddrs
, k
, newddr
);
3426 free_dependence_relation (ddr
);
3428 && DR_IS_WRITE (DDR_A (newddr
))
3429 && DDR_ARE_DEPENDENT (newddr
) != chrec_known
)
3435 n
= k
+ VEC_length (data_reference_p
, datarefs
) - i
- 1;
3438 ddr
= VEC_index (ddr_p
, ddrs
, k
);
3439 gcc_assert (DDR_A (ddr
) == olddr
);
3440 newddr
= initialize_data_dependence_relation (dr
, DDR_B (ddr
),
3442 VEC_replace (ddr_p
, ddrs
, k
, newddr
);
3443 free_dependence_relation (ddr
);
3445 && DR_IS_WRITE (DDR_B (newddr
))
3446 && DDR_ARE_DEPENDENT (newddr
) != chrec_known
)
3450 k
= VEC_length (ddr_p
, ddrs
)
3451 - VEC_length (data_reference_p
, datarefs
) + i
;
3452 ddr
= VEC_index (ddr_p
, ddrs
, k
);
3453 gcc_assert (DDR_A (ddr
) == olddr
&& DDR_B (ddr
) == olddr
);
3454 newddr
= initialize_data_dependence_relation (dr
, dr
, nest
);
3455 VEC_replace (ddr_p
, ddrs
, k
, newddr
);
3456 free_dependence_relation (ddr
);
3457 VEC_replace (data_reference_p
, datarefs
, i
, dr
);
3461 if (dump_enabled_p ())
3463 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3464 "not vectorized: data dependence conflict"
3465 " prevents gather load");
3466 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3471 STMT_VINFO_GATHER_P (stmt_info
) = true;
3474 && TREE_CODE (DR_STEP (dr
)) != INTEGER_CST
)
3476 bool strided_load
= false;
3477 if (!nested_in_vect_loop_p (loop
, stmt
))
3479 = vect_check_strided_load (stmt
, loop_vinfo
, NULL
, NULL
);
3482 if (dump_enabled_p ())
3484 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3485 "not vectorized: not suitable for strided "
3487 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3491 STMT_VINFO_STRIDE_LOAD_P (stmt_info
) = true;
3499 /* Function vect_get_new_vect_var.
3501 Returns a name for a new variable. The current naming scheme appends the
3502 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
3503 the name of vectorizer generated variables, and appends that to NAME if
3507 vect_get_new_vect_var (tree type
, enum vect_var_kind var_kind
, const char *name
)
3514 case vect_simple_var
:
3517 case vect_scalar_var
:
3520 case vect_pointer_var
:
3529 char* tmp
= concat (prefix
, name
, NULL
);
3530 new_vect_var
= create_tmp_reg (type
, tmp
);
3534 new_vect_var
= create_tmp_reg (type
, prefix
);
3536 return new_vect_var
;
3540 /* Function vect_create_addr_base_for_vector_ref.
3542 Create an expression that computes the address of the first memory location
3543 that will be accessed for a data reference.
3546 STMT: The statement containing the data reference.
3547 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3548 OFFSET: Optional. If supplied, it is be added to the initial address.
3549 LOOP: Specify relative to which loop-nest should the address be computed.
3550 For example, when the dataref is in an inner-loop nested in an
3551 outer-loop that is now being vectorized, LOOP can be either the
3552 outer-loop, or the inner-loop. The first memory location accessed
3553 by the following dataref ('in' points to short):
3560 if LOOP=i_loop: &in (relative to i_loop)
3561 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3564 1. Return an SSA_NAME whose value is the address of the memory location of
3565 the first vector of the data reference.
3566 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3567 these statement(s) which define the returned SSA_NAME.
3569 FORNOW: We are only handling array accesses with step 1. */
3572 vect_create_addr_base_for_vector_ref (gimple stmt
,
3573 gimple_seq
*new_stmt_list
,
3577 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3578 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
3579 tree data_ref_base
= unshare_expr (DR_BASE_ADDRESS (dr
));
3581 tree data_ref_base_var
;
3583 tree addr_base
, addr_expr
;
3585 gimple_seq seq
= NULL
;
3586 tree base_offset
= unshare_expr (DR_OFFSET (dr
));
3587 tree init
= unshare_expr (DR_INIT (dr
));
3589 tree step
= TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr
)));
3590 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3593 if (loop_vinfo
&& loop
&& loop
!= (gimple_bb (stmt
))->loop_father
)
3595 struct loop
*outer_loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3597 gcc_assert (nested_in_vect_loop_p (outer_loop
, stmt
));
3599 data_ref_base
= unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info
));
3600 base_offset
= unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info
));
3601 init
= unshare_expr (STMT_VINFO_DR_INIT (stmt_info
));
3605 base_name
= build_fold_indirect_ref (data_ref_base
);
3608 base_offset
= ssize_int (0);
3609 init
= ssize_int (0);
3610 base_name
= build_fold_indirect_ref (unshare_expr (DR_REF (dr
)));
3613 data_ref_base_var
= create_tmp_var (TREE_TYPE (data_ref_base
), "batmp");
3614 data_ref_base
= force_gimple_operand (data_ref_base
, &seq
, true,
3616 gimple_seq_add_seq (new_stmt_list
, seq
);
3618 /* Create base_offset */
3619 base_offset
= size_binop (PLUS_EXPR
,
3620 fold_convert (sizetype
, base_offset
),
3621 fold_convert (sizetype
, init
));
3622 dest
= create_tmp_var (sizetype
, "base_off");
3623 base_offset
= force_gimple_operand (base_offset
, &seq
, true, dest
);
3624 gimple_seq_add_seq (new_stmt_list
, seq
);
3628 tree tmp
= create_tmp_var (sizetype
, "offset");
3630 offset
= fold_build2 (MULT_EXPR
, sizetype
,
3631 fold_convert (sizetype
, offset
), step
);
3632 base_offset
= fold_build2 (PLUS_EXPR
, sizetype
,
3633 base_offset
, offset
);
3634 base_offset
= force_gimple_operand (base_offset
, &seq
, false, tmp
);
3635 gimple_seq_add_seq (new_stmt_list
, seq
);
3638 /* base + base_offset */
3640 addr_base
= fold_build_pointer_plus (data_ref_base
, base_offset
);
3643 addr_base
= build1 (ADDR_EXPR
,
3644 build_pointer_type (TREE_TYPE (DR_REF (dr
))),
3645 unshare_expr (DR_REF (dr
)));
3648 vect_ptr_type
= build_pointer_type (STMT_VINFO_VECTYPE (stmt_info
));
3649 base
= get_base_address (DR_REF (dr
));
3651 && TREE_CODE (base
) == MEM_REF
)
3653 = build_qualified_type (vect_ptr_type
,
3654 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base
, 0))));
3656 vec_stmt
= fold_convert (vect_ptr_type
, addr_base
);
3657 addr_expr
= vect_get_new_vect_var (vect_ptr_type
, vect_pointer_var
,
3658 get_name (base_name
));
3659 vec_stmt
= force_gimple_operand (vec_stmt
, &seq
, false, addr_expr
);
3660 gimple_seq_add_seq (new_stmt_list
, seq
);
3662 if (DR_PTR_INFO (dr
)
3663 && TREE_CODE (vec_stmt
) == SSA_NAME
)
3665 duplicate_ssa_name_ptr_info (vec_stmt
, DR_PTR_INFO (dr
));
3667 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (vec_stmt
));
3670 if (dump_enabled_p ())
3672 dump_printf_loc (MSG_NOTE
, vect_location
, "created ");
3673 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, vec_stmt
);
3680 /* Function vect_create_data_ref_ptr.
3682 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3683 location accessed in the loop by STMT, along with the def-use update
3684 chain to appropriately advance the pointer through the loop iterations.
3685 Also set aliasing information for the pointer. This pointer is used by
3686 the callers to this function to create a memory reference expression for
3687 vector load/store access.
3690 1. STMT: a stmt that references memory. Expected to be of the form
3691 GIMPLE_ASSIGN <name, data-ref> or
3692 GIMPLE_ASSIGN <data-ref, name>.
3693 2. AGGR_TYPE: the type of the reference, which should be either a vector
3695 3. AT_LOOP: the loop where the vector memref is to be created.
3696 4. OFFSET (optional): an offset to be added to the initial address accessed
3697 by the data-ref in STMT.
3698 5. BSI: location where the new stmts are to be placed if there is no loop
3699 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
3700 pointing to the initial address.
3703 1. Declare a new ptr to vector_type, and have it point to the base of the
3704 data reference (initial addressed accessed by the data reference).
3705 For example, for vector of type V8HI, the following code is generated:
3708 ap = (v8hi *)initial_address;
3710 if OFFSET is not supplied:
3711 initial_address = &a[init];
3712 if OFFSET is supplied:
3713 initial_address = &a[init + OFFSET];
3715 Return the initial_address in INITIAL_ADDRESS.
3717 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
3718 update the pointer in each iteration of the loop.
3720 Return the increment stmt that updates the pointer in PTR_INCR.
3722 3. Set INV_P to true if the access pattern of the data reference in the
3723 vectorized loop is invariant. Set it to false otherwise.
3725 4. Return the pointer. */
3728 vect_create_data_ref_ptr (gimple stmt
, tree aggr_type
, struct loop
*at_loop
,
3729 tree offset
, tree
*initial_address
,
3730 gimple_stmt_iterator
*gsi
, gimple
*ptr_incr
,
3731 bool only_init
, bool *inv_p
)
3734 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3735 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3736 struct loop
*loop
= NULL
;
3737 bool nested_in_vect_loop
= false;
3738 struct loop
*containing_loop
= NULL
;
3743 gimple_seq new_stmt_list
= NULL
;
3747 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
3749 gimple_stmt_iterator incr_gsi
;
3752 tree indx_before_incr
, indx_after_incr
;
3755 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3758 gcc_assert (TREE_CODE (aggr_type
) == ARRAY_TYPE
3759 || TREE_CODE (aggr_type
) == VECTOR_TYPE
);
3763 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3764 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
3765 containing_loop
= (gimple_bb (stmt
))->loop_father
;
3766 pe
= loop_preheader_edge (loop
);
3770 gcc_assert (bb_vinfo
);
3775 /* Check the step (evolution) of the load in LOOP, and record
3776 whether it's invariant. */
3777 if (nested_in_vect_loop
)
3778 step
= STMT_VINFO_DR_STEP (stmt_info
);
3780 step
= DR_STEP (STMT_VINFO_DATA_REF (stmt_info
));
3782 if (tree_int_cst_compare (step
, size_zero_node
) == 0)
3786 negative
= tree_int_cst_compare (step
, size_zero_node
) < 0;
3788 /* Create an expression for the first address accessed by this load
3790 base_name
= build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr
)));
3792 if (dump_enabled_p ())
3794 tree data_ref_base
= base_name
;
3795 dump_printf_loc (MSG_NOTE
, vect_location
,
3796 "create %s-pointer variable to type: ",
3797 tree_code_name
[(int) TREE_CODE (aggr_type
)]);
3798 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, aggr_type
);
3799 if (TREE_CODE (data_ref_base
) == VAR_DECL
3800 || TREE_CODE (data_ref_base
) == ARRAY_REF
)
3801 dump_printf (MSG_NOTE
, " vectorizing an array ref: ");
3802 else if (TREE_CODE (data_ref_base
) == COMPONENT_REF
)
3803 dump_printf (MSG_NOTE
, " vectorizing a record based array ref: ");
3804 else if (TREE_CODE (data_ref_base
) == SSA_NAME
)
3805 dump_printf (MSG_NOTE
, " vectorizing a pointer ref: ");
3806 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, base_name
);
3809 /* (1) Create the new aggregate-pointer variable. */
3810 aggr_ptr_type
= build_pointer_type (aggr_type
);
3811 base
= get_base_address (DR_REF (dr
));
3813 && TREE_CODE (base
) == MEM_REF
)
3815 = build_qualified_type (aggr_ptr_type
,
3816 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base
, 0))));
3817 aggr_ptr
= vect_get_new_vect_var (aggr_ptr_type
, vect_pointer_var
,
3818 get_name (base_name
));
3820 /* Vector and array types inherit the alias set of their component
3821 type by default so we need to use a ref-all pointer if the data
3822 reference does not conflict with the created aggregated data
3823 reference because it is not addressable. */
3824 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr
),
3825 get_alias_set (DR_REF (dr
))))
3828 = build_pointer_type_for_mode (aggr_type
,
3829 TYPE_MODE (aggr_ptr_type
), true);
3830 aggr_ptr
= vect_get_new_vect_var (aggr_ptr_type
, vect_pointer_var
,
3831 get_name (base_name
));
3834 /* Likewise for any of the data references in the stmt group. */
3835 else if (STMT_VINFO_GROUP_SIZE (stmt_info
) > 1)
3837 gimple orig_stmt
= STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info
);
3840 tree lhs
= gimple_assign_lhs (orig_stmt
);
3841 if (!alias_sets_conflict_p (get_deref_alias_set (aggr_ptr
),
3842 get_alias_set (lhs
)))
3845 = build_pointer_type_for_mode (aggr_type
,
3846 TYPE_MODE (aggr_ptr_type
), true);
3848 = vect_get_new_vect_var (aggr_ptr_type
, vect_pointer_var
,
3849 get_name (base_name
));
3853 orig_stmt
= STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo_for_stmt (orig_stmt
));
3858 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
3859 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
3860 def-use update cycles for the pointer: one relative to the outer-loop
3861 (LOOP), which is what steps (3) and (4) below do. The other is relative
3862 to the inner-loop (which is the inner-most loop containing the dataref),
3863 and this is done be step (5) below.
3865 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
3866 inner-most loop, and so steps (3),(4) work the same, and step (5) is
3867 redundant. Steps (3),(4) create the following:
3870 LOOP: vp1 = phi(vp0,vp2)
3876 If there is an inner-loop nested in loop, then step (5) will also be
3877 applied, and an additional update in the inner-loop will be created:
3880 LOOP: vp1 = phi(vp0,vp2)
3882 inner: vp3 = phi(vp1,vp4)
3883 vp4 = vp3 + inner_step
3889 /* (2) Calculate the initial address of the aggregate-pointer, and set
3890 the aggregate-pointer to point to it before the loop. */
3892 /* Create: (&(base[init_val+offset]) in the loop preheader. */
3894 new_temp
= vect_create_addr_base_for_vector_ref (stmt
, &new_stmt_list
,
3900 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, new_stmt_list
);
3901 gcc_assert (!new_bb
);
3904 gsi_insert_seq_before (gsi
, new_stmt_list
, GSI_SAME_STMT
);
3907 *initial_address
= new_temp
;
3909 /* Create: p = (aggr_type *) initial_base */
3910 if (TREE_CODE (new_temp
) != SSA_NAME
3911 || !useless_type_conversion_p (aggr_ptr_type
, TREE_TYPE (new_temp
)))
3913 vec_stmt
= gimple_build_assign (aggr_ptr
,
3914 fold_convert (aggr_ptr_type
, new_temp
));
3915 aggr_ptr_init
= make_ssa_name (aggr_ptr
, vec_stmt
);
3916 /* Copy the points-to information if it exists. */
3917 if (DR_PTR_INFO (dr
))
3918 duplicate_ssa_name_ptr_info (aggr_ptr_init
, DR_PTR_INFO (dr
));
3919 gimple_assign_set_lhs (vec_stmt
, aggr_ptr_init
);
3922 new_bb
= gsi_insert_on_edge_immediate (pe
, vec_stmt
);
3923 gcc_assert (!new_bb
);
3926 gsi_insert_before (gsi
, vec_stmt
, GSI_SAME_STMT
);
3929 aggr_ptr_init
= new_temp
;
3931 /* (3) Handle the updating of the aggregate-pointer inside the loop.
3932 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
3933 inner-loop nested in LOOP (during outer-loop vectorization). */
3935 /* No update in loop is required. */
3936 if (only_init
&& (!loop_vinfo
|| at_loop
== loop
))
3937 aptr
= aggr_ptr_init
;
3940 /* The step of the aggregate pointer is the type size. */
3941 tree step
= TYPE_SIZE_UNIT (aggr_type
);
3942 /* One exception to the above is when the scalar step of the load in
3943 LOOP is zero. In this case the step here is also zero. */
3945 step
= size_zero_node
;
3947 step
= fold_build1 (NEGATE_EXPR
, TREE_TYPE (step
), step
);
3949 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
3951 create_iv (aggr_ptr_init
,
3952 fold_convert (aggr_ptr_type
, step
),
3953 aggr_ptr
, loop
, &incr_gsi
, insert_after
,
3954 &indx_before_incr
, &indx_after_incr
);
3955 incr
= gsi_stmt (incr_gsi
);
3956 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
, NULL
));
3958 /* Copy the points-to information if it exists. */
3959 if (DR_PTR_INFO (dr
))
3961 duplicate_ssa_name_ptr_info (indx_before_incr
, DR_PTR_INFO (dr
));
3962 duplicate_ssa_name_ptr_info (indx_after_incr
, DR_PTR_INFO (dr
));
3967 aptr
= indx_before_incr
;
3970 if (!nested_in_vect_loop
|| only_init
)
3974 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
3975 nested in LOOP, if exists. */
3977 gcc_assert (nested_in_vect_loop
);
3980 standard_iv_increment_position (containing_loop
, &incr_gsi
,
3982 create_iv (aptr
, fold_convert (aggr_ptr_type
, DR_STEP (dr
)), aggr_ptr
,
3983 containing_loop
, &incr_gsi
, insert_after
, &indx_before_incr
,
3985 incr
= gsi_stmt (incr_gsi
);
3986 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
, NULL
));
3988 /* Copy the points-to information if it exists. */
3989 if (DR_PTR_INFO (dr
))
3991 duplicate_ssa_name_ptr_info (indx_before_incr
, DR_PTR_INFO (dr
));
3992 duplicate_ssa_name_ptr_info (indx_after_incr
, DR_PTR_INFO (dr
));
3997 return indx_before_incr
;
/* Function bump_vector_ptr

   Increment a pointer (to a vector type) by vector-size.  If requested,
   i.e. if PTR_INCR is given, then also connect the new increment stmt
   to the existing def-use update-chain of the pointer, by modifying
   the PTR_INCR as illustrated below:

   The pointer def-use update-chain before this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
        PTR_INCR:       p_2 = DATAREF_PTR + step

   The pointer def-use update-chain after this function:
                        DATAREF_PTR = phi (p_0, p_2)
                        ....
                        NEW_DATAREF_PTR = DATAREF_PTR + BUMP
                        ....
        PTR_INCR:       p_2 = NEW_DATAREF_PTR + step

   Input:
   DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
                 in the loop.
   PTR_INCR - optional.  The stmt that updates the pointer in each iteration of
              the loop.  The increment amount across iterations is expected
              to be vector_size.
   BSI - location where the new update stmt is to be placed.
   STMT - the original scalar memory-access stmt that is being vectorized.
   BUMP - optional.  The offset by which to bump the pointer.  If not given,
          the offset is assumed to be vector_size.

   Output: Return NEW_DATAREF_PTR as illustrated above.  */
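/* For a concrete picture: bumping by the default offset is nothing more than
   advancing the pointer by the vector size in bytes.  A minimal standalone
   sketch of that arithmetic (plain C, assuming a 16-byte vector such as V4SF
   or V16QI; the names below are illustrative only, not vectorizer entities):

     static char *
     bump_by_vector_size (char *dataref_ptr)
     {
       const unsigned long vector_size = 16;   // assumed vector size in bytes
       return dataref_ptr + vector_size;       // NEW_DATAREF_PTR = DATAREF_PTR + BUMP
     }

   The def-use rewiring performed below then redirects the use of DATAREF_PTR
   in PTR_INCR to the bumped name, exactly as in the chain shown above.  */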
tree
bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
		 gimple stmt, tree bump)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree update = TYPE_SIZE_UNIT (vectype);
  gimple incr_stmt;
  ssa_op_iter iter;
  use_operand_p use_p;
  tree new_dataref_ptr;

  if (bump)
    update = bump;

  new_dataref_ptr = copy_ssa_name (dataref_ptr, NULL);
  incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, new_dataref_ptr,
					    dataref_ptr, update);
  vect_finish_stmt_generation (stmt, incr_stmt, gsi);

  /* Copy the points-to information if it exists.  */
  if (DR_PTR_INFO (dr))
    {
      duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
      mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
    }

  if (!ptr_incr)
    return new_dataref_ptr;

  /* Update the vector-pointer's cross-iteration increment.  */
  FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (use == dataref_ptr)
	SET_USE (use_p, new_dataref_ptr);
      else
	gcc_assert (tree_int_cst_compare (use, update) == 0);
    }

  return new_dataref_ptr;
}
/* Function vect_create_destination_var.

   Create a new temporary of type VECTYPE.  */

tree
vect_create_destination_var (tree scalar_dest, tree vectype)
{
  tree vec_dest;
  const char *new_name;
  tree type;
  enum vect_var_kind kind;

  kind = vectype ? vect_simple_var : vect_scalar_var;
  type = vectype ? vectype : TREE_TYPE (scalar_dest);

  gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);

  new_name = get_name (scalar_dest);
  vec_dest = vect_get_new_vect_var (type, kind, new_name);

  return vec_dest;
}
/* Function vect_grouped_store_supported.

   Returns TRUE if interleave high and interleave low permutations
   are supported, and FALSE otherwise.  */

bool
vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  enum machine_mode mode = TYPE_MODE (vectype);

  /* vect_permute_store_chain requires the group size to be a power of two.  */
  if (exact_log2 (count) == -1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "the size of the group of accesses"
			 " is not a power of 2");
      return false;
    }

  /* Check that the permutation is supported.  */
  if (VECTOR_MODE_P (mode))
    {
      unsigned int i, nelt = GET_MODE_NUNITS (mode);
      unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

      for (i = 0; i < nelt / 2; i++)
	{
	  sel[i * 2] = i;
	  sel[i * 2 + 1] = i + nelt;
	}
      if (can_vec_perm_p (mode, false, sel))
	{
	  for (i = 0; i < nelt; i++)
	    sel[i] += nelt / 2;
	  if (can_vec_perm_p (mode, false, sel))
	    return true;
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_MISSED_OPTIMIZATION,
		 "interleave op not supported by target.");
  return false;
}
/* Return TRUE if vec_store_lanes is available for COUNT vectors of
   type VECTYPE.  */

bool
vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  return vect_lanes_optab_supported_p ("vec_store_lanes",
				       vec_store_lanes_optab,
				       vectype, count);
}
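/* For intuition: a vec_store_lanes instruction stores COUNT vectors as a
   single interleaved array in memory (e.g. ARM NEON vst2/vst3/vst4).  A
   scalar model of the memory effect for COUNT == 2, assuming int elements
   (illustrative only, independent of the vectorizer):

     static void
     model_store_2_lanes (const int *v0, const int *v1, int *out,
			  unsigned int nelt)
     {
       unsigned int i;
       for (i = 0; i < nelt; i++)
	 {
	   out[2 * i]     = v0[i];   // lane 0
	   out[2 * i + 1] = v1[i];   // lane 1
	 }
     }
*/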
/* Function vect_permute_store_chain.

   Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
   a power of 2, generate interleave_high/low stmts to reorder the data
   correctly for the stores.  Return the final references for stores in
   RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements.  We assign a number to
   each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:   0  8 16 24  1  9 17 25
   2nd vec:   2 10 18 26  3 11 19 27
   3rd vec:   4 12 20 28  5 13 21 29
   4th vec:   6 14 22 30  7 15 23 31

   i.e., we interleave the contents of the four vectors in their order.

   We use interleave_high/low instructions to create such output.  The input of
   each interleave_high/low operation is two vectors:
   1st vec    2nd vec
   0 1 2 3    4 5 6 7

   the even elements of the result vector are obtained left-to-right from the
   high/low elements of the first vector.  The odd elements of the result are
   obtained left-to-right from the high/low elements of the second vector.
   The output of interleave_high will be:   0 4 1 5
   and of interleave_low:                   2 6 3 7

   The permutation is done in log LENGTH stages.  In each stage interleave_high
   and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
   where the first argument is taken from the first half of DR_CHAIN and the
   second argument from its second half.  In our example,

   I1: interleave_high (1st vec, 3rd vec)
   I2: interleave_low (1st vec, 3rd vec)
   I3: interleave_high (2nd vec, 4th vec)
   I4: interleave_low (2nd vec, 4th vec)

   The output for the first stage is:

   I1:  0 16  1 17  2 18  3 19
   I2:  4 20  5 21  6 22  7 23
   I3:  8 24  9 25 10 26 11 27
   I4: 12 28 13 29 14 30 15 31

   The output of the second stage, i.e. the final result is:

   I1:  0  8 16 24  1  9 17 25
   I2:  2 10 18 26  3 11 19 27
   I3:  4 12 20 28  5 13 21 29
   I4:  6 14 22 30  7 15 23 31.  */
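/* Concretely, for nelt == 4 the two selection vectors built in this function
   are {0, 4, 1, 5} (interleave_high) and {2, 6, 3, 7} (interleave_low).
   A minimal standalone sketch of that mask construction (plain C, independent
   of the tree/VEC machinery used below; names are illustrative only):

     static void
     build_interleave_masks (unsigned int nelt, unsigned char *sel_high,
			     unsigned char *sel_low)
     {
       unsigned int i;
       for (i = 0; i < nelt / 2; i++)
	 {
	   sel_high[i * 2]     = i;          // element from the first vector
	   sel_high[i * 2 + 1] = i + nelt;   // element from the second vector
	 }
       for (i = 0; i < nelt; i++)
	 sel_low[i] = sel_high[i] + nelt / 2;   // same pattern, low halves
     }
*/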
void
vect_permute_store_chain (VEC(tree,heap) *dr_chain,
			  unsigned int length,
			  gimple stmt,
			  gimple_stmt_iterator *gsi,
			  VEC(tree,heap) **result_chain)
{
  tree vect1, vect2, high, low;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  tree perm_mask_low, perm_mask_high;
  unsigned int i, n;
  unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  *result_chain = VEC_copy (tree, heap, dr_chain);

  for (i = 0, n = nelt / 2; i < n; i++)
    {
      sel[i * 2] = i;
      sel[i * 2 + 1] = i + nelt;
    }
  perm_mask_high = vect_gen_perm_mask (vectype, sel);
  gcc_assert (perm_mask_high != NULL);

  for (i = 0; i < nelt; i++)
    sel[i] += nelt / 2;
  perm_mask_low = vect_gen_perm_mask (vectype, sel);
  gcc_assert (perm_mask_low != NULL);

  for (i = 0, n = exact_log2 (length); i < n; i++)
    {
      for (j = 0; j < length / 2; j++)
	{
	  vect1 = VEC_index (tree, dr_chain, j);
	  vect2 = VEC_index (tree, dr_chain, j + length / 2);

	  /* Create interleaving stmt:
	     high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1, ...}>  */
	  high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
	  perm_stmt
	    = gimple_build_assign_with_ops (VEC_PERM_EXPR, high,
					    vect1, vect2, perm_mask_high);
	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
	  VEC_replace (tree, *result_chain, 2 * j, high);

	  /* Create interleaving stmt:
	     low = VEC_PERM_EXPR <vect1, vect2, {nelt/2, nelt*3/2, nelt/2+1,
						 nelt*3/2+1, ...}>  */
	  low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
	  perm_stmt
	    = gimple_build_assign_with_ops (VEC_PERM_EXPR, low,
					    vect1, vect2, perm_mask_low);
	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
	  VEC_replace (tree, *result_chain, 2 * j + 1, low);
	}
      dr_chain = VEC_copy (tree, heap, *result_chain);
    }
}
/* Function vect_setup_realignment

   This function is called when vectorizing an unaligned load using
   the dr_explicit_realign[_optimized] scheme.
   This function generates the following code at the loop prolog:

      p = initial_addr;
   x  msq_init = *(floor(p));   # prolog load
      realignment_token = call target_builtin;
    loop:
   x  msq = phi (msq_init, ---)

   The stmts marked with x are generated only for the case of
   dr_explicit_realign_optimized.

   The code above sets up a new (vector) pointer, pointing to the first
   location accessed by STMT, and a "floor-aligned" load using that pointer.
   It also generates code to compute the "realignment-token" (if the relevant
   target hook was defined), and creates a phi-node at the loop-header bb
   whose arguments are the result of the prolog-load (created by this
   function) and the result of a load that takes place in the loop (to be
   created by the caller to this function).

   For the case of dr_explicit_realign_optimized:
   The caller to this function uses the phi-result (msq) to create the
   realignment code inside the loop, and sets up the missing phi argument,
   as follows:
    loop:
      msq = phi (msq_init, lsq)
      lsq = *(floor(p'));        # load in loop
      result = realign_load (msq, lsq, realignment_token);

   For the case of dr_explicit_realign:
    loop:
      msq = *(floor(p));         # load in loop
      p' = p + (VS-1);
      lsq = *(floor(p'));        # load in loop
      result = realign_load (msq, lsq, realignment_token);

   Input:
   STMT - (scalar) load stmt to be vectorized.  This load accesses
          a memory location that may be unaligned.
   BSI - place where new code is to be inserted.
   ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
                              is used.

   Output:
   REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
                       target hook, if defined.
   Return value - the result of the loop-header phi node.  */
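/* The "floor (p)" used above is computed by masking off the low address
   bits: the prolog load below builds a BIT_AND_EXPR of the address with
   -TYPE_ALIGN_UNIT (vectype).  A minimal standalone sketch of that
   computation, assuming a 16-byte vector alignment for illustration:

     #include <stdint.h>

     static void *
     floor_align_16 (void *p)
     {
       return (void *) ((uintptr_t) p & ~(uintptr_t) 15);   // same as & -16
     }
*/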
tree
vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
			tree *realignment_token,
			enum dr_alignment_support alignment_support_scheme,
			tree init_addr,
			struct loop **at_loop)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  struct loop *loop = NULL;
  edge pe = NULL;
  tree scalar_dest = gimple_assign_lhs (stmt);
  tree vec_dest;
  gimple inc;
  tree ptr;
  tree data_ref;
  gimple new_stmt;
  basic_block new_bb;
  tree msq_init = NULL_TREE;
  tree new_temp;
  gimple phi_stmt;
  tree msq = NULL_TREE;
  gimple_seq stmts = NULL;
  bool inv_p;
  bool compute_in_loop = false;
  bool nested_in_vect_loop = false;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  struct loop *loop_for_initial_load = NULL;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
    }

  gcc_assert (alignment_support_scheme == dr_explicit_realign
	      || alignment_support_scheme == dr_explicit_realign_optimized);
  /* We need to generate three things:
     1. the misalignment computation
     2. the extra vector load (for the optimized realignment scheme).
     3. the phi node for the two vectors from which the realignment is
	done (for the optimized realignment scheme).  */

  /* 1. Determine where to generate the misalignment computation.

     If INIT_ADDR is NULL_TREE, this indicates that the misalignment
     calculation will be generated by this function, outside the loop (in the
     preheader).  Otherwise, INIT_ADDR had already been computed for us by the
     caller, inside the loop.

     Background: If the misalignment remains fixed throughout the iterations of
     the loop, then both realignment schemes are applicable, and also the
     misalignment computation can be done outside LOOP.  This is because we are
     vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
     are a multiple of VS (the Vector Size), and therefore the misalignment in
     different vectorized LOOP iterations is always the same.
     The problem arises only if the memory access is in an inner-loop nested
     inside LOOP, which is now being vectorized using outer-loop vectorization.
     This is the only case when the misalignment of the memory access may not
     remain fixed throughout the iterations of the inner-loop (as explained in
     detail in vect_supportable_dr_alignment).  In this case, not only is the
     optimized realignment scheme not applicable, but also the misalignment
     computation (and generation of the realignment token that is passed to
     REALIGN_LOAD) have to be done inside the loop.

     In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
     or not, which in turn determines if the misalignment is computed inside
     the inner-loop, or outside LOOP.  */

  if (init_addr != NULL_TREE || !loop_vinfo)
    {
      compute_in_loop = true;
      gcc_assert (alignment_support_scheme == dr_explicit_realign);
    }
  /* 2. Determine where to generate the extra vector load.

     For the optimized realignment scheme, instead of generating two vector
     loads in each iteration, we generate a single extra vector load in the
     preheader of the loop, and in each iteration reuse the result of the
     vector load from the previous iteration.  In case the memory access is in
     an inner-loop nested inside LOOP, which is now being vectorized using
     outer-loop vectorization, we need to determine whether this initial vector
     load should be generated at the preheader of the inner-loop, or can be
     generated at the preheader of LOOP.  If the memory access has no evolution
     in LOOP, it can be generated in the preheader of LOOP.  Otherwise, it has
     to be generated inside LOOP (in the preheader of the inner-loop).  */

  if (nested_in_vect_loop)
    {
      tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
      bool invariant_in_outerloop =
	(tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
      loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
    }
  else
    loop_for_initial_load = loop;
  if (at_loop)
    *at_loop = loop_for_initial_load;

  if (loop_for_initial_load)
    pe = loop_preheader_edge (loop_for_initial_load);
  /* 3. For the case of the optimized realignment, create the first vector
        load at the loop preheader.  */

  if (alignment_support_scheme == dr_explicit_realign_optimized)
    {
      /* Create msq_init = *(floor(p1)) in the loop preheader  */

      gcc_assert (!compute_in_loop);
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
				      NULL_TREE, &init_addr, NULL, &inc,
				      true, &inv_p);
      new_temp = copy_ssa_name (ptr, NULL);
      new_stmt = gimple_build_assign_with_ops
		   (BIT_AND_EXPR, new_temp, ptr,
		    build_int_cst (TREE_TYPE (ptr),
				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
      new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
      gcc_assert (!new_bb);
      data_ref
	= build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
		  build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
      new_stmt = gimple_build_assign (vec_dest, data_ref);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      if (pe)
	{
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);

      msq_init = gimple_assign_lhs (new_stmt);
    }
  /* 4. Create realignment token using a target builtin, if available.
        It is done either inside the containing loop, or before LOOP (as
        determined above).  */

  if (targetm.vectorize.builtin_mask_for_load)
    {
      tree builtin_decl;

      /* Compute INIT_ADDR - the initial addressed accessed by this memref.  */
      if (!init_addr)
	{
	  /* Generate the INIT_ADDR computation outside LOOP.  */
	  init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
							    NULL_TREE, loop);
	  if (loop)
	    {
	      pe = loop_preheader_edge (loop);
	      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	      gcc_assert (!new_bb);
	    }
	  else
	    gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	}

      builtin_decl = targetm.vectorize.builtin_mask_for_load ();
      new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
      vec_dest =
	vect_create_destination_var (scalar_dest,
				     gimple_call_return_type (new_stmt));
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);

      if (compute_in_loop)
	gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      else
	{
	  /* Generate the misalignment computation outside LOOP.  */
	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}

      *realignment_token = gimple_call_lhs (new_stmt);

      /* The result of the CALL_EXPR to this builtin is determined from
         the value of the parameter and no global variables are touched
         which makes the builtin a "const" function.  Requiring the
         builtin to have the "const" attribute makes it unnecessary
	 to call mark_call_clobbered.  */
      gcc_assert (TREE_READONLY (builtin_decl));
    }

  if (alignment_support_scheme == dr_explicit_realign)
    return msq;

  gcc_assert (!compute_in_loop);
  gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
  /* 5. Create msq = phi <msq_init, lsq> in loop  */

  pe = loop_preheader_edge (containing_loop);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  msq = make_ssa_name (vec_dest, NULL);
  phi_stmt = create_phi_node (msq, containing_loop->header);
  add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);

  return msq;
}
/* Function vect_grouped_load_supported.

   Returns TRUE if even and odd permutations are supported,
   and FALSE otherwise.  */

bool
vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  enum machine_mode mode = TYPE_MODE (vectype);

  /* vect_permute_load_chain requires the group size to be a power of two.  */
  if (exact_log2 (count) == -1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "the size of the group of accesses"
			 " is not a power of 2");
      return false;
    }

  /* Check that the permutation is supported.  */
  if (VECTOR_MODE_P (mode))
    {
      unsigned int i, nelt = GET_MODE_NUNITS (mode);
      unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

      for (i = 0; i < nelt; i++)
	sel[i] = i * 2;
      if (can_vec_perm_p (mode, false, sel))
	{
	  for (i = 0; i < nelt; i++)
	    sel[i] = i * 2 + 1;
	  if (can_vec_perm_p (mode, false, sel))
	    return true;
	}
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
		     "extract even/odd not supported by target");
  return false;
}
/* Return TRUE if vec_load_lanes is available for COUNT vectors of
   type VECTYPE.  */

bool
vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  return vect_lanes_optab_supported_p ("vec_load_lanes",
				       vec_load_lanes_optab,
				       vectype, count);
}
/* Function vect_permute_load_chain.

   Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
   a power of 2, generate extract_even/odd stmts to reorder the input data
   correctly.  Return the final references for loads in RESULT_CHAIN.

   E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
   The input is 4 vectors each containing 8 elements.  We assign a number to
   each element, the input sequence is:

   1st vec:   0  1  2  3  4  5  6  7
   2nd vec:   8  9 10 11 12 13 14 15
   3rd vec:  16 17 18 19 20 21 22 23
   4th vec:  24 25 26 27 28 29 30 31

   The output sequence should be:

   1st vec:  0 4  8 12 16 20 24 28
   2nd vec:  1 5  9 13 17 21 25 29
   3rd vec:  2 6 10 14 18 22 26 30
   4th vec:  3 7 11 15 19 23 27 31

   i.e., the first output vector should contain the first elements of each
   interleaving group, etc.

   We use extract_even/odd instructions to create such output.  The input of
   each extract_even/odd operation is two vectors
   1st vec    2nd vec
   0 1 2 3    4 5 6 7

   and the output is the vector of extracted even/odd elements.  The output of
   extract_even will be:   0 2 4 6
   and of extract_odd:     1 3 5 7

   The permutation is done in log LENGTH stages.  In each stage extract_even
   and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
   their order.  In our example,

   E1: extract_even (1st vec, 2nd vec)
   E2: extract_odd (1st vec, 2nd vec)
   E3: extract_even (3rd vec, 4th vec)
   E4: extract_odd (3rd vec, 4th vec)

   The output for the first stage will be:

   E1:  0  2  4  6  8 10 12 14
   E2:  1  3  5  7  9 11 13 15
   E3: 16 18 20 22 24 26 28 30
   E4: 17 19 21 23 25 27 29 31

   In order to proceed and create the correct sequence for the next stage (or
   for the correct output, if the second stage is the last one, as in our
   example), we first put the output of extract_even operation and then the
   output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
   The input for the second stage is:

   1st vec (E1):  0  2  4  6  8 10 12 14
   2nd vec (E3): 16 18 20 22 24 26 28 30
   3rd vec (E2):  1  3  5  7  9 11 13 15
   4th vec (E4): 17 19 21 23 25 27 29 31

   The output of the second stage:

   E1: 0 4  8 12 16 20 24 28
   E2: 2 6 10 14 18 22 26 30
   E3: 1 5  9 13 17 21 25 29
   E4: 3 7 11 15 19 23 27 31

   And RESULT_CHAIN after reordering:

   1st vec (E1):  0 4  8 12 16 20 24 28
   2nd vec (E3):  1 5  9 13 17 21 25 29
   3rd vec (E2):  2 6 10 14 18 22 26 30
   4th vec (E4):  3 7 11 15 19 23 27 31.  */
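/* Concretely, for nelt == 4 the two selection vectors built in this function
   are {0, 2, 4, 6} (extract_even) and {1, 3, 5, 7} (extract_odd).  A minimal
   standalone sketch of that mask construction (plain C, independent of the
   tree/VEC machinery used below; names are illustrative only):

     static void
     build_even_odd_masks (unsigned int nelt, unsigned char *sel_even,
			   unsigned char *sel_odd)
     {
       unsigned int i;
       for (i = 0; i < nelt; i++)
	 {
	   sel_even[i] = i * 2;       // even elements of the concatenated input
	   sel_odd[i]  = i * 2 + 1;   // odd elements of the concatenated input
	 }
     }
*/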
static void
vect_permute_load_chain (VEC(tree,heap) *dr_chain,
			 unsigned int length,
			 gimple stmt,
			 gimple_stmt_iterator *gsi,
			 VEC(tree,heap) **result_chain)
{
  tree data_ref, first_vect, second_vect;
  tree perm_mask_even, perm_mask_odd;
  gimple perm_stmt;
  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
  unsigned int i, j, log_length = exact_log2 (length);
  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  *result_chain = VEC_copy (tree, heap, dr_chain);

  for (i = 0; i < nelt; ++i)
    sel[i] = i * 2;
  perm_mask_even = vect_gen_perm_mask (vectype, sel);
  gcc_assert (perm_mask_even != NULL);

  for (i = 0; i < nelt; ++i)
    sel[i] = i * 2 + 1;
  perm_mask_odd = vect_gen_perm_mask (vectype, sel);
  gcc_assert (perm_mask_odd != NULL);

  for (i = 0; i < log_length; i++)
    {
      for (j = 0; j < length; j += 2)
	{
	  first_vect = VEC_index (tree, dr_chain, j);
	  second_vect = VEC_index (tree, dr_chain, j + 1);

	  /* data_ref = permute_even (first_data_ref, second_data_ref);  */
	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
	  perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
						    first_vect, second_vect,
						    perm_mask_even);
	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
	  VEC_replace (tree, *result_chain, j / 2, data_ref);

	  /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
	  perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
						    first_vect, second_vect,
						    perm_mask_odd);
	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
	  VEC_replace (tree, *result_chain, j / 2 + length / 2, data_ref);
	}
      dr_chain = VEC_copy (tree, heap, *result_chain);
    }
}
/* Function vect_transform_grouped_load.

   Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
   to perform their permutation and ascribe the result vectorized statements to
   the scalar statements.  */

void
vect_transform_grouped_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
			     gimple_stmt_iterator *gsi)
{
  VEC(tree,heap) *result_chain = NULL;

  /* DR_CHAIN contains input data-refs that are a part of the interleaving.
     RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
     vectors, that are ready for vector computation.  */
  result_chain = VEC_alloc (tree, heap, size);
  vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
  vect_record_grouped_load_vectors (stmt, result_chain);
  VEC_free (tree, heap, result_chain);
}
/* RESULT_CHAIN contains the output of a group of grouped loads that were
   generated as part of the vectorization of STMT.  Assign the statement
   for each vector to the associated scalar statement.  */

void
vect_record_grouped_load_vectors (gimple stmt, VEC(tree,heap) *result_chain)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
  gimple next_stmt, new_stmt;
  unsigned int i, gap_count;
  tree tmp_data_ref;

  /* Put a permuted data-ref in the VECTORIZED_STMT field.
     Since we scan the chain starting from its first node, their order
     corresponds the order of data-refs in RESULT_CHAIN.  */
  next_stmt = first_stmt;
  gap_count = 1;
  FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
    {
      if (!next_stmt)
	break;

      /* Skip the gaps.  Loads created for the gaps will be removed by dead
         code elimination pass later.  No need to check for the first stmt in
         the group, since it always exists.
         GROUP_GAP is the number of steps in elements from the previous
         access (if there is no gap GROUP_GAP is 1).  We skip loads that
         correspond to the gaps.  */
      if (next_stmt != first_stmt
	  && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
	{
	  gap_count++;
	  continue;
	}

      while (next_stmt)
	{
	  new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
	  /* We assume that if VEC_STMT is not NULL, this is a case of multiple
	     copies, and we put the new vector statement in the first available
	     RELATED_STMT field.  */
	  if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
	    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
	  else
	    {
	      if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
		{
		  gimple prev_stmt =
		    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
		  gimple rel_stmt =
		    STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
		  while (rel_stmt)
		    {
		      prev_stmt = rel_stmt;
		      rel_stmt =
			STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
		    }

		  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
		    new_stmt;
		}
	    }

	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	  gap_count = 1;
	  /* If NEXT_STMT accesses the same DR as the previous statement,
	     put the same TMP_DATA_REF as its vectorized statement; otherwise
	     get the next data-ref from RESULT_CHAIN.  */
	  if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
	    break;
	}
    }
}
/* Function vect_can_force_dr_alignment_p.

   Returns whether the alignment of a DECL can be forced to be aligned
   on ALIGNMENT bit boundary.  */

bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
  if (TREE_CODE (decl) != VAR_DECL)
    return false;

  /* We cannot change alignment of common or external symbols as another
     translation unit may contain a definition with lower alignment.
     The rules of common symbol linking mean that the definition
     will override the common symbol.  */
  if (DECL_EXTERNAL (decl)
      || DECL_COMMON (decl))
    return false;

  if (TREE_ASM_WRITTEN (decl))
    return false;

  /* Do not override the alignment as specified by the ABI when the used
     attribute is set.  */
  if (DECL_PRESERVE_P (decl))
    return false;

  if (TREE_STATIC (decl))
    return (alignment <= MAX_OFILE_ALIGNMENT);
  else
    return (alignment <= MAX_STACK_ALIGNMENT);
}
/* Return whether the data reference DR is supported with respect to its
   alignment.
   If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
   if it is aligned, i.e., check if it is possible to vectorize it with
   different alignment.  */

enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr,
                               bool check_aligned_accesses)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  enum machine_mode mode = TYPE_MODE (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  bool nested_in_vect_loop = false;

  if (aligned_access_p (dr) && !check_aligned_accesses)
    return dr_aligned;

  if (loop_vinfo)
    {
      vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
    }

  /* Possibly unaligned access.  */

  /* We can choose between using the implicit realignment scheme (generating
     a misaligned_move stmt) and the explicit realignment scheme (generating
     aligned loads with a REALIGN_LOAD).  There are two variants to the
     explicit realignment scheme: optimized, and unoptimized.
     We can optimize the realignment only if the step between consecutive
     vector loads is equal to the vector size.  Since the vector memory
     accesses advance in steps of VS (Vector Size) in the vectorized loop, it
     is guaranteed that the misalignment amount remains the same throughout the
     execution of the vectorized loop.  Therefore, we can create the
     "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
     at the loop preheader.

     However, in the case of outer-loop vectorization, when vectorizing a
     memory access in the inner-loop nested within the LOOP that is now being
     vectorized, while it is guaranteed that the misalignment of the
     vectorized memory access will remain the same in different outer-loop
     iterations, it is *not* guaranteed that it will remain the same throughout
     the execution of the inner-loop.  This is because the inner-loop advances
     with the original scalar step (and not in steps of VS).  If the inner-loop
     step happens to be a multiple of VS, then the misalignment remains fixed
     and we can use the optimized realignment scheme.  For example:

      for (i=0; i<N; i++)
        for (j=0; j<M; j++)
          s += a[i+j];

     When vectorizing the i-loop in the above example, the step between
     consecutive vector loads is 1, and so the misalignment does not remain
     fixed across the execution of the inner-loop, and the realignment cannot
     be optimized (as illustrated in the following pseudo vectorized loop):

      for (i=0; i<N; i+=4)
        for (j=0; j<M; j++){
          vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
                         // when j is {0,1,2,3,4,5,6,7,...} respectively.
                         // (assuming that we start from an aligned address).
          }

     We therefore have to use the unoptimized realignment scheme:

      for (i=0; i<N; i+=4)
          for (j=k; j<M; j+=4)
          vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
                         // that the misalignment of the initial address is
                         // 0).

     The loop can then be vectorized as follows:

      for (k=0; k<4; k++){
        rt = get_realignment_token (&vp[k]);
        for (i=0; i<N; i+=4){
          v1 = vp[i+k];
          for (j=k; j<M; j+=4){
            v2 = vp[i+j+VS-1];
            va = REALIGN_LOAD <v1,v2,rt>;
            vs += va;
            v1 = v2;
          }
        }
      }  */
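  /* Put differently: if the access starts at offset OFF0 and advances by
     STEP bytes per iteration, its misalignment in iteration i is
     (OFF0 + i * STEP) % VS, which is loop-invariant exactly when STEP is a
     multiple of VS (as argued above).  A minimal standalone sketch of that
     invariance test (illustrative only; the actual condition checked below
     for the read case compares DR_STEP against the vector size):

       static int
       misalignment_is_invariant (unsigned long step, unsigned long vs)
       {
	 return step % vs == 0;
       }
  */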
  if (DR_IS_READ (dr))
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
	  && (!targetm.vectorize.builtin_mask_for_load
	      || targetm.vectorize.builtin_mask_for_load ()))
	{
	  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
	  if ((nested_in_vect_loop
	       && (TREE_INT_CST_LOW (DR_STEP (dr))
		   != GET_MODE_SIZE (TYPE_MODE (vectype))))
	      || !loop_vinfo)
	    return dr_explicit_realign;
	  else
	    return dr_explicit_realign_optimized;
	}
      if (!known_alignment_for_access_p (dr))
	is_packed = not_size_aligned (DR_REF (dr));

      if (targetm.vectorize.
	  support_vector_misalignment (mode, type,
				       DR_MISALIGNMENT (dr), is_packed))
	/* Can't software pipeline the loads, but can at least do them.  */
	return dr_unaligned_supported;
    }
  else
    {
      bool is_packed = false;
      tree type = (TREE_TYPE (DR_REF (dr)));

      if (!known_alignment_for_access_p (dr))
	is_packed = not_size_aligned (DR_REF (dr));

      if (targetm.vectorize.
	  support_vector_misalignment (mode, type,
				       DR_MISALIGNMENT (dr), is_packed))
	return dr_unaligned_supported;
    }

  /* Unsupported.  */
  return dr_unaligned_unsupported;
}