/* Data references and dependences detectors.
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass walks a given loop structure searching for array
   references.  The information about the array accesses is recorded
   in DATA_REFERENCE structures.

   The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to a same array, and
   x and y two vectors from the iteration domain, the same element of
   the array is accessed twice at iterations x and y if and only if:
   |             chrec1 (x) == chrec2 (y).

   The goals of this analysis are:

   - to determine the independence: the relation between two
     independent accesses is qualified with the chrec_known (this
     information allows a loop parallelization),

   - when two data references access the same data, to qualify the
     dependence relation with classic dependence representations:

       - distance vectors
       - direction vectors
       - loop carried level dependence
       - polyhedron dependence
     or with the chains of recurrences based representation,

   - to define a knowledge base for storing the data dependence
     information,

   - to define an interface to access this data.


   Definitions:

   - subscript: given two array accesses a subscript is the tuple
   composed of the access functions for a given dimension.  Example:
   Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
   (f1, g1), (f2, g2), (f3, g3).

   - Diophantine equation: an equation whose coefficients and
   solutions are integer constants, for example the equation
   |   3*x + 2*y = 1
   has an integer solution x = 1 and y = -1.


   References:

   - "Advanced Compilation for High Performance Computing" by Randy
   Allen and Ken Kennedy.
   http://citeseer.ist.psu.edu/goff91practical.html

   - "Loop Transformations for Restructuring Compilers - The Foundations"
   by Utpal Banerjee.

*/
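/* Worked example (an illustrative sketch, not part of the original
   sources): in the loop

     for (i = 0; i < N; i++)
       A[i + 1] = A[i] + 1;

   the read A[i] has access function chrec1 = {0, +, 1}_1 and the write
   A[i + 1] has access function chrec2 = {1, +, 1}_1.  The conflict
   equation chrec1 (x) == chrec2 (y) reduces to x == y + 1, i.e. the
   element written at iteration y is read again at iteration y + 1,
   a loop carried dependence of distance 1.  */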
#include "coretypes.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "tree-affine.h"
static struct datadep_stats
{
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;
static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					   struct data_reference *,
					   struct data_reference *,
					   struct loop *);

/* Returns true iff A divides B.  */

static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
  gcc_assert (TREE_CODE (a) == INTEGER_CST);
  gcc_assert (TREE_CODE (b) == INTEGER_CST);
  return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
}

/* Returns true iff A divides B.  */

static inline bool
int_divides_p (int a, int b)
{
  return ((b % a) == 0);
}
/* Dump into FILE all the data references from DATAREFS.  */

static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    dump_data_reference (file, dr);
}

/* Unified dump into FILE all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<data_reference_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}

/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}
/* Dump function for a DATA_REFERENCE structure.  */

void
dump_data_reference (FILE *outf,
		     struct data_reference *dr)
{
  unsigned int i;

  fprintf (outf, "#(Data Ref: \n");
  fprintf (outf, "#  bb: %d \n", gimple_bb (DR_STMT (dr))->index);
  fprintf (outf, "#  stmt: ");
  print_gimple_stmt (outf, DR_STMT (dr), 0, 0);
  fprintf (outf, "#  ref: ");
  print_generic_stmt (outf, DR_REF (dr), 0);
  fprintf (outf, "#  base_object: ");
  print_generic_stmt (outf, DR_BASE_OBJECT (dr), 0);

  for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
    {
      fprintf (outf, "#  Access function %d: ", i);
      print_generic_stmt (outf, DR_ACCESS_FN (dr, i), 0);
    }
  fprintf (outf, "#)\n");
}

/* Unified dump function for a DATA_REFERENCE structure.  */

DEBUG_FUNCTION void
debug (data_reference &ref)
{
  dump_data_reference (stderr, &ref);
}

DEBUG_FUNCTION void
debug (data_reference *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
/* Dumps the affine function described by FN to the file OUTF.  */

static void
dump_affine_function (FILE *outf, affine_fn fn)
{
  unsigned i;
  tree coef;

  print_generic_expr (outf, fn[0], TDF_SLIM);
  for (i = 1; fn.iterate (i, &coef); i++)
    {
      fprintf (outf, " + ");
      print_generic_expr (outf, coef, TDF_SLIM);
      fprintf (outf, " * x_%u", i);
    }
}
/* Dumps the conflict function CF to the file OUTF.  */

static void
dump_conflict_function (FILE *outf, conflict_function *cf)
{
  unsigned i;

  if (cf->n == NO_DEPENDENCE)
    fprintf (outf, "no dependence");
  else if (cf->n == NOT_KNOWN)
    fprintf (outf, "not known");
  else
    {
      for (i = 0; i < cf->n; i++)
	{
	  if (i != 0)
	    fprintf (outf, " ");
	  dump_affine_function (outf, cf->fns[i]);
	}
    }
}
/* Dump function for a SUBSCRIPT structure.  */

static void
dump_subscript (FILE *outf, struct subscript *subscript)
{
  conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);

  fprintf (outf, "\n (subscript \n");
  fprintf (outf, "  iterations_that_access_an_element_twice_in_A: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n  last_conflict: ");
      print_generic_expr (outf, last_iteration, 0);
    }

  cf = SUB_CONFLICTS_IN_B (subscript);
  fprintf (outf, "\n  iterations_that_access_an_element_twice_in_B: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n  last_conflict: ");
      print_generic_expr (outf, last_iteration, 0);
    }

  fprintf (outf, "\n  (Subscript distance: ");
  print_generic_expr (outf, SUB_DISTANCE (subscript), 0);
  fprintf (outf, " ))\n");
}
/* Print the classic direction vector DIRV to OUTF.  */

void
print_direction_vector (FILE *outf,
			lambda_vector dirv,
			int length)
{
  int eq;

  for (eq = 0; eq < length; eq++)
    {
      enum data_dependence_direction dir = ((enum data_dependence_direction)
					    dirv[eq]);
      switch (dir)
	{
	case dir_positive:
	  fprintf (outf, " +");
	  break;
	case dir_negative:
	  fprintf (outf, " -");
	  break;
	case dir_equal:
	  fprintf (outf, " =");
	  break;
	case dir_positive_or_equal:
	  fprintf (outf, " +=");
	  break;
	case dir_positive_or_negative:
	  fprintf (outf, " +-");
	  break;
	case dir_negative_or_equal:
	  fprintf (outf, " -=");
	  break;
	case dir_star:
	  fprintf (outf, " *");
	  break;
	default:
	  fprintf (outf, "indep");
	  break;
	}
    }
  fprintf (outf, "\n");
}
/* Print a vector of direction vectors.  */

void
print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
		   int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dir_vects, j, v)
    print_direction_vector (outf, v, length);
}
/* Print out a vector VEC of length N to OUTFILE.  */

void
print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
{
  int i;

  for (i = 0; i < n; i++)
    fprintf (outfile, "%3d ", vector[i]);
  fprintf (outfile, "\n");
}
/* Print a vector of distance vectors.  */

void
print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
		    int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dist_vects, j, v)
    print_lambda_vector (outf, v, length);
}
/* Dump function for a DATA_DEPENDENCE_RELATION structure.  */

void
dump_data_dependence_relation (FILE *outf,
			       struct data_dependence_relation *ddr)
{
  struct data_reference *dra, *drb;

  fprintf (outf, "(Data Dep: \n");

  if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (ddr)
	{
	  dra = DDR_A (ddr);
	  drb = DDR_B (ddr);
	  if (dra)
	    dump_data_reference (outf, dra);
	  else
	    fprintf (outf, "    (nil)\n");
	  if (drb)
	    dump_data_reference (outf, drb);
	  else
	    fprintf (outf, "    (nil)\n");
	}
      fprintf (outf, "    (don't know)\n)\n");
      return;
    }

  dra = DDR_A (ddr);
  drb = DDR_B (ddr);
  dump_data_reference (outf, dra);
  dump_data_reference (outf, drb);

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    fprintf (outf, "    (no dependence)\n");

  else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;
      struct loop *loopi;

      for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
	{
	  fprintf (outf, "  access_fn_A: ");
	  print_generic_stmt (outf, DR_ACCESS_FN (dra, i), 0);
	  fprintf (outf, "  access_fn_B: ");
	  print_generic_stmt (outf, DR_ACCESS_FN (drb, i), 0);
	  dump_subscript (outf, DDR_SUBSCRIPT (ddr, i));
	}

      fprintf (outf, "  inner loop index: %d\n", DDR_INNER_LOOP (ddr));
      fprintf (outf, "  loop nest: (");
      FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
	fprintf (outf, "%d ", loopi->num);
      fprintf (outf, ")\n");

      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (outf, "  distance_vector: ");
	  print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	}

      for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
	{
	  fprintf (outf, "  direction_vector: ");
	  print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
				  DDR_NB_LOOPS (ddr));
	}
    }

  fprintf (outf, ")\n");
}
DEBUG_FUNCTION void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
  dump_data_dependence_relation (stderr, ddr);
}

/* Dump into FILE all the dependence relations from DDRS.  */

void
dump_data_dependence_relations (FILE *file,
				vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> &ref)
{
  dump_data_dependence_relations (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Dump to STDERR all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
debug_data_dependence_relations (vec<ddr_p> ddrs)
{
  dump_data_dependence_relations (stderr, ddrs);
}
/* Dumps the distance and direction vectors in FILE.  DDRS contains
   the dependence relations, and VECT_SIZE is the size of the
   dependence vectors, or in other words the number of loops in the
   considered nest.  */

void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i, j;
  struct data_dependence_relation *ddr;
  lambda_vector v;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
      {
	FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DISTANCE_V (");
	    print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }

	FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DIRECTION_V (");
	    print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }
      }

  fprintf (file, "\n\n");
}
/* Dumps the data dependence relations DDRS in FILE.  */

void
dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);

  fprintf (file, "\n\n");
}

DEBUG_FUNCTION void
debug_ddrs (vec<ddr_p> ddrs)
{
  dump_ddrs (stderr, ddrs);
}
/* Helper function for split_constant_offset.  Expresses OP0 CODE OP1
   (the type of the result is TYPE) as VAR + OFF, where OFF is a nonzero
   constant of type ssizetype, and returns true.  If we cannot do this
   with OFF nonzero, OFF and VAR are set to NULL_TREE instead and false
   is returned.  */
574 split_constant_offset_1 (tree type
, tree op0
, enum tree_code code
, tree op1
,
575 tree
*var
, tree
*off
)
579 enum tree_code ocode
= code
;
587 *var
= build_int_cst (type
, 0);
588 *off
= fold_convert (ssizetype
, op0
);
591 case POINTER_PLUS_EXPR
:
596 split_constant_offset (op0
, &var0
, &off0
);
597 split_constant_offset (op1
, &var1
, &off1
);
598 *var
= fold_build2 (code
, type
, var0
, var1
);
599 *off
= size_binop (ocode
, off0
, off1
);
603 if (TREE_CODE (op1
) != INTEGER_CST
)
606 split_constant_offset (op0
, &var0
, &off0
);
607 *var
= fold_build2 (MULT_EXPR
, type
, var0
, op1
);
608 *off
= size_binop (MULT_EXPR
, off0
, fold_convert (ssizetype
, op1
));
614 HOST_WIDE_INT pbitsize
, pbitpos
;
616 int punsignedp
, preversep
, pvolatilep
;
618 op0
= TREE_OPERAND (op0
, 0);
620 = get_inner_reference (op0
, &pbitsize
, &pbitpos
, &poffset
, &pmode
,
621 &punsignedp
, &preversep
, &pvolatilep
);
623 if (pbitpos
% BITS_PER_UNIT
!= 0)
625 base
= build_fold_addr_expr (base
);
626 off0
= ssize_int (pbitpos
/ BITS_PER_UNIT
);
630 split_constant_offset (poffset
, &poffset
, &off1
);
631 off0
= size_binop (PLUS_EXPR
, off0
, off1
);
632 if (POINTER_TYPE_P (TREE_TYPE (base
)))
633 base
= fold_build_pointer_plus (base
, poffset
);
635 base
= fold_build2 (PLUS_EXPR
, TREE_TYPE (base
), base
,
636 fold_convert (TREE_TYPE (base
), poffset
));
639 var0
= fold_convert (type
, base
);
      /* If variable length types are involved, punt; otherwise casts
	 might be converted into ARRAY_REFs in gimplify_conversion.
	 To compute such an ARRAY_REF's element size, TYPE_SIZE_UNIT,
	 which possibly no longer appears in current GIMPLE, might
	 resurface.  This perhaps could be handled by running
	 if (CONVERT_EXPR_P (var0))
	   {
	     gimplify_conversion (&var0);
	     // Attempt to fill in the element size of any ARRAY_REF
	     // found within var0 from the corresponding ARRAY_REF
	     // embedded in op; if unsuccessful, just punt.
	   }
	 */
653 while (POINTER_TYPE_P (type
))
654 type
= TREE_TYPE (type
);
655 if (int_size_in_bytes (type
) < 0)
665 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0
))
668 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op0
);
669 enum tree_code subcode
;
671 if (gimple_code (def_stmt
) != GIMPLE_ASSIGN
)
674 var0
= gimple_assign_rhs1 (def_stmt
);
675 subcode
= gimple_assign_rhs_code (def_stmt
);
676 var1
= gimple_assign_rhs2 (def_stmt
);
678 return split_constant_offset_1 (type
, var0
, subcode
, var1
, var
, off
);
  /* We must not introduce undefined overflow, and we must not change the value.
     Hence we're okay if the inner type doesn't overflow to start with
     (pointer or signed), the outer type also is an integer or pointer
     and the outer precision is at least as large as the inner.  */
686 tree itype
= TREE_TYPE (op0
);
687 if ((POINTER_TYPE_P (itype
)
688 || (INTEGRAL_TYPE_P (itype
) && TYPE_OVERFLOW_UNDEFINED (itype
)))
689 && TYPE_PRECISION (type
) >= TYPE_PRECISION (itype
)
690 && (POINTER_TYPE_P (type
) || INTEGRAL_TYPE_P (type
)))
692 split_constant_offset (op0
, &var0
, off
);
693 *var
= fold_convert (type
, var0
);
/* Expresses EXP as VAR + OFF, where OFF is a constant.  The type of OFF
   will be ssizetype.  */
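/* Illustrative sketch (not from the original sources): for EXP == i + 4
   this sets *VAR = i and *OFF = ssize_int (4); when no constant part can
   be split off, it sets *VAR = EXP and *OFF = ssize_int (0).  */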
708 split_constant_offset (tree exp
, tree
*var
, tree
*off
)
710 tree type
= TREE_TYPE (exp
), otype
, op0
, op1
, e
, o
;
714 *off
= ssize_int (0);
717 if (tree_is_chrec (exp
)
718 || get_gimple_rhs_class (TREE_CODE (exp
)) == GIMPLE_TERNARY_RHS
)
721 otype
= TREE_TYPE (exp
);
722 code
= TREE_CODE (exp
);
723 extract_ops_from_tree (exp
, &code
, &op0
, &op1
);
724 if (split_constant_offset_1 (otype
, op0
, code
, op1
, &e
, &o
))
726 *var
= fold_convert (type
, e
);
/* Returns the address ADDR of an object in a canonical shape (without nop
   casts, and with type of pointer to the object).  */
735 canonicalize_base_object_address (tree addr
)
  /* The base address may be obtained by casting from integer, in that case
     keep the cast.  */
743 if (!POINTER_TYPE_P (TREE_TYPE (addr
)))
746 if (TREE_CODE (addr
) != ADDR_EXPR
)
749 return build_fold_addr_expr (TREE_OPERAND (addr
, 0));
/* Analyzes the behavior of the memory reference DR in the innermost loop or
   basic block that contains it.  Returns true if the analysis succeeded,
   false otherwise.  */
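/* Illustrative sketch (not from the original sources): for a reference
   like p->a[i] walked with induction variable i, the analysis roughly
   yields DR_BASE_ADDRESS = p, DR_INIT = the constant byte offset of the
   member a, DR_OFFSET = the remaining loop-invariant offset (here 0)
   and DR_STEP = the element size of a, i.e. the per-iteration evolution
   of the accessed address.  */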
757 dr_analyze_innermost (struct data_reference
*dr
, struct loop
*nest
)
759 gimple
*stmt
= DR_STMT (dr
);
760 struct loop
*loop
= loop_containing_stmt (stmt
);
761 tree ref
= DR_REF (dr
);
762 HOST_WIDE_INT pbitsize
, pbitpos
;
765 int punsignedp
, preversep
, pvolatilep
;
766 affine_iv base_iv
, offset_iv
;
767 tree init
, dinit
, step
;
768 bool in_loop
= (loop
&& loop
->num
);
770 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
771 fprintf (dump_file
, "analyze_innermost: ");
773 base
= get_inner_reference (ref
, &pbitsize
, &pbitpos
, &poffset
, &pmode
,
774 &punsignedp
, &preversep
, &pvolatilep
);
775 gcc_assert (base
!= NULL_TREE
);
777 if (pbitpos
% BITS_PER_UNIT
!= 0)
779 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
780 fprintf (dump_file
, "failed: bit offset alignment.\n");
786 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
787 fprintf (dump_file
, "failed: reverse storage order.\n");
791 if (TREE_CODE (base
) == MEM_REF
)
793 if (!integer_zerop (TREE_OPERAND (base
, 1)))
795 offset_int moff
= mem_ref_offset (base
);
796 tree mofft
= wide_int_to_tree (sizetype
, moff
);
800 poffset
= size_binop (PLUS_EXPR
, poffset
, mofft
);
802 base
= TREE_OPERAND (base
, 0);
805 base
= build_fold_addr_expr (base
);
809 if (!simple_iv (loop
, loop_containing_stmt (stmt
), base
, &base_iv
,
810 nest
? true : false))
814 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
815 fprintf (dump_file
, "failed: evolution of base is not"
822 base_iv
.step
= ssize_int (0);
823 base_iv
.no_overflow
= true;
830 base_iv
.step
= ssize_int (0);
831 base_iv
.no_overflow
= true;
836 offset_iv
.base
= ssize_int (0);
837 offset_iv
.step
= ssize_int (0);
843 offset_iv
.base
= poffset
;
844 offset_iv
.step
= ssize_int (0);
846 else if (!simple_iv (loop
, loop_containing_stmt (stmt
),
848 nest
? true : false))
852 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
853 fprintf (dump_file
, "failed: evolution of offset is not"
859 offset_iv
.base
= poffset
;
860 offset_iv
.step
= ssize_int (0);
865 init
= ssize_int (pbitpos
/ BITS_PER_UNIT
);
866 split_constant_offset (base_iv
.base
, &base_iv
.base
, &dinit
);
867 init
= size_binop (PLUS_EXPR
, init
, dinit
);
868 split_constant_offset (offset_iv
.base
, &offset_iv
.base
, &dinit
);
869 init
= size_binop (PLUS_EXPR
, init
, dinit
);
871 step
= size_binop (PLUS_EXPR
,
872 fold_convert (ssizetype
, base_iv
.step
),
873 fold_convert (ssizetype
, offset_iv
.step
));
875 DR_BASE_ADDRESS (dr
) = canonicalize_base_object_address (base_iv
.base
);
877 DR_OFFSET (dr
) = fold_convert (ssizetype
, offset_iv
.base
);
881 DR_ALIGNED_TO (dr
) = size_int (highest_pow2_factor (offset_iv
.base
));
883 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
884 fprintf (dump_file
, "success.\n");
/* Determines the base object and the list of indices of memory reference
   DR, analyzed in LOOP and instantiated in loop nest NEST.  */
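/* Illustrative sketch (not from the original sources): for a reference
   a[i][j] in a loop nest where i and j are the induction variables of
   loops 1 and 2, the access functions recorded in DR_ACCESS_FNS are the
   instantiated scalar evolutions of the index expressions, e.g.
   {0, +, 1}_1 for i and {0, +, 1}_2 for j, and DR_BASE_OBJECT is the
   array a.  */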
893 dr_analyze_indices (struct data_reference
*dr
, loop_p nest
, loop_p loop
)
895 vec
<tree
> access_fns
= vNULL
;
897 tree base
, off
, access_fn
;
898 basic_block before_loop
;
900 /* If analyzing a basic-block there are no indices to analyze
901 and thus no access functions. */
904 DR_BASE_OBJECT (dr
) = DR_REF (dr
);
905 DR_ACCESS_FNS (dr
).create (0);
910 before_loop
= block_before_loop (nest
);
912 /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
913 into a two element array with a constant index. The base is
914 then just the immediate underlying object. */
915 if (TREE_CODE (ref
) == REALPART_EXPR
)
917 ref
= TREE_OPERAND (ref
, 0);
918 access_fns
.safe_push (integer_zero_node
);
920 else if (TREE_CODE (ref
) == IMAGPART_EXPR
)
922 ref
= TREE_OPERAND (ref
, 0);
923 access_fns
.safe_push (integer_one_node
);
926 /* Analyze access functions of dimensions we know to be independent. */
927 while (handled_component_p (ref
))
929 if (TREE_CODE (ref
) == ARRAY_REF
)
931 op
= TREE_OPERAND (ref
, 1);
932 access_fn
= analyze_scalar_evolution (loop
, op
);
933 access_fn
= instantiate_scev (before_loop
, loop
, access_fn
);
934 access_fns
.safe_push (access_fn
);
936 else if (TREE_CODE (ref
) == COMPONENT_REF
937 && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref
, 0))) == RECORD_TYPE
)
939 /* For COMPONENT_REFs of records (but not unions!) use the
940 FIELD_DECL offset as constant access function so we can
941 disambiguate a[i].f1 and a[i].f2. */
942 tree off
= component_ref_field_offset (ref
);
943 off
= size_binop (PLUS_EXPR
,
944 size_binop (MULT_EXPR
,
945 fold_convert (bitsizetype
, off
),
946 bitsize_int (BITS_PER_UNIT
)),
947 DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref
, 1)));
948 access_fns
.safe_push (off
);
951 /* If we have an unhandled component we could not translate
952 to an access function stop analyzing. We have determined
953 our base object in this case. */
956 ref
= TREE_OPERAND (ref
, 0);
959 /* If the address operand of a MEM_REF base has an evolution in the
960 analyzed nest, add it as an additional independent access-function. */
961 if (TREE_CODE (ref
) == MEM_REF
)
963 op
= TREE_OPERAND (ref
, 0);
964 access_fn
= analyze_scalar_evolution (loop
, op
);
965 access_fn
= instantiate_scev (before_loop
, loop
, access_fn
);
966 if (TREE_CODE (access_fn
) == POLYNOMIAL_CHREC
)
969 tree memoff
= TREE_OPERAND (ref
, 1);
970 base
= initial_condition (access_fn
);
971 orig_type
= TREE_TYPE (base
);
972 STRIP_USELESS_TYPE_CONVERSION (base
);
973 split_constant_offset (base
, &base
, &off
);
974 STRIP_USELESS_TYPE_CONVERSION (base
);
975 /* Fold the MEM_REF offset into the evolutions initial
976 value to make more bases comparable. */
977 if (!integer_zerop (memoff
))
979 off
= size_binop (PLUS_EXPR
, off
,
980 fold_convert (ssizetype
, memoff
));
981 memoff
= build_int_cst (TREE_TYPE (memoff
), 0);
983 /* Adjust the offset so it is a multiple of the access type
984 size and thus we separate bases that can possibly be used
985 to produce partial overlaps (which the access_fn machinery
988 if (TYPE_SIZE_UNIT (TREE_TYPE (ref
))
989 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref
))) == INTEGER_CST
990 && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref
))))
991 rem
= wi::mod_trunc (off
, TYPE_SIZE_UNIT (TREE_TYPE (ref
)), SIGNED
);
993 /* If we can't compute the remainder simply force the initial
994 condition to zero. */
996 off
= wide_int_to_tree (ssizetype
, wi::sub (off
, rem
));
997 memoff
= wide_int_to_tree (TREE_TYPE (memoff
), rem
);
998 /* And finally replace the initial condition. */
999 access_fn
= chrec_replace_initial_condition
1000 (access_fn
, fold_convert (orig_type
, off
));
	  /* ???  This is still not a suitable base object for
	     dr_may_alias_p - the base object needs to be an
	     access that covers the object as whole.  With
	     an evolution in the pointer this cannot be
	     guaranteed.
	     As a band-aid, mark the access so we can special-case
	     it in dr_may_alias_p.  */
1009 ref
= fold_build2_loc (EXPR_LOCATION (ref
),
1010 MEM_REF
, TREE_TYPE (ref
),
1012 MR_DEPENDENCE_CLIQUE (ref
) = MR_DEPENDENCE_CLIQUE (old
);
1013 MR_DEPENDENCE_BASE (ref
) = MR_DEPENDENCE_BASE (old
);
1014 DR_UNCONSTRAINED_BASE (dr
) = true;
1015 access_fns
.safe_push (access_fn
);
1018 else if (DECL_P (ref
))
1020 /* Canonicalize DR_BASE_OBJECT to MEM_REF form. */
1021 ref
= build2 (MEM_REF
, TREE_TYPE (ref
),
1022 build_fold_addr_expr (ref
),
1023 build_int_cst (reference_alias_ptr_type (ref
), 0));
1026 DR_BASE_OBJECT (dr
) = ref
;
1027 DR_ACCESS_FNS (dr
) = access_fns
;
/* Extracts the alias analysis information from the memory reference DR.  */

static void
dr_analyze_alias (struct data_reference *dr)
{
  tree ref = DR_REF (dr);
  tree base = get_base_address (ref), addr;

  if (INDIRECT_REF_P (base)
      || TREE_CODE (base) == MEM_REF)
    {
      addr = TREE_OPERAND (base, 0);
      if (TREE_CODE (addr) == SSA_NAME)
	DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
    }
}
/* Frees data reference DR.  */

void
free_data_ref (data_reference_p dr)
{
  DR_ACCESS_FNS (dr).release ();
  free (dr);
}
/* Analyzes memory reference MEMREF accessed in STMT.  The reference
   is read if IS_READ is true, write otherwise.  Returns the
   data_reference description of MEMREF.  NEST is the outermost loop
   in which the reference should be instantiated, LOOP is the loop in
   which the data reference should be analyzed.  */
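/* Illustrative sketch (not from the original sources): a typical caller
   analyzes one memory operand OP of STMT inside LOOP of loop nest NEST
   with

     data_reference_p dr = create_data_ref (nest, loop, op, stmt, is_read);

   and then inspects DR_BASE_ADDRESS, DR_OFFSET, DR_INIT, DR_STEP and the
   DR_ACCESS_FN entries dumped below.  */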
1062 struct data_reference
*
1063 create_data_ref (loop_p nest
, loop_p loop
, tree memref
, gimple
*stmt
,
1066 struct data_reference
*dr
;
1068 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1070 fprintf (dump_file
, "Creating dr for ");
1071 print_generic_expr (dump_file
, memref
, TDF_SLIM
);
1072 fprintf (dump_file
, "\n");
1075 dr
= XCNEW (struct data_reference
);
1076 DR_STMT (dr
) = stmt
;
1077 DR_REF (dr
) = memref
;
1078 DR_IS_READ (dr
) = is_read
;
1080 dr_analyze_innermost (dr
, nest
);
1081 dr_analyze_indices (dr
, nest
, loop
);
1082 dr_analyze_alias (dr
);
1084 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1087 fprintf (dump_file
, "\tbase_address: ");
1088 print_generic_expr (dump_file
, DR_BASE_ADDRESS (dr
), TDF_SLIM
);
1089 fprintf (dump_file
, "\n\toffset from base address: ");
1090 print_generic_expr (dump_file
, DR_OFFSET (dr
), TDF_SLIM
);
1091 fprintf (dump_file
, "\n\tconstant offset from base address: ");
1092 print_generic_expr (dump_file
, DR_INIT (dr
), TDF_SLIM
);
1093 fprintf (dump_file
, "\n\tstep: ");
1094 print_generic_expr (dump_file
, DR_STEP (dr
), TDF_SLIM
);
1095 fprintf (dump_file
, "\n\taligned to: ");
1096 print_generic_expr (dump_file
, DR_ALIGNED_TO (dr
), TDF_SLIM
);
1097 fprintf (dump_file
, "\n\tbase_object: ");
1098 print_generic_expr (dump_file
, DR_BASE_OBJECT (dr
), TDF_SLIM
);
1099 fprintf (dump_file
, "\n");
1100 for (i
= 0; i
< DR_NUM_DIMENSIONS (dr
); i
++)
1102 fprintf (dump_file
, "\tAccess function %d: ", i
);
1103 print_generic_stmt (dump_file
, DR_ACCESS_FN (dr
, i
), TDF_SLIM
);
/* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
   expressions.  */

static bool
dr_equal_offsets_p1 (tree offset1, tree offset2)
{
  bool res;

  STRIP_NOPS (offset1);
  STRIP_NOPS (offset2);

  if (offset1 == offset2)
    return true;

  if (TREE_CODE (offset1) != TREE_CODE (offset2)
      || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
    return false;

  res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0),
			     TREE_OPERAND (offset2, 0));

  if (!res || !BINARY_CLASS_P (offset1))
    return res;

  res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1),
			     TREE_OPERAND (offset2, 1));

  return res;
}

/* Check if DRA and DRB have equal offsets.  */

bool
dr_equal_offsets_p (struct data_reference *dra,
		    struct data_reference *drb)
{
  tree offset1, offset2;

  offset1 = DR_OFFSET (dra);
  offset2 = DR_OFFSET (drb);

  return dr_equal_offsets_p1 (offset1, offset2);
}
/* Returns true if FNA == FNB.  */

static bool
affine_function_equal_p (affine_fn fna, affine_fn fnb)
{
  unsigned i, n = fna.length ();

  if (n != fnb.length ())
    return false;

  for (i = 0; i < n; i++)
    if (!operand_equal_p (fna[i], fnb[i], 0))
      return false;

  return true;
}
/* If all the functions in CF are the same, returns one of them,
   otherwise returns NULL.  */

static affine_fn
common_affine_function (conflict_function *cf)
{
  unsigned i;
  affine_fn comm;

  if (!CF_NONTRIVIAL_P (cf))
    return affine_fn ();

  comm = cf->fns[0];

  for (i = 1; i < cf->n; i++)
    if (!affine_function_equal_p (comm, cf->fns[i]))
      return affine_fn ();

  return comm;
}
/* Returns the base of the affine function FN.  */

static tree
affine_function_base (affine_fn fn)
{
  return fn[0];
}

/* Returns true if FN is a constant.  */

static bool
affine_function_constant_p (affine_fn fn)
{
  unsigned i;
  tree coef;

  for (i = 1; fn.iterate (i, &coef); i++)
    if (!integer_zerop (coef))
      return false;

  return true;
}

/* Returns true if FN is the zero constant function.  */

static bool
affine_function_zero_p (affine_fn fn)
{
  return (integer_zerop (affine_function_base (fn))
	  && affine_function_constant_p (fn));
}
/* Returns a signed integer type with the largest precision from TA
   and TB.  */

static tree
signed_type_for_types (tree ta, tree tb)
{
  if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
    return signed_type_for (ta);
  else
    return signed_type_for (tb);
}
/* Applies operation OP on affine functions FNA and FNB, and returns the
   result.  */

static affine_fn
affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
{
  unsigned i, n, m;
  affine_fn ret;
  tree coef;

  if (fnb.length () > fna.length ())
    {
      n = fna.length ();
      m = fnb.length ();
    }
  else
    {
      n = fnb.length ();
      m = fna.length ();
    }

  ret.create (m);
  for (i = 0; i < n; i++)
    {
      tree type = signed_type_for_types (TREE_TYPE (fna[i]),
					 TREE_TYPE (fnb[i]));
      ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
    }

  for (; fna.iterate (i, &coef); i++)
    ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
				 coef, integer_zero_node));
  for (; fnb.iterate (i, &coef); i++)
    ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
				 integer_zero_node, coef));

  return ret;
}
/* Returns the sum of affine functions FNA and FNB.  */

static affine_fn
affine_fn_plus (affine_fn fna, affine_fn fnb)
{
  return affine_fn_op (PLUS_EXPR, fna, fnb);
}

/* Returns the difference of affine functions FNA and FNB.  */

static affine_fn
affine_fn_minus (affine_fn fna, affine_fn fnb)
{
  return affine_fn_op (MINUS_EXPR, fna, fnb);
}

/* Frees affine function FN.  */

static void
affine_fn_free (affine_fn fn)
{
  fn.release ();
}
/* Determine for each subscript in the data dependence relation DDR
   the distance.  */

static void
compute_subscript_distance (struct data_dependence_relation *ddr)
{
  conflict_function *cf_a, *cf_b;
  affine_fn fn_a, fn_b, diff;

  if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;

      for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
	{
	  struct subscript *subscript;

	  subscript = DDR_SUBSCRIPT (ddr, i);
	  cf_a = SUB_CONFLICTS_IN_A (subscript);
	  cf_b = SUB_CONFLICTS_IN_B (subscript);

	  fn_a = common_affine_function (cf_a);
	  fn_b = common_affine_function (cf_b);
	  if (!fn_a.exists () || !fn_b.exists ())
	    {
	      SUB_DISTANCE (subscript) = chrec_dont_know;
	      continue;
	    }
	  diff = affine_fn_minus (fn_a, fn_b);

	  if (affine_function_constant_p (diff))
	    SUB_DISTANCE (subscript) = affine_function_base (diff);
	  else
	    SUB_DISTANCE (subscript) = chrec_dont_know;

	  affine_fn_free (diff);
	}
    }
}
/* Returns the conflict function for "unknown".  */

static conflict_function *
conflict_fn_not_known (void)
{
  conflict_function *fn = XCNEW (conflict_function);
  fn->n = NOT_KNOWN;

  return fn;
}

/* Returns the conflict function for "independent".  */

static conflict_function *
conflict_fn_no_dependence (void)
{
  conflict_function *fn = XCNEW (conflict_function);
  fn->n = NO_DEPENDENCE;

  return fn;
}
/* Returns true if the address of OBJ is invariant in LOOP.  */

static bool
object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
{
  while (handled_component_p (obj))
    {
      if (TREE_CODE (obj) == ARRAY_REF)
	{
	  /* Index of the ARRAY_REF was zeroed in analyze_indices, thus we only
	     need to check the stride and the lower bound of the reference.  */
	  if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
						      loop->num)
	      || chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 3),
							 loop->num))
	    return false;
	}
      else if (TREE_CODE (obj) == COMPONENT_REF)
	{
	  if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
						      loop->num))
	    return false;
	}
      obj = TREE_OPERAND (obj, 0);
    }

  if (!INDIRECT_REF_P (obj)
      && TREE_CODE (obj) != MEM_REF)
    return true;

  return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0),
						  loop->num);
}
/* Returns false if we can prove that data references A and B do not alias,
   true otherwise.  If LOOP_NEST is false no cross-iteration aliases are
   considered.  */
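/* Illustrative sketch (not from the original sources): two references
   a[i] and b[i] into distinct global arrays have DR_BASE_OBJECTs that
   refs_may_alias_p can distinguish, so this returns false and the
   dependence relation is later finalized as chrec_known (independent).
   Two references *p and *q through unrelated SSA pointers instead fall
   back to ptr_derefs_may_alias_p and the points-to information.  */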
1398 dr_may_alias_p (const struct data_reference
*a
, const struct data_reference
*b
,
1401 tree addr_a
= DR_BASE_OBJECT (a
);
1402 tree addr_b
= DR_BASE_OBJECT (b
);
1404 /* If we are not processing a loop nest but scalar code we
1405 do not need to care about possible cross-iteration dependences
1406 and thus can process the full original reference. Do so,
1407 similar to how loop invariant motion applies extra offset-based
1411 aff_tree off1
, off2
;
1412 widest_int size1
, size2
;
1413 get_inner_reference_aff (DR_REF (a
), &off1
, &size1
);
1414 get_inner_reference_aff (DR_REF (b
), &off2
, &size2
);
1415 aff_combination_scale (&off1
, -1);
1416 aff_combination_add (&off2
, &off1
);
1417 if (aff_comb_cannot_overlap_p (&off2
, size1
, size2
))
1421 if ((TREE_CODE (addr_a
) == MEM_REF
|| TREE_CODE (addr_a
) == TARGET_MEM_REF
)
1422 && (TREE_CODE (addr_b
) == MEM_REF
|| TREE_CODE (addr_b
) == TARGET_MEM_REF
)
1423 && MR_DEPENDENCE_CLIQUE (addr_a
) == MR_DEPENDENCE_CLIQUE (addr_b
)
1424 && MR_DEPENDENCE_BASE (addr_a
) != MR_DEPENDENCE_BASE (addr_b
))
1427 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
1428 do not know the size of the base-object. So we cannot do any
1429 offset/overlap based analysis but have to rely on points-to
1430 information only. */
1431 if (TREE_CODE (addr_a
) == MEM_REF
1432 && (DR_UNCONSTRAINED_BASE (a
)
1433 || TREE_CODE (TREE_OPERAND (addr_a
, 0)) == SSA_NAME
))
1435 /* For true dependences we can apply TBAA. */
1436 if (flag_strict_aliasing
1437 && DR_IS_WRITE (a
) && DR_IS_READ (b
)
1438 && !alias_sets_conflict_p (get_alias_set (DR_REF (a
)),
1439 get_alias_set (DR_REF (b
))))
1441 if (TREE_CODE (addr_b
) == MEM_REF
)
1442 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a
, 0),
1443 TREE_OPERAND (addr_b
, 0));
1445 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a
, 0),
1446 build_fold_addr_expr (addr_b
));
1448 else if (TREE_CODE (addr_b
) == MEM_REF
1449 && (DR_UNCONSTRAINED_BASE (b
)
1450 || TREE_CODE (TREE_OPERAND (addr_b
, 0)) == SSA_NAME
))
1452 /* For true dependences we can apply TBAA. */
1453 if (flag_strict_aliasing
1454 && DR_IS_WRITE (a
) && DR_IS_READ (b
)
1455 && !alias_sets_conflict_p (get_alias_set (DR_REF (a
)),
1456 get_alias_set (DR_REF (b
))))
1458 if (TREE_CODE (addr_a
) == MEM_REF
)
1459 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a
, 0),
1460 TREE_OPERAND (addr_b
, 0));
1462 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a
),
1463 TREE_OPERAND (addr_b
, 0));
1466 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
1467 that is being subsetted in the loop nest. */
1468 if (DR_IS_WRITE (a
) && DR_IS_WRITE (b
))
1469 return refs_output_dependent_p (addr_a
, addr_b
);
1470 else if (DR_IS_READ (a
) && DR_IS_WRITE (b
))
1471 return refs_anti_dependent_p (addr_a
, addr_b
);
1472 return refs_may_alias_p (addr_a
, addr_b
);
/* Initialize a data dependence relation between data accesses A and
   B.  NB_LOOPS is the number of loops surrounding the references: the
   size of the classic distance/direction vectors.  */
1479 struct data_dependence_relation
*
1480 initialize_data_dependence_relation (struct data_reference
*a
,
1481 struct data_reference
*b
,
1482 vec
<loop_p
> loop_nest
)
1484 struct data_dependence_relation
*res
;
1487 res
= XNEW (struct data_dependence_relation
);
1490 DDR_LOOP_NEST (res
).create (0);
1491 DDR_REVERSED_P (res
) = false;
1492 DDR_SUBSCRIPTS (res
).create (0);
1493 DDR_DIR_VECTS (res
).create (0);
1494 DDR_DIST_VECTS (res
).create (0);
1496 if (a
== NULL
|| b
== NULL
)
1498 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
1502 /* If the data references do not alias, then they are independent. */
1503 if (!dr_may_alias_p (a
, b
, loop_nest
.exists ()))
1505 DDR_ARE_DEPENDENT (res
) = chrec_known
;
1509 /* The case where the references are exactly the same. */
1510 if (operand_equal_p (DR_REF (a
), DR_REF (b
), 0))
1512 if ((loop_nest
.exists ()
1513 && !object_address_invariant_in_loop_p (loop_nest
[0],
1514 DR_BASE_OBJECT (a
)))
1515 || DR_NUM_DIMENSIONS (a
) == 0)
1517 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
1520 DDR_AFFINE_P (res
) = true;
1521 DDR_ARE_DEPENDENT (res
) = NULL_TREE
;
1522 DDR_SUBSCRIPTS (res
).create (DR_NUM_DIMENSIONS (a
));
1523 DDR_LOOP_NEST (res
) = loop_nest
;
1524 DDR_INNER_LOOP (res
) = 0;
1525 DDR_SELF_REFERENCE (res
) = true;
1526 for (i
= 0; i
< DR_NUM_DIMENSIONS (a
); i
++)
1528 struct subscript
*subscript
;
1530 subscript
= XNEW (struct subscript
);
1531 SUB_CONFLICTS_IN_A (subscript
) = conflict_fn_not_known ();
1532 SUB_CONFLICTS_IN_B (subscript
) = conflict_fn_not_known ();
1533 SUB_LAST_CONFLICT (subscript
) = chrec_dont_know
;
1534 SUB_DISTANCE (subscript
) = chrec_dont_know
;
1535 DDR_SUBSCRIPTS (res
).safe_push (subscript
);
1540 /* If the references do not access the same object, we do not know
1541 whether they alias or not. We do not care about TBAA or alignment
1542 info so we can use OEP_ADDRESS_OF to avoid false negatives.
1543 But the accesses have to use compatible types as otherwise the
1544 built indices would not match. */
1545 if (!operand_equal_p (DR_BASE_OBJECT (a
), DR_BASE_OBJECT (b
), OEP_ADDRESS_OF
)
1546 || !types_compatible_p (TREE_TYPE (DR_BASE_OBJECT (a
)),
1547 TREE_TYPE (DR_BASE_OBJECT (b
))))
1549 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
1553 /* If the base of the object is not invariant in the loop nest, we cannot
1554 analyze it. TODO -- in fact, it would suffice to record that there may
1555 be arbitrary dependences in the loops where the base object varies. */
1556 if ((loop_nest
.exists ()
1557 && !object_address_invariant_in_loop_p (loop_nest
[0], DR_BASE_OBJECT (a
)))
1558 || DR_NUM_DIMENSIONS (a
) == 0)
1560 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
1564 /* If the number of dimensions of the access to not agree we can have
1565 a pointer access to a component of the array element type and an
1566 array access while the base-objects are still the same. Punt. */
1567 if (DR_NUM_DIMENSIONS (a
) != DR_NUM_DIMENSIONS (b
))
1569 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
1573 DDR_AFFINE_P (res
) = true;
1574 DDR_ARE_DEPENDENT (res
) = NULL_TREE
;
1575 DDR_SUBSCRIPTS (res
).create (DR_NUM_DIMENSIONS (a
));
1576 DDR_LOOP_NEST (res
) = loop_nest
;
1577 DDR_INNER_LOOP (res
) = 0;
1578 DDR_SELF_REFERENCE (res
) = false;
1580 for (i
= 0; i
< DR_NUM_DIMENSIONS (a
); i
++)
1582 struct subscript
*subscript
;
1584 subscript
= XNEW (struct subscript
);
1585 SUB_CONFLICTS_IN_A (subscript
) = conflict_fn_not_known ();
1586 SUB_CONFLICTS_IN_B (subscript
) = conflict_fn_not_known ();
1587 SUB_LAST_CONFLICT (subscript
) = chrec_dont_know
;
1588 SUB_DISTANCE (subscript
) = chrec_dont_know
;
1589 DDR_SUBSCRIPTS (res
).safe_push (subscript
);
/* Frees memory used by the conflict function F.  */

static void
free_conflict_function (conflict_function *f)
{
  unsigned i;

  if (CF_NONTRIVIAL_P (f))
    {
      for (i = 0; i < f->n; i++)
	affine_fn_free (f->fns[i]);
    }
  free (f);
}
/* Frees memory used by SUBSCRIPTS.  */

static void
free_subscripts (vec<subscript_p> subscripts)
{
  unsigned i;
  subscript_p s;

  FOR_EACH_VEC_ELT (subscripts, i, s)
    {
      free_conflict_function (s->conflicting_iterations_in_a);
      free_conflict_function (s->conflicting_iterations_in_b);
      free (s);
    }
  subscripts.release ();
}
/* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
   description.  */

static inline void
finalize_ddr_dependent (struct data_dependence_relation *ddr,
			tree chrec)
{
  DDR_ARE_DEPENDENT (ddr) = chrec;
  free_subscripts (DDR_SUBSCRIPTS (ddr));
  DDR_SUBSCRIPTS (ddr).create (0);
}
/* The dependence relation DDR cannot be represented by a distance
   vector.  */

static inline void
non_affine_dependence_relation (struct data_dependence_relation *ddr)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n");

  DDR_AFFINE_P (ddr) = false;
}
/* This section contains the classic Banerjee tests.  */

/* Returns true iff CHREC_A and CHREC_B are not dependent on any index
   variables, i.e., if the ZIV (Zero Index Variable) test is true.  */

static inline bool
ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
{
  return (evolution_function_is_constant_p (chrec_a)
	  && evolution_function_is_constant_p (chrec_b));
}
/* Returns true iff CHREC_A and CHREC_B are dependent on an index
   variable, i.e., if the SIV (Single Index Variable) test is true.  */
1669 siv_subscript_p (const_tree chrec_a
, const_tree chrec_b
)
1671 if ((evolution_function_is_constant_p (chrec_a
)
1672 && evolution_function_is_univariate_p (chrec_b
))
1673 || (evolution_function_is_constant_p (chrec_b
)
1674 && evolution_function_is_univariate_p (chrec_a
)))
1677 if (evolution_function_is_univariate_p (chrec_a
)
1678 && evolution_function_is_univariate_p (chrec_b
))
1680 switch (TREE_CODE (chrec_a
))
1682 case POLYNOMIAL_CHREC
:
1683 switch (TREE_CODE (chrec_b
))
1685 case POLYNOMIAL_CHREC
:
1686 if (CHREC_VARIABLE (chrec_a
) != CHREC_VARIABLE (chrec_b
))
/* Creates a conflict function with N dimensions.  The affine functions
   in each dimension follow.  */

static conflict_function *
conflict_fn (unsigned n, ...)
{
  va_list ap;
  unsigned i;
  conflict_function *ret = XCNEW (conflict_function);

  gcc_assert (0 < n && n <= MAX_DIM);
  va_start (ap, n);

  ret->n = n;
  for (i = 0; i < n; i++)
    ret->fns[i] = va_arg (ap, affine_fn);
  va_end (ap);

  return ret;
}
/* Returns constant affine function with value CST.  */

static affine_fn
affine_fn_cst (tree cst)
{
  affine_fn fn;
  fn.create (1);
  fn.quick_push (cst);
  return fn;
}
/* Returns affine function with single variable, CST + COEF * x_DIM.  */

static affine_fn
affine_fn_univar (tree cst, unsigned dim, tree coef)
{
  affine_fn fn;
  fn.create (dim + 1);
  unsigned i;

  gcc_assert (dim > 0);
  fn.quick_push (cst);
  for (i = 1; i < dim; i++)
    fn.quick_push (integer_zero_node);
  fn.quick_push (coef);
  return fn;
}
/* Analyze a ZIV (Zero Index Variable) subscript.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */
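/* Illustrative sketch (not from the original sources): for the accesses
   A[3] and A[5] the two chrecs are the constants 3 and 5; their
   difference is the nonzero integer constant -2, so the test reports
   independence.  For A[3] and A[3] the difference is 0, the accessed
   element conflicts at every iteration, and *LAST_CONFLICTS is set to
   chrec_dont_know.  */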
1759 analyze_ziv_subscript (tree chrec_a
,
1761 conflict_function
**overlaps_a
,
1762 conflict_function
**overlaps_b
,
1763 tree
*last_conflicts
)
1765 tree type
, difference
;
1766 dependence_stats
.num_ziv
++;
1768 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1769 fprintf (dump_file
, "(analyze_ziv_subscript \n");
1771 type
= signed_type_for_types (TREE_TYPE (chrec_a
), TREE_TYPE (chrec_b
));
1772 chrec_a
= chrec_convert (type
, chrec_a
, NULL
);
1773 chrec_b
= chrec_convert (type
, chrec_b
, NULL
);
1774 difference
= chrec_fold_minus (type
, chrec_a
, chrec_b
);
1776 switch (TREE_CODE (difference
))
1779 if (integer_zerop (difference
))
1781 /* The difference is equal to zero: the accessed index
1782 overlaps for each iteration in the loop. */
1783 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
1784 *overlaps_b
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
1785 *last_conflicts
= chrec_dont_know
;
1786 dependence_stats
.num_ziv_dependent
++;
1790 /* The accesses do not overlap. */
1791 *overlaps_a
= conflict_fn_no_dependence ();
1792 *overlaps_b
= conflict_fn_no_dependence ();
1793 *last_conflicts
= integer_zero_node
;
1794 dependence_stats
.num_ziv_independent
++;
1799 /* We're not sure whether the indexes overlap. For the moment,
1800 conservatively answer "don't know". */
1801 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1802 fprintf (dump_file
, "ziv test failed: difference is non-integer.\n");
1804 *overlaps_a
= conflict_fn_not_known ();
1805 *overlaps_b
= conflict_fn_not_known ();
1806 *last_conflicts
= chrec_dont_know
;
1807 dependence_stats
.num_ziv_unimplemented
++;
1811 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1812 fprintf (dump_file
, ")\n");
/* Similar to max_stmt_executions_int, but returns the bound as a tree,
   and only if it fits to the int type.  If this is not the case, or the
   bound on the number of iterations of LOOP could not be derived, returns
   chrec_dont_know.  */

static tree
max_stmt_executions_tree (struct loop *loop)
{
  widest_int nit;

  if (!max_stmt_executions (loop, &nit))
    return chrec_dont_know;

  if (!wi::fits_to_tree_p (nit, unsigned_type_node))
    return chrec_dont_know;

  return wide_int_to_tree (unsigned_type_node, nit);
}
/* Determine whether the CHREC is always positive/negative.  If the expression
   cannot be statically analyzed, return false, otherwise set the answer into
   VALUE.  */
1839 chrec_is_positive (tree chrec
, bool *value
)
1841 bool value0
, value1
, value2
;
1842 tree end_value
, nb_iter
;
1844 switch (TREE_CODE (chrec
))
1846 case POLYNOMIAL_CHREC
:
1847 if (!chrec_is_positive (CHREC_LEFT (chrec
), &value0
)
1848 || !chrec_is_positive (CHREC_RIGHT (chrec
), &value1
))
1851 /* FIXME -- overflows. */
1852 if (value0
== value1
)
1858 /* Otherwise the chrec is under the form: "{-197, +, 2}_1",
1859 and the proof consists in showing that the sign never
1860 changes during the execution of the loop, from 0 to
1861 loop->nb_iterations. */
1862 if (!evolution_function_is_affine_p (chrec
))
1865 nb_iter
= number_of_latch_executions (get_chrec_loop (chrec
));
1866 if (chrec_contains_undetermined (nb_iter
))
1870 /* TODO -- If the test is after the exit, we may decrease the number of
1871 iterations by one. */
1873 nb_iter
= chrec_fold_minus (type
, nb_iter
, build_int_cst (type
, 1));
1876 end_value
= chrec_apply (CHREC_VARIABLE (chrec
), chrec
, nb_iter
);
1878 if (!chrec_is_positive (end_value
, &value2
))
1882 return value0
== value1
;
1885 switch (tree_int_cst_sgn (chrec
))
/* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
   constant, and CHREC_B is an affine function.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */
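/* Illustrative sketch (not from the original sources): with CHREC_A = 12
   and CHREC_B = {10, +, 1}_1, the difference 10 - 12 = -2 is divisible
   by the step 1, so the accesses conflict exactly once, at iteration
   x = 2 of loop 1 where {10, +, 1}_1 evaluates to 12; the weak-zero SIV
   check below additionally verifies that 2 does not exceed the maximum
   number of iterations of the loop.  */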
1913 analyze_siv_subscript_cst_affine (tree chrec_a
,
1915 conflict_function
**overlaps_a
,
1916 conflict_function
**overlaps_b
,
1917 tree
*last_conflicts
)
1919 bool value0
, value1
, value2
;
1920 tree type
, difference
, tmp
;
1922 type
= signed_type_for_types (TREE_TYPE (chrec_a
), TREE_TYPE (chrec_b
));
1923 chrec_a
= chrec_convert (type
, chrec_a
, NULL
);
1924 chrec_b
= chrec_convert (type
, chrec_b
, NULL
);
1925 difference
= chrec_fold_minus (type
, initial_condition (chrec_b
), chrec_a
);
1927 /* Special case overlap in the first iteration. */
1928 if (integer_zerop (difference
))
1930 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
1931 *overlaps_b
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
1932 *last_conflicts
= integer_one_node
;
1936 if (!chrec_is_positive (initial_condition (difference
), &value0
))
1938 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1939 fprintf (dump_file
, "siv test failed: chrec is not positive.\n");
1941 dependence_stats
.num_siv_unimplemented
++;
1942 *overlaps_a
= conflict_fn_not_known ();
1943 *overlaps_b
= conflict_fn_not_known ();
1944 *last_conflicts
= chrec_dont_know
;
1949 if (value0
== false)
1951 if (!chrec_is_positive (CHREC_RIGHT (chrec_b
), &value1
))
1953 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1954 fprintf (dump_file
, "siv test failed: chrec not positive.\n");
1956 *overlaps_a
= conflict_fn_not_known ();
1957 *overlaps_b
= conflict_fn_not_known ();
1958 *last_conflicts
= chrec_dont_know
;
1959 dependence_stats
.num_siv_unimplemented
++;
1968 chrec_b = {10, +, 1}
1971 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b
), difference
))
1973 HOST_WIDE_INT numiter
;
1974 struct loop
*loop
= get_chrec_loop (chrec_b
);
1976 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
1977 tmp
= fold_build2 (EXACT_DIV_EXPR
, type
,
1978 fold_build1 (ABS_EXPR
, type
, difference
),
1979 CHREC_RIGHT (chrec_b
));
1980 *overlaps_b
= conflict_fn (1, affine_fn_cst (tmp
));
1981 *last_conflicts
= integer_one_node
;
1984 /* Perform weak-zero siv test to see if overlap is
1985 outside the loop bounds. */
1986 numiter
= max_stmt_executions_int (loop
);
1989 && compare_tree_int (tmp
, numiter
) > 0)
1991 free_conflict_function (*overlaps_a
);
1992 free_conflict_function (*overlaps_b
);
1993 *overlaps_a
= conflict_fn_no_dependence ();
1994 *overlaps_b
= conflict_fn_no_dependence ();
1995 *last_conflicts
= integer_zero_node
;
1996 dependence_stats
.num_siv_independent
++;
1999 dependence_stats
.num_siv_dependent
++;
2003 /* When the step does not divide the difference, there are
2007 *overlaps_a
= conflict_fn_no_dependence ();
2008 *overlaps_b
= conflict_fn_no_dependence ();
2009 *last_conflicts
= integer_zero_node
;
2010 dependence_stats
.num_siv_independent
++;
2019 chrec_b = {10, +, -1}
2021 In this case, chrec_a will not overlap with chrec_b. */
2022 *overlaps_a
= conflict_fn_no_dependence ();
2023 *overlaps_b
= conflict_fn_no_dependence ();
2024 *last_conflicts
= integer_zero_node
;
2025 dependence_stats
.num_siv_independent
++;
2032 if (!chrec_is_positive (CHREC_RIGHT (chrec_b
), &value2
))
2034 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2035 fprintf (dump_file
, "siv test failed: chrec not positive.\n");
2037 *overlaps_a
= conflict_fn_not_known ();
2038 *overlaps_b
= conflict_fn_not_known ();
2039 *last_conflicts
= chrec_dont_know
;
2040 dependence_stats
.num_siv_unimplemented
++;
2045 if (value2
== false)
2049 chrec_b = {10, +, -1}
2051 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b
), difference
))
2053 HOST_WIDE_INT numiter
;
2054 struct loop
*loop
= get_chrec_loop (chrec_b
);
2056 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
2057 tmp
= fold_build2 (EXACT_DIV_EXPR
, type
, difference
,
2058 CHREC_RIGHT (chrec_b
));
2059 *overlaps_b
= conflict_fn (1, affine_fn_cst (tmp
));
2060 *last_conflicts
= integer_one_node
;
2062 /* Perform weak-zero siv test to see if overlap is
2063 outside the loop bounds. */
2064 numiter
= max_stmt_executions_int (loop
);
2067 && compare_tree_int (tmp
, numiter
) > 0)
2069 free_conflict_function (*overlaps_a
);
2070 free_conflict_function (*overlaps_b
);
2071 *overlaps_a
= conflict_fn_no_dependence ();
2072 *overlaps_b
= conflict_fn_no_dependence ();
2073 *last_conflicts
= integer_zero_node
;
2074 dependence_stats
.num_siv_independent
++;
2077 dependence_stats
.num_siv_dependent
++;
2081 /* When the step does not divide the difference, there
2085 *overlaps_a
= conflict_fn_no_dependence ();
2086 *overlaps_b
= conflict_fn_no_dependence ();
2087 *last_conflicts
= integer_zero_node
;
2088 dependence_stats
.num_siv_independent
++;
2098 In this case, chrec_a will not overlap with chrec_b. */
2099 *overlaps_a
= conflict_fn_no_dependence ();
2100 *overlaps_b
= conflict_fn_no_dependence ();
2101 *last_conflicts
= integer_zero_node
;
2102 dependence_stats
.num_siv_independent
++;
2110 /* Helper recursive function for initializing the matrix A. Returns
2111 the initial value of CHREC. */
2114 initialize_matrix_A (lambda_matrix A
, tree chrec
, unsigned index
, int mult
)
2118 switch (TREE_CODE (chrec
))
2120 case POLYNOMIAL_CHREC
:
2121 A
[index
][0] = mult
* int_cst_value (CHREC_RIGHT (chrec
));
2122 return initialize_matrix_A (A
, CHREC_LEFT (chrec
), index
+ 1, mult
);
2128 tree op0
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 0), index
, mult
);
2129 tree op1
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 1), index
, mult
);
2131 return chrec_fold_op (TREE_CODE (chrec
), chrec_type (chrec
), op0
, op1
);
2136 tree op
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 0), index
, mult
);
2137 return chrec_convert (chrec_type (chrec
), op
, NULL
);
2142 /* Handle ~X as -1 - X. */
2143 tree op
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 0), index
, mult
);
2144 return chrec_fold_op (MINUS_EXPR
, chrec_type (chrec
),
2145 build_int_cst (TREE_TYPE (chrec
), -1), op
);
2157 #define FLOOR_DIV(x,y) ((x) / (y))
/* Solves the special case of the Diophantine equation:
   | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)

   Computes the descriptions OVERLAPS_A and OVERLAPS_B.  NITER is the
   number of iterations that loops X and Y run.  The overlaps will be
   constructed as evolutions in dimension DIM.  */
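/* Illustrative sketch (not from the original sources): for STEP_A = 4 and
   STEP_B = 6, gcd (4, 6) = 2 and the conflicting iterations are described
   by x (t) = 3 * t and y (t) = 2 * t, since 4 * (3 * t) == 6 * (2 * t);
   the index of the last conflict is bounded by
   MIN (NITER / 3, NITER / 2).  */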
2167 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter
,
2168 HOST_WIDE_INT step_a
,
2169 HOST_WIDE_INT step_b
,
2170 affine_fn
*overlaps_a
,
2171 affine_fn
*overlaps_b
,
2172 tree
*last_conflicts
, int dim
)
2174 if (((step_a
> 0 && step_b
> 0)
2175 || (step_a
< 0 && step_b
< 0)))
2177 HOST_WIDE_INT step_overlaps_a
, step_overlaps_b
;
2178 HOST_WIDE_INT gcd_steps_a_b
, last_conflict
, tau2
;
2180 gcd_steps_a_b
= gcd (step_a
, step_b
);
2181 step_overlaps_a
= step_b
/ gcd_steps_a_b
;
2182 step_overlaps_b
= step_a
/ gcd_steps_a_b
;
2186 tau2
= FLOOR_DIV (niter
, step_overlaps_a
);
2187 tau2
= MIN (tau2
, FLOOR_DIV (niter
, step_overlaps_b
));
2188 last_conflict
= tau2
;
2189 *last_conflicts
= build_int_cst (NULL_TREE
, last_conflict
);
2192 *last_conflicts
= chrec_dont_know
;
2194 *overlaps_a
= affine_fn_univar (integer_zero_node
, dim
,
2195 build_int_cst (NULL_TREE
,
2197 *overlaps_b
= affine_fn_univar (integer_zero_node
, dim
,
2198 build_int_cst (NULL_TREE
,
2204 *overlaps_a
= affine_fn_cst (integer_zero_node
);
2205 *overlaps_b
= affine_fn_cst (integer_zero_node
);
2206 *last_conflicts
= integer_zero_node
;
/* Solves the special case of a Diophantine equation where CHREC_A is
   an affine bivariate function, and CHREC_B is an affine univariate
   function.  For example,

   | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z

   has the following overlapping functions:

   | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
   | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
   | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v

   FORNOW: This is a specialized implementation for a case occurring in
   a common benchmark.  Implement the general algorithm.  */
static void
compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
                                      conflict_function **overlaps_a,
                                      conflict_function **overlaps_b,
                                      tree *last_conflicts)
{
  bool xz_p, yz_p, xyz_p;
  HOST_WIDE_INT step_x, step_y, step_z;
  HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
  affine_fn overlaps_a_xz, overlaps_b_xz;
  affine_fn overlaps_a_yz, overlaps_b_yz;
  affine_fn overlaps_a_xyz, overlaps_b_xyz;
  affine_fn ova1, ova2, ovb;
  tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;

  step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
  step_y = int_cst_value (CHREC_RIGHT (chrec_a));
  step_z = int_cst_value (CHREC_RIGHT (chrec_b));

  niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
  niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
  niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));

  if (niter_x < 0 || niter_y < 0 || niter_z < 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      return;
    }

  niter = MIN (niter_x, niter_z);
  compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
					   &overlaps_a_xz,
					   &overlaps_b_xz,
					   &last_conflicts_xz, 1);
  niter = MIN (niter_y, niter_z);
  compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
					   &overlaps_a_yz,
					   &overlaps_b_yz,
					   &last_conflicts_yz, 2);
  niter = MIN (niter_x, niter_z);
  niter = MIN (niter_y, niter);
  compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
					   &overlaps_a_xyz,
					   &overlaps_b_xyz,
					   &last_conflicts_xyz, 3);

  xz_p = !integer_zerop (last_conflicts_xz);
  yz_p = !integer_zerop (last_conflicts_yz);
  xyz_p = !integer_zerop (last_conflicts_xyz);

  if (xz_p || yz_p || xyz_p)
    {
      ova1 = affine_fn_cst (integer_zero_node);
      ova2 = affine_fn_cst (integer_zero_node);
      ovb = affine_fn_cst (integer_zero_node);
      if (xz_p)
	{
	  affine_fn t0 = ova1;
	  affine_fn t2 = ovb;

	  ova1 = affine_fn_plus (ova1, overlaps_a_xz);
	  ovb = affine_fn_plus (ovb, overlaps_b_xz);
	  affine_fn_free (t0);
	  affine_fn_free (t2);
	  *last_conflicts = last_conflicts_xz;
	}
      if (yz_p)
	{
	  affine_fn t0 = ova2;
	  affine_fn t2 = ovb;

	  ova2 = affine_fn_plus (ova2, overlaps_a_yz);
	  ovb = affine_fn_plus (ovb, overlaps_b_yz);
	  affine_fn_free (t0);
	  affine_fn_free (t2);
	  *last_conflicts = last_conflicts_yz;
	}
      if (xyz_p)
	{
	  affine_fn t0 = ova1;
	  affine_fn t2 = ova2;
	  affine_fn t4 = ovb;

	  ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
	  ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
	  ovb = affine_fn_plus (ovb, overlaps_b_xyz);
	  affine_fn_free (t0);
	  affine_fn_free (t2);
	  affine_fn_free (t4);
	  *last_conflicts = last_conflicts_xyz;
	}
      *overlaps_a = conflict_fn (2, ova1, ova2);
      *overlaps_b = conflict_fn (1, ovb);
    }
  else
    {
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = integer_zero_node;
    }

  affine_fn_free (overlaps_a_xz);
  affine_fn_free (overlaps_b_xz);
  affine_fn_free (overlaps_a_yz);
  affine_fn_free (overlaps_b_yz);
  affine_fn_free (overlaps_a_xyz);
  affine_fn_free (overlaps_b_xyz);
}
/* Copy the elements of vector VEC1 with length SIZE to VEC2.  */

static void
lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
		    int size)
{
  memcpy (vec2, vec1, size * sizeof (*vec1));
}
/* Copy the elements of M x N matrix MAT1 to MAT2.  */

static void
lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
		    int m, int n)
{
  int i;

  for (i = 0; i < m; i++)
    lambda_vector_copy (mat1[i], mat2[i], n);
}
/* Store the N x N identity matrix in MAT.  */

static void
lambda_matrix_id (lambda_matrix mat, int size)
{
  int i, j;

  for (i = 0; i < size; i++)
    for (j = 0; j < size; j++)
      mat[i][j] = (i == j) ? 1 : 0;
}
/* Return the first nonzero element of vector VEC1 between START and N.
   We must have START <= N.  Returns N if VEC1 is the zero vector.  */

static int
lambda_vector_first_nz (lambda_vector vec1, int n, int start)
{
  int j = start;

  while (j < n && vec1[j] == 0)
    j++;

  return j;
}
/* Add a multiple of row R1 of matrix MAT with N columns to row R2:
   R2 = R2 + CONST1 * R1.  */

static void
lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2, int const1)
{
  int i;

  for (i = 0; i < n; i++)
    mat[r2][i] += const1 * mat[r1][i];
}
/* Multiply vector VEC1 of length SIZE by a constant CONST1,
   and store the result in VEC2.  */

static void
lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
			  int size, int const1)
{
  int i;

  if (const1 == 0)
    lambda_vector_clear (vec2, size);
  else
    for (i = 0; i < size; i++)
      vec2[i] = const1 * vec1[i];
}
/* Negate vector VEC1 with length SIZE and store it in VEC2.  */

static void
lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
		      int size)
{
  lambda_vector_mult_const (vec1, vec2, size, -1);
}
/* Negate row R1 of matrix MAT which has N columns.  */

static void
lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
{
  lambda_vector_negate (mat[r1], mat[r1], n);
}
/* Return true if two vectors are equal.  */

static bool
lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
{
  int i;

  for (i = 0; i < size; i++)
    if (vec1[i] != vec2[i])
      return false;

  return true;
}
/* Given an M x N integer matrix A, this function determines an M x
   M unimodular matrix U, and an M x N echelon matrix S such that
   "U.A = S".  This decomposition is also known as "right Hermite".

   Ref: Algorithm 2.1 page 33 in "Loop Transformations for
   Restructuring Compilers" Utpal Banerjee.  */

static void
lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
			     lambda_matrix S, lambda_matrix U)
{
  int i, j, i0 = 0;

  lambda_matrix_copy (A, S, m, n);
  lambda_matrix_id (U, m);

  for (j = 0; j < n; j++)
    {
      if (lambda_vector_first_nz (S[j], m, i0) < m)
	{
	  ++i0;
	  for (i = m - 1; i >= i0; i--)
	    {
	      while (S[i][j] != 0)
		{
		  int sigma, factor, a, b;

		  a = S[i - 1][j];
		  b = S[i][j];
		  sigma = (a * b < 0) ? -1: 1;
		  a = abs (a);
		  b = abs (b);
		  factor = sigma * (a / b);

		  lambda_matrix_row_add (S, n, i, i-1, -factor);
		  std::swap (S[i], S[i-1]);

		  lambda_matrix_row_add (U, m, i, i-1, -factor);
		  std::swap (U[i], U[i-1]);
		}
	    }
	}
    }
}
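
/* Editorial sketch, not used by the analyzer: for the dim x 1 matrix
   A = (alpha, -beta)^T that the affine-affine test below builds, the
   decomposition above essentially performs the extended Euclidean
   algorithm: S[0][0] ends up holding gcd (alpha, beta) up to sign, and
   the first row of U holds the corresponding Bezout coefficients.  A
   minimal standalone illustration on plain ints (all names here are
   ours, introduced only for this example):  */
#if 0
static int
extended_gcd (int alpha, int beta, int *u, int *v)
{
  /* Invariant: old_r == old_u * alpha + old_v * beta.  */
  int old_r = alpha, r = beta;
  int old_u = 1, cur_u = 0;
  int old_v = 0, cur_v = 1;

  while (r != 0)
    {
      int q = old_r / r;
      int tmp;

      tmp = old_r - q * r; old_r = r; r = tmp;
      tmp = old_u - q * cur_u; old_u = cur_u; cur_u = tmp;
      tmp = old_v - q * cur_v; old_v = cur_v; cur_v = tmp;
    }

  *u = old_u;
  *v = old_v;
  return old_r;  /* gcd (alpha, beta), up to sign.  */
}
#endif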
/* Determines the overlapping elements due to accesses CHREC_A and
   CHREC_B, that are affine functions.  This function cannot handle
   symbolic evolution functions, ie. when initial conditions are
   parameters, because it uses lambda matrices of integers.  */

static void
analyze_subscript_affine_affine (tree chrec_a,
				 tree chrec_b,
				 conflict_function **overlaps_a,
				 conflict_function **overlaps_b,
				 tree *last_conflicts)
{
  unsigned nb_vars_a, nb_vars_b, dim;
  HOST_WIDE_INT init_a, init_b, gamma, gcd_alpha_beta;
  lambda_matrix A, U, S;
  struct obstack scratch_obstack;

  if (eq_evolutions_p (chrec_a, chrec_b))
    {
      /* The accessed index overlaps for each iteration in the
	 loop.  */
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = chrec_dont_know;
      return;
    }
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_subscript_affine_affine \n");

  /* For determining the initial intersection, we have to solve a
     Diophantine equation.  This is the most time consuming part.

     For answering to the question: "Is there a dependence?" we have
     to prove that there exists a solution to the Diophantine
     equation, and that the solution is in the iteration domain,
     i.e. the solution is positive or zero, and that the solution
     happens before the upper bound loop.nb_iterations.  Otherwise
     there is no dependence.  This function outputs a description of
     the iterations that hold the intersections.  */

  nb_vars_a = nb_vars_in_chrec (chrec_a);
  nb_vars_b = nb_vars_in_chrec (chrec_b);

  gcc_obstack_init (&scratch_obstack);

  dim = nb_vars_a + nb_vars_b;
  U = lambda_matrix_new (dim, dim, &scratch_obstack);
  A = lambda_matrix_new (dim, 1, &scratch_obstack);
  S = lambda_matrix_new (dim, 1, &scratch_obstack);

  init_a = int_cst_value (initialize_matrix_A (A, chrec_a, 0, 1));
  init_b = int_cst_value (initialize_matrix_A (A, chrec_b, nb_vars_a, -1));
  gamma = init_b - init_a;

  /* Don't do all the hard work of solving the Diophantine equation
     when we already know the solution: for example,
     | {3, +, 1}_1
     | {3, +, 4}_2
     | gamma = 3 - 3 = 0.
     Then the first overlap occurs during the first iterations:
     | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
  */
  if (gamma == 0)
    {
      if (nb_vars_a == 1 && nb_vars_b == 1)
	{
	  HOST_WIDE_INT step_a, step_b;
	  HOST_WIDE_INT niter, niter_a, niter_b;
	  affine_fn ova, ovb;

	  niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
	  niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
	  niter = MIN (niter_a, niter_b);
	  step_a = int_cst_value (CHREC_RIGHT (chrec_a));
	  step_b = int_cst_value (CHREC_RIGHT (chrec_b));

	  compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
						   &ova, &ovb,
						   last_conflicts, 1);
	  *overlaps_a = conflict_fn (1, ova);
	  *overlaps_b = conflict_fn (1, ovb);
	}

      else if (nb_vars_a == 2 && nb_vars_b == 1)
	compute_overlap_steps_for_affine_1_2
	  (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);

      else if (nb_vars_a == 1 && nb_vars_b == 2)
	compute_overlap_steps_for_affine_1_2
	  (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);

      else
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "affine-affine test failed: too many variables.\n");
	  *overlaps_a = conflict_fn_not_known ();
	  *overlaps_b = conflict_fn_not_known ();
	  *last_conflicts = chrec_dont_know;
	}
      goto end_analyze_subs_aa;
    }

  /* U.A = S */
  lambda_matrix_right_hermite (A, dim, 1, S, U);

  if (S[0][0] < 0)
    {
      S[0][0] *= -1;
      lambda_matrix_row_negate (U, dim, 0);
    }
  gcd_alpha_beta = S[0][0];

  /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
     but that is a quite strange case.  Instead of ICEing, answer
     don't know.  */
  if (gcd_alpha_beta == 0)
    {
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      goto end_analyze_subs_aa;
    }

  /* The classic "gcd-test".  */
  if (!int_divides_p (gcd_alpha_beta, gamma))
    {
      /* The "gcd-test" has determined that there is no integer
	 solution, i.e. there is no dependence.  */
      *overlaps_a = conflict_fn_no_dependence ();
      *overlaps_b = conflict_fn_no_dependence ();
      *last_conflicts = integer_zero_node;
    }

  /* Both access functions are univariate.  This includes SIV and MIV cases.  */
  else if (nb_vars_a == 1 && nb_vars_b == 1)
    {
      /* Both functions should have the same evolution sign.  */
      if (((A[0][0] > 0 && -A[1][0] > 0)
	   || (A[0][0] < 0 && -A[1][0] < 0)))
	{
	  /* The solutions are given by:

	     | [GAMMA/GCD_ALPHA_BETA  t].[u11 u12]  = [x0]
	     |                           [u21 u22]    [y0]

	     For a given integer t.  Using the following variables,

	     | i0 = u11 * gamma / gcd_alpha_beta
	     | j0 = u12 * gamma / gcd_alpha_beta
	     | i1 = u21
	     | j1 = u22

	     the solutions are:

	     | x0 = i0 + i1 * t,
	     | y0 = j0 + j1 * t.  */
	  HOST_WIDE_INT i0, j0, i1, j1;

	  i0 = U[0][0] * gamma / gcd_alpha_beta;
	  j0 = U[0][1] * gamma / gcd_alpha_beta;
	  i1 = U[1][0];
	  j1 = U[1][1];

	  if ((i1 == 0 && i0 < 0)
	      || (j1 == 0 && j0 < 0))
	    {
	      /* There is no solution.
		 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
		 falls in here, but for the moment we don't look at the
		 upper bound of the iteration domain.  */
	      *overlaps_a = conflict_fn_no_dependence ();
	      *overlaps_b = conflict_fn_no_dependence ();
	      *last_conflicts = integer_zero_node;
	      goto end_analyze_subs_aa;
	    }

	  if (i1 > 0 && j1 > 0)
	    {
	      HOST_WIDE_INT niter_a
		= max_stmt_executions_int (get_chrec_loop (chrec_a));
	      HOST_WIDE_INT niter_b
		= max_stmt_executions_int (get_chrec_loop (chrec_b));
	      HOST_WIDE_INT niter = MIN (niter_a, niter_b);

	      /* (X0, Y0) is a solution of the Diophantine equation:
		 "chrec_a (X0) = chrec_b (Y0)".  */
	      HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
					CEIL (-j0, j1));
	      HOST_WIDE_INT x0 = i1 * tau1 + i0;
	      HOST_WIDE_INT y0 = j1 * tau1 + j0;

	      /* (X1, Y1) is the smallest positive solution of the eq
		 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
		 first conflict occurs.  */
	      HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
	      HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
	      HOST_WIDE_INT y1 = y0 - j1 * min_multiple;

	      if (niter > 0)
		{
		  HOST_WIDE_INT tau2 = MIN (FLOOR_DIV (niter_a - i0, i1),
					    FLOOR_DIV (niter_b - j0, j1));
		  HOST_WIDE_INT last_conflict = tau2 - (x1 - i0)/i1;

		  /* If the overlap occurs outside of the bounds of the
		     loop, there is no dependence.  */
		  if (x1 >= niter_a || y1 >= niter_b)
		    {
		      *overlaps_a = conflict_fn_no_dependence ();
		      *overlaps_b = conflict_fn_no_dependence ();
		      *last_conflicts = integer_zero_node;
		      goto end_analyze_subs_aa;
		    }
		  else
		    *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
		}
	      else
		*last_conflicts = chrec_dont_know;

	      *overlaps_a
		= conflict_fn (1,
			       affine_fn_univar (build_int_cst (NULL_TREE, x1),
						 1,
						 build_int_cst (NULL_TREE, i1)));
	      *overlaps_b
		= conflict_fn (1,
			       affine_fn_univar (build_int_cst (NULL_TREE, y1),
						 1,
						 build_int_cst (NULL_TREE, j1)));
	    }
	  else
	    {
	      /* FIXME: For the moment, the upper bound of the
		 iteration domain for i and j is not checked.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
	      *overlaps_a = conflict_fn_not_known ();
	      *overlaps_b = conflict_fn_not_known ();
	      *last_conflicts = chrec_dont_know;
	    }
	}
      else
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
	  *overlaps_a = conflict_fn_not_known ();
	  *overlaps_b = conflict_fn_not_known ();
	  *last_conflicts = chrec_dont_know;
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
    }

end_analyze_subs_aa:
  obstack_free (&scratch_obstack, NULL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  (overlaps_a = ");
      dump_conflict_function (dump_file, *overlaps_a);
      fprintf (dump_file, ")\n  (overlaps_b = ");
      dump_conflict_function (dump_file, *overlaps_b);
      fprintf (dump_file, "))\n");
    }
}
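
/* Editorial cross-check, not part of the analyzer: the parametric
   solution computed above can be sanity-checked by brute force on a
   small univariate example.  For chrec_a = {3, +, 8}_1 and
   chrec_b = {1, +, 6}_1 we need 3 + 8x == 1 + 6y; gamma = -2 is
   divisible by gcd (8, 6) = 2, and the first conflict is at
   (x, y) = (2, 3).  A standalone sketch in plain C (our names,
   illustrative only):  */
#if 0
#include <stdio.h>

int
main (void)
{
  for (int x = 0; x < 100; x++)
    for (int y = 0; y < 100; y++)
      if (3 + 8 * x == 1 + 6 * y)
	{
	  printf ("first conflict: x = %d, y = %d\n", x, y);
	  return 0;
	}
  printf ("no conflict in the first 100 iterations\n");
  return 0;
}
#endif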
/* Returns true when analyze_subscript_affine_affine can be used for
   determining the dependence relation between chrec_a and chrec_b,
   that contain symbols.  This function modifies chrec_a and chrec_b
   such that the analysis result is the same, and such that they don't
   contain symbols, and then can safely be passed to the analyzer.

   Example: The analysis of the following tuples of evolutions produce
   the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
   vs. {0, +, 1}_1:

   {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
   {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
*/

static bool
can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
{
  tree diff, type, left_a, left_b, right_b;

  if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
      || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
    /* FIXME: For the moment not handled.  Might be refined later.  */
    return false;

  type = chrec_type (*chrec_a);
  left_a = CHREC_LEFT (*chrec_a);
  left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
  diff = chrec_fold_minus (type, left_a, left_b);

  if (!evolution_function_is_constant_p (diff))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");

  *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
				     diff, CHREC_RIGHT (*chrec_a));
  right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
  *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
				     build_int_cst (type, 0),
				     right_b);
  return true;
}
/* Analyze a SIV (Single Index Variable) subscript.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_siv_subscript (tree chrec_a,
		       tree chrec_b,
		       conflict_function **overlaps_a,
		       conflict_function **overlaps_b,
		       tree *last_conflicts,
		       int loop_nest_num)
{
  dependence_stats.num_siv++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_siv_subscript \n");

  if (evolution_function_is_constant_p (chrec_a)
      && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
    analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
				      overlaps_a, overlaps_b, last_conflicts);

  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
	   && evolution_function_is_constant_p (chrec_b))
    analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
				      overlaps_b, overlaps_a, last_conflicts);

  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
	   && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
    {
      if (!chrec_contains_symbols (chrec_a)
	  && !chrec_contains_symbols (chrec_b))
	{
	  analyze_subscript_affine_affine (chrec_a, chrec_b,
					   overlaps_a, overlaps_b,
					   last_conflicts);

	  if (CF_NOT_KNOWN_P (*overlaps_a)
	      || CF_NOT_KNOWN_P (*overlaps_b))
	    dependence_stats.num_siv_unimplemented++;
	  else if (CF_NO_DEPENDENCE_P (*overlaps_a)
		   || CF_NO_DEPENDENCE_P (*overlaps_b))
	    dependence_stats.num_siv_independent++;
	  else
	    dependence_stats.num_siv_dependent++;
	}
      else if (can_use_analyze_subscript_affine_affine (&chrec_a,
							&chrec_b))
	{
	  analyze_subscript_affine_affine (chrec_a, chrec_b,
					   overlaps_a, overlaps_b,
					   last_conflicts);

	  if (CF_NOT_KNOWN_P (*overlaps_a)
	      || CF_NOT_KNOWN_P (*overlaps_b))
	    dependence_stats.num_siv_unimplemented++;
	  else if (CF_NO_DEPENDENCE_P (*overlaps_a)
		   || CF_NO_DEPENDENCE_P (*overlaps_b))
	    dependence_stats.num_siv_independent++;
	  else
	    dependence_stats.num_siv_dependent++;
	}
      else
	goto siv_subscript_dontknow;
    }

  else
    {
    siv_subscript_dontknow:;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "  siv test failed: unimplemented");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_siv_unimplemented++;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
/* Returns false if we can prove that the greatest common divisor of the steps
   of CHREC does not divide CST, true otherwise.  */

static bool
gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
{
  HOST_WIDE_INT cd = 0, val;
  tree step;

  if (!tree_fits_shwi_p (cst))
    return true;
  val = tree_to_shwi (cst);

  while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
    {
      step = CHREC_RIGHT (chrec);
      if (!tree_fits_shwi_p (step))
	return true;
      cd = gcd (cd, tree_to_shwi (step));
      chrec = CHREC_LEFT (chrec);
    }

  return val % cd == 0;
}
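
/* Editorial sketch, not used elsewhere: the same reasoning on plain
   integers.  If every evolution step of the chrec is a multiple of
   some G, then the difference between two accesses can only be a
   multiple of G, so a constant difference CST that G does not divide
   rules out any overlap.  The names below are ours, introduced only
   for this illustration:  */
#if 0
static bool
steps_may_divide_p (const int *steps, int n_steps, int cst)
{
  int cd = 0;

  for (int i = 0; i < n_steps; i++)
    cd = gcd (cd, steps[i]);

  /* cd == 0 means there were no steps at all; be conservative.  */
  return cd == 0 || cst % cd == 0;
}
#endif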
/* Analyze a MIV (Multiple Index Variable) subscript with respect to
   LOOP_NEST.  *OVERLAPS_A and *OVERLAPS_B are initialized to the
   functions that describe the relation between the elements accessed
   twice by CHREC_A and CHREC_B.  For k >= 0, the following property
   is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_miv_subscript (tree chrec_a,
		       tree chrec_b,
		       conflict_function **overlaps_a,
		       conflict_function **overlaps_b,
		       tree *last_conflicts,
		       struct loop *loop_nest)
{
  tree type, difference;

  dependence_stats.num_miv++;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_miv_subscript \n");

  type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
  chrec_a = chrec_convert (type, chrec_a, NULL);
  chrec_b = chrec_convert (type, chrec_b, NULL);
  difference = chrec_fold_minus (type, chrec_a, chrec_b);

  if (eq_evolutions_p (chrec_a, chrec_b))
    {
      /* Access functions are the same: all the elements are accessed
	 in the same order.  */
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
      dependence_stats.num_miv_dependent++;
    }

  else if (evolution_function_is_constant_p (difference)
	   /* For the moment, the following is verified:
	      evolution_function_is_affine_multivariate_p (chrec_a,
	      loop_nest->num)  */
	   && !gcd_of_steps_may_divide_p (chrec_a, difference))
    {
      /* testsuite/.../ssa-chrec-33.c
	 {{21, +, 2}_1, +, -2}_2  vs.  {{20, +, 2}_1, +, -2}_2

	 The difference is 1, and all the evolution steps are multiples
	 of 2, consequently there are no overlapping elements.  */
      *overlaps_a = conflict_fn_no_dependence ();
      *overlaps_b = conflict_fn_no_dependence ();
      *last_conflicts = integer_zero_node;
      dependence_stats.num_miv_independent++;
    }

  else if (evolution_function_is_affine_multivariate_p (chrec_a, loop_nest->num)
	   && !chrec_contains_symbols (chrec_a)
	   && evolution_function_is_affine_multivariate_p (chrec_b, loop_nest->num)
	   && !chrec_contains_symbols (chrec_b))
    {
      /* testsuite/.../ssa-chrec-35.c
	 {0, +, 1}_2  vs.  {0, +, 1}_3
	 the overlapping elements are respectively located at iterations:
	 {0, +, 1}_x and {0, +, 1}_x,
	 in other words, we have the equality:
	 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)

	 Other examples:
	 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
	 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)

	 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
	 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
      */
      analyze_subscript_affine_affine (chrec_a, chrec_b,
				       overlaps_a, overlaps_b, last_conflicts);

      if (CF_NOT_KNOWN_P (*overlaps_a)
	  || CF_NOT_KNOWN_P (*overlaps_b))
	dependence_stats.num_miv_unimplemented++;
      else if (CF_NO_DEPENDENCE_P (*overlaps_a)
	       || CF_NO_DEPENDENCE_P (*overlaps_b))
	dependence_stats.num_miv_independent++;
      else
	dependence_stats.num_miv_dependent++;
    }

  else
    {
      /* When the analysis is too difficult, answer "don't know".  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_miv_unimplemented++;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
/* Determines the iterations for which CHREC_A is equal to CHREC_B
   with respect to LOOP_NEST.  OVERLAP_ITERATIONS_A and
   OVERLAP_ITERATIONS_B are initialized with two functions that
   describe the iterations that contain conflicting elements.

   Remark: For an integer k >= 0, the following equality is true:

   CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
*/

static void
analyze_overlapping_iterations (tree chrec_a,
				tree chrec_b,
				conflict_function **overlap_iterations_a,
				conflict_function **overlap_iterations_b,
				tree *last_conflicts, struct loop *loop_nest)
{
  unsigned int lnn = loop_nest->num;

  dependence_stats.num_subscript_tests++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(analyze_overlapping_iterations \n");
      fprintf (dump_file, "  (chrec_a = ");
      print_generic_expr (dump_file, chrec_a, 0);
      fprintf (dump_file, ")\n  (chrec_b = ");
      print_generic_expr (dump_file, chrec_b, 0);
      fprintf (dump_file, ")\n");
    }

  if (chrec_a == NULL_TREE
      || chrec_b == NULL_TREE
      || chrec_contains_undetermined (chrec_a)
      || chrec_contains_undetermined (chrec_b))
    {
      dependence_stats.num_subscript_undetermined++;

      *overlap_iterations_a = conflict_fn_not_known ();
      *overlap_iterations_b = conflict_fn_not_known ();
    }

  /* If they are the same chrec, and are affine, they overlap
     on every iteration.  */
  else if (eq_evolutions_p (chrec_a, chrec_b)
	   && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
	       || operand_equal_p (chrec_a, chrec_b, 0)))
    {
      dependence_stats.num_same_subscript_function++;
      *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = chrec_dont_know;
    }

  /* If they aren't the same, and aren't affine, we can't do anything
     yet.  */
  else if ((chrec_contains_symbols (chrec_a)
	    || chrec_contains_symbols (chrec_b))
	   && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
	       || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
    {
      dependence_stats.num_subscript_undetermined++;
      *overlap_iterations_a = conflict_fn_not_known ();
      *overlap_iterations_b = conflict_fn_not_known ();
    }

  else if (ziv_subscript_p (chrec_a, chrec_b))
    analyze_ziv_subscript (chrec_a, chrec_b,
			   overlap_iterations_a, overlap_iterations_b,
			   last_conflicts);

  else if (siv_subscript_p (chrec_a, chrec_b))
    analyze_siv_subscript (chrec_a, chrec_b,
			   overlap_iterations_a, overlap_iterations_b,
			   last_conflicts, lnn);

  else
    analyze_miv_subscript (chrec_a, chrec_b,
			   overlap_iterations_a, overlap_iterations_b,
			   last_conflicts, loop_nest);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  (overlap_iterations_a = ");
      dump_conflict_function (dump_file, *overlap_iterations_a);
      fprintf (dump_file, ")\n  (overlap_iterations_b = ");
      dump_conflict_function (dump_file, *overlap_iterations_b);
      fprintf (dump_file, "))\n");
    }
}
/* Helper function for uniquely inserting distance vectors.  */

static void
save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
{
  unsigned i;
  lambda_vector v;

  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
    if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
      return;

  DDR_DIST_VECTS (ddr).safe_push (dist_v);
}
/* Helper function for uniquely inserting direction vectors.  */

static void
save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
{
  unsigned i;
  lambda_vector v;

  FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
    if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
      return;

  DDR_DIR_VECTS (ddr).safe_push (dir_v);
}
/* Add a distance of 1 on all the loops outer than INDEX.  If we
   haven't yet determined a distance for this outer loop, push a new
   distance vector composed of the previous distance, and a distance
   of 1 for this outer loop.  Example:

   | loop_1
   |   loop_2
   |     A[10]
   |   endloop_2
   | endloop_1

   Saved vectors are of the form (dist_in_1, dist_in_2).  First, we
   save (0, 1), then we have to save (1, 0).  */

static void
add_outer_distances (struct data_dependence_relation *ddr,
		     lambda_vector dist_v, int index)
{
  /* For each outer loop where init_v is not set, the accesses are
     in dependence of distance 1 in the loop.  */
  while (--index >= 0)
    {
      lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
      lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
      save_v[index] = 1;
      save_dist_v (ddr, save_v);
    }
}
/* Return false when fail to represent the data dependence as a
   distance vector.  INIT_B is set to true when a component has been
   added to the distance vector DIST_V.  INDEX_CARRY is then set to
   the index in DIST_V that carries the dependence.  */

static bool
build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
			     struct data_reference *ddr_a,
			     struct data_reference *ddr_b,
			     lambda_vector dist_v, bool *init_b,
			     int *index_carry)
{
  unsigned i;
  lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      tree access_fn_a, access_fn_b;
      struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);

      if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
	{
	  non_affine_dependence_relation (ddr);
	  return false;
	}

      access_fn_a = DR_ACCESS_FN (ddr_a, i);
      access_fn_b = DR_ACCESS_FN (ddr_b, i);

      if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
	  && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
	{
	  int dist, index;
	  int var_a = CHREC_VARIABLE (access_fn_a);
	  int var_b = CHREC_VARIABLE (access_fn_b);

	  if (var_a != var_b
	      || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
	    {
	      non_affine_dependence_relation (ddr);
	      return false;
	    }

	  dist = int_cst_value (SUB_DISTANCE (subscript));
	  index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
	  *index_carry = MIN (index, *index_carry);

	  /* This is the subscript coupling test.  If we have already
	     recorded a distance for this loop (a distance coming from
	     another subscript), it should be the same.  For example,
	     in the following code, there is no dependence:

	     | loop i = 0, N, 1
	     |   T[i+1][i] = ...
	     |   ... = T[i][i]
	     | endloop
	  */
	  if (init_v[index] != 0 && dist_v[index] != dist)
	    {
	      finalize_ddr_dependent (ddr, chrec_known);
	      return false;
	    }

	  dist_v[index] = dist;
	  init_v[index] = 1;
	  *init_b = true;
	}
      else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
	{
	  /* This can be for example an affine vs. constant dependence
	     (T[i] vs. T[3]) that is not an affine dependence and is
	     not representable as a distance vector.  */
	  non_affine_dependence_relation (ddr);
	  return false;
	}
    }

  return true;
}
/* Return true when the DDR contains only constant access functions.  */

static bool
constant_access_functions (const struct data_dependence_relation *ddr)
{
  unsigned i;

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    if (!evolution_function_is_constant_p (DR_ACCESS_FN (DDR_A (ddr), i))
	|| !evolution_function_is_constant_p (DR_ACCESS_FN (DDR_B (ddr), i)))
      return false;

  return true;
}
/* Helper function for the case where DDR_A and DDR_B are the same
   multivariate access function with a constant step.  For an example,
   see the pr34635 testcase referenced in add_other_self_distances
   below.  */

static void
add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
{
  int x_1, x_2;
  tree c_1 = CHREC_LEFT (c_2);
  tree c_0 = CHREC_LEFT (c_1);
  lambda_vector dist_v;
  HOST_WIDE_INT v1, v2, cd;

  /* Polynomials with more than 2 variables are not handled yet.  When
     the evolution steps are parameters, it is not possible to
     represent the dependence using classical distance vectors.  */
  if (TREE_CODE (c_0) != INTEGER_CST
      || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
      || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
    {
      DDR_AFFINE_P (ddr) = false;
      return;
    }

  x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
  x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));

  /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2).  */
  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  v1 = int_cst_value (CHREC_RIGHT (c_1));
  v2 = int_cst_value (CHREC_RIGHT (c_2));
  cd = gcd (v1, v2);
  v1 /= cd;
  v2 /= cd;

  if (v2 < 0)
    {
      v2 = -v2;
      v1 = -v1;
    }

  dist_v[x_1] = v2;
  dist_v[x_2] = -v1;
  save_dist_v (ddr, dist_v);

  add_outer_distances (ddr, dist_v, x_1);
}
/* Helper function for the case where DDR_A and DDR_B are the same
   access functions.  */

static void
add_other_self_distances (struct data_dependence_relation *ddr)
{
  lambda_vector dist_v;
  unsigned i;
  int index_carry = DDR_NB_LOOPS (ddr);

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      tree access_fun = DR_ACCESS_FN (DDR_A (ddr), i);

      if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
	{
	  if (!evolution_function_is_univariate_p (access_fun))
	    {
	      if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
		{
		  DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
		  return;
		}

	      access_fun = DR_ACCESS_FN (DDR_A (ddr), 0);

	      if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
		add_multivariate_self_dist (ddr, access_fun);
	      else
		/* The evolution step is not constant: it varies in
		   the outer loop, so this cannot be represented by a
		   distance vector.  For example in pr34635.c the
		   evolution is {0, +, {0, +, 4}_1}_2.  */
		DDR_AFFINE_P (ddr) = false;

	      return;
	    }

	  index_carry = MIN (index_carry,
			     index_in_loop_nest (CHREC_VARIABLE (access_fun),
						 DDR_LOOP_NEST (ddr)));
	}
    }

  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  add_outer_distances (ddr, dist_v, index_carry);
}
static void
insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
{
  lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

  dist_v[DDR_INNER_LOOP (ddr)] = 1;
  save_dist_v (ddr, dist_v);
}
/* Adds a unit distance vector to DDR when there is a 0 overlap.  This
   is the case for example when access functions are the same and
   equal to a constant, as in:

   | loop_1
   |   A[3] = ...
   |   ... = A[3]
   | endloop_1

   in which case the distance vectors are (0) and (1).  */

static void
add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
{
  unsigned i, j;

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      subscript_p sub = DDR_SUBSCRIPT (ddr, i);
      conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
      conflict_function *cb = SUB_CONFLICTS_IN_B (sub);

      for (j = 0; j < ca->n; j++)
	if (affine_function_zero_p (ca->fns[j]))
	  {
	    insert_innermost_unit_dist_vector (ddr);
	    return;
	  }

      for (j = 0; j < cb->n; j++)
	if (affine_function_zero_p (cb->fns[j]))
	  {
	    insert_innermost_unit_dist_vector (ddr);
	    return;
	  }
    }
}
/* Compute the classic per loop distance vector.  DDR is the data
   dependence relation to build a vector from.  Return false when fail
   to represent the data dependence as a distance vector.  */

static bool
build_classic_dist_vector (struct data_dependence_relation *ddr,
			   struct loop *loop_nest)
{
  bool init_b = false;
  int index_carry = DDR_NB_LOOPS (ddr);
  lambda_vector dist_v;

  if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
    return false;

  if (same_access_functions (ddr))
    {
      /* Save the 0 vector.  */
      dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
      save_dist_v (ddr, dist_v);

      if (constant_access_functions (ddr))
	add_distance_for_zero_overlaps (ddr);

      if (DDR_NB_LOOPS (ddr) > 1)
	add_other_self_distances (ddr);

      return true;
    }

  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  if (!build_classic_dist_vector_1 (ddr, DDR_A (ddr), DDR_B (ddr),
				    dist_v, &init_b, &index_carry))
    return false;

  /* Save the distance vector if we initialized one.  */
  if (init_b)
    {
      /* Verify a basic constraint: classic distance vectors should
	 always be lexicographically positive.

	 Data references are collected in the order of execution of
	 the program, thus for the following loop

	 | for (i = 1; i < 100; i++)
	 |   for (j = 1; j < 100; j++)
	 |     {
	 |       t = T[j+1][i-1];  // A
	 |       T[j][i] = t + 2;  // B
	 |     }

	 references are collected following the direction of the wind:
	 A then B.  The data dependence tests are performed also
	 following this order, such that we're looking at the distance
	 separating the elements accessed by A from the elements later
	 accessed by B.  But in this example, the distance returned by
	 test_dep (A, B) is lexicographically negative (-1, 1), that
	 means that the access A occurs later than B with respect to
	 the outer loop, ie. we're actually looking upwind.  In this
	 case we solve test_dep (B, A) looking downwind to the
	 lexicographically positive solution, that returns the
	 distance vector (1, -1).  */
      if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
	{
	  lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
	  if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr),
					      loop_nest))
	    return false;
	  compute_subscript_distance (ddr);
	  if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
					    save_v, &init_b, &index_carry))
	    return false;
	  save_dist_v (ddr, save_v);
	  DDR_REVERSED_P (ddr) = true;

	  /* In this case there is a dependence forward for all the
	     outer loops:

	     | for (k = 1; k < 100; k++)
	     |  for (i = 1; i < 100; i++)
	     |   for (j = 1; j < 100; j++)
	     |     {
	     |       t = T[j+1][i-1];  // A
	     |       T[j][i] = t + 2;  // B
	     |     }
	  */
	  if (DDR_NB_LOOPS (ddr) > 1)
	    {
	      add_outer_distances (ddr, save_v, index_carry);
	      add_outer_distances (ddr, dist_v, index_carry);
	    }
	}
      else
	{
	  lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
	  lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));

	  if (DDR_NB_LOOPS (ddr) > 1)
	    {
	      lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

	      if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr),
						  DDR_A (ddr), loop_nest))
		return false;
	      compute_subscript_distance (ddr);
	      if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
						opposite_v, &init_b,
						&index_carry))
		return false;

	      save_dist_v (ddr, save_v);
	      add_outer_distances (ddr, dist_v, index_carry);
	      add_outer_distances (ddr, opposite_v, index_carry);
	    }
	  else
	    save_dist_v (ddr, save_v);
	}
    }
  else
    {
      /* There is a distance of 1 on all the outer loops: Example:
	 there is a dependence of distance 1 on loop_1 for the array A.  */
      add_outer_distances (ddr, dist_v,
			   lambda_vector_first_nz (dist_v,
						   DDR_NB_LOOPS (ddr), 0));
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;

      fprintf (dump_file, "(build_classic_dist_vector\n");
      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (dump_file, "  dist_vector = (");
	  print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	  fprintf (dump_file, "  )\n");
	}
      fprintf (dump_file, ")\n");
    }

  return true;
}
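
/* Editorial illustration, not compiled into the pass: a brute-force
   check of the lexicographic-positivity discussion above.  For the
   example loop nest in the comment (A reads T[j+1][i-1], B writes
   T[j][i]), the iteration-space difference between a read and the
   write touching the same element is (-1, 1); the analyzer therefore
   records the reversed, lexicographically positive vector (1, -1).
   A standalone sketch in plain C:  */
#if 0
#include <stdio.h>

int
main (void)
{
  for (int i = 1; i < 100; i++)
    for (int j = 1; j < 100; j++)
      for (int i2 = 1; i2 < 100; i2++)
	for (int j2 = 1; j2 < 100; j2++)
	  /* A at (i, j) reads T[j+1][i-1]; B at (i2, j2) writes T[j2][i2].  */
	  if (j + 1 == j2 && i - 1 == i2)
	    {
	      printf ("iteration distance = (%d, %d)\n", i2 - i, j2 - j);
	      return 0;
	    }
  return 0;
}
#endif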
/* Return the direction for a given distance.
   FIXME: Computing dir this way is suboptimal, since dir can catch
   cases that dist is unable to represent.  */

static inline enum data_dependence_direction
dir_from_dist (int dist)
{
  if (dist > 0)
    return dir_positive;
  else if (dist < 0)
    return dir_negative;
  else
    return dir_equal;
}

/* Compute the classic per loop direction vector.  DDR is the data
   dependence relation to build a vector from.  */

static void
build_classic_dir_vector (struct data_dependence_relation *ddr)
{
  unsigned i, j;
  lambda_vector dist_v;

  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

      for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
	dir_v[j] = dir_from_dist (dist_v[j]);

      save_dir_v (ddr, dir_v);
    }
}
/* Helper function.  Returns true when there is a dependence between
   data references DRA and DRB.  */

static bool
subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
			       struct data_reference *dra,
			       struct data_reference *drb,
			       struct loop *loop_nest)
{
  unsigned int i;
  tree last_conflicts;
  struct subscript *subscript;
  tree res = NULL_TREE;

  for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
    {
      conflict_function *overlaps_a, *overlaps_b;

      analyze_overlapping_iterations (DR_ACCESS_FN (dra, i),
				      DR_ACCESS_FN (drb, i),
				      &overlaps_a, &overlaps_b,
				      &last_conflicts, loop_nest);

      if (SUB_CONFLICTS_IN_A (subscript))
	free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
      if (SUB_CONFLICTS_IN_B (subscript))
	free_conflict_function (SUB_CONFLICTS_IN_B (subscript));

      SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
      SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
      SUB_LAST_CONFLICT (subscript) = last_conflicts;

      /* If there is any undetermined conflict function we have to
	 give a conservative answer in case we cannot prove that
	 no dependence exists when analyzing another subscript.  */
      if (CF_NOT_KNOWN_P (overlaps_a)
	  || CF_NOT_KNOWN_P (overlaps_b))
	{
	  res = chrec_dont_know;
	  continue;
	}

      /* When there is a subscript with no dependence we can stop.  */
      else if (CF_NO_DEPENDENCE_P (overlaps_a)
	       || CF_NO_DEPENDENCE_P (overlaps_b))
	{
	  res = chrec_known;
	  break;
	}
    }

  if (res == NULL_TREE)
    return true;

  if (res == chrec_known)
    dependence_stats.num_dependence_independent++;
  else
    dependence_stats.num_dependence_undetermined++;
  finalize_ddr_dependent (ddr, res);
  return false;
}
/* Computes the conflicting iterations in LOOP_NEST, and initialize DDR.  */

static void
subscript_dependence_tester (struct data_dependence_relation *ddr,
			     struct loop *loop_nest)
{
  if (subscript_dependence_tester_1 (ddr, DDR_A (ddr), DDR_B (ddr), loop_nest))
    dependence_stats.num_dependence_dependent++;

  compute_subscript_distance (ddr);
  if (build_classic_dist_vector (ddr, loop_nest))
    build_classic_dir_vector (ddr);
}
/* Returns true when all the access functions of A are affine or
   constant with respect to LOOP_NEST.  */

static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
					   const struct loop *loop_nest)
{
  unsigned int i;
  vec<tree> fns = DR_ACCESS_FNS (a);
  tree t;

  FOR_EACH_VEC_ELT (fns, i, t)
    if (!evolution_function_is_invariant_p (t, loop_nest->num)
	&& !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
      return false;

  return true;
}
/* This computes the affine dependence relation between A and B with
   respect to LOOP_NEST.  CHREC_KNOWN is used for representing the
   independence between two accesses, while CHREC_DONT_KNOW is used
   for representing the unknown relation.

   Note that it is possible to stop the computation of the dependence
   relation the first time we detect a CHREC_KNOWN element for a given
   subscript.  */

void
compute_affine_dependence (struct data_dependence_relation *ddr,
			   struct loop *loop_nest)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(compute_affine_dependence\n");
      fprintf (dump_file, "  stmt_a: ");
      print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
      fprintf (dump_file, "  stmt_b: ");
      print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
    }

  /* Analyze only when the dependence relation is not yet known.  */
  if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      dependence_stats.num_dependence_tests++;

      if (access_functions_are_affine_or_constant_p (dra, loop_nest)
	  && access_functions_are_affine_or_constant_p (drb, loop_nest))
	subscript_dependence_tester (ddr, loop_nest);

      /* As a last case, if the dependence cannot be determined, or if
	 the dependence is considered too difficult to determine, answer
	 "don't know".  */
      else
	{
	  dependence_stats.num_dependence_undetermined++;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Data ref a:\n");
	      dump_data_reference (dump_file, dra);
	      fprintf (dump_file, "Data ref b:\n");
	      dump_data_reference (dump_file, drb);
	      fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n");
	    }
	  finalize_ddr_dependent (ddr, chrec_dont_know);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
	fprintf (dump_file, ") -> no dependence\n");
      else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	fprintf (dump_file, ") -> dependence analysis failed\n");
      else
	fprintf (dump_file, ")\n");
    }
}
/* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
   the data references in DATAREFS, in the LOOP_NEST.  When
   COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
   relations.  Return true when successful, i.e. data references number
   is small enough to be handled.  */

bool
compute_all_dependences (vec<data_reference_p> datarefs,
			 vec<ddr_p> *dependence_relations,
			 vec<loop_p> loop_nest,
			 bool compute_self_and_rr)
{
  struct data_dependence_relation *ddr;
  struct data_reference *a, *b;
  unsigned int i, j;

  if ((int) datarefs.length ()
      > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
    {
      struct data_dependence_relation *ddr;

      /* Insert a single relation into dependence_relations:
	 chrec_dont_know.  */
      ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
      dependence_relations->safe_push (ddr);
      return false;
    }

  FOR_EACH_VEC_ELT (datarefs, i, a)
    for (j = i + 1; datarefs.iterate (j, &b); j++)
      if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
	{
	  ddr = initialize_data_dependence_relation (a, b, loop_nest);
	  dependence_relations->safe_push (ddr);
	  if (loop_nest.exists ())
	    compute_affine_dependence (ddr, loop_nest[0]);
	}

  if (compute_self_and_rr)
    FOR_EACH_VEC_ELT (datarefs, i, a)
      {
	ddr = initialize_data_dependence_relation (a, a, loop_nest);
	dependence_relations->safe_push (ddr);
	if (loop_nest.exists ())
	  compute_affine_dependence (ddr, loop_nest[0]);
      }

  return true;
}
/* Describes a location of a memory reference.  */

struct data_ref_loc
{
  /* The memory reference.  */
  tree ref;

  /* True if the memory reference is read.  */
  bool is_read;
};


/* Stores the locations of memory references in STMT to REFERENCES.  Returns
   true if STMT clobbers memory, false otherwise.  */

static bool
get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
{
  bool clobbers_memory = false;
  data_ref_loc ref;
  tree op0, op1;
  enum gimple_code stmt_code = gimple_code (stmt);

  /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
     As we cannot model data-references to not spelled out
     accesses give up if they may occur.  */
  if (stmt_code == GIMPLE_CALL
      && !(gimple_call_flags (stmt) & ECF_CONST))
    {
      /* Allow IFN_GOMP_SIMD_LANE in their own loops.  */
      if (gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	    {
	      struct loop *loop = gimple_bb (stmt)->loop_father;
	      tree uid = gimple_call_arg (stmt, 0);
	      gcc_assert (TREE_CODE (uid) == SSA_NAME);
	      if (loop == NULL
		  || loop->simduid != SSA_NAME_VAR (uid))
		clobbers_memory = true;
	      break;
	    }
	  case IFN_MASK_LOAD:
	  case IFN_MASK_STORE:
	    break;
	  default:
	    clobbers_memory = true;
	    break;
	  }
      else
	clobbers_memory = true;
    }
  else if (stmt_code == GIMPLE_ASM
	   && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
	       || gimple_vuse (stmt)))
    clobbers_memory = true;

  if (!gimple_vuse (stmt))
    return clobbers_memory;

  if (stmt_code == GIMPLE_ASSIGN)
    {
      tree base;
      op0 = gimple_assign_lhs (stmt);
      op1 = gimple_assign_rhs1 (stmt);

      if (DECL_P (op1)
	  || (REFERENCE_CLASS_P (op1)
	      && (base = get_base_address (op1))
	      && TREE_CODE (base) != SSA_NAME
	      && !is_gimple_min_invariant (base)))
	{
	  ref.ref = op1;
	  ref.is_read = true;
	  references->safe_push (ref);
	}
    }
  else if (stmt_code == GIMPLE_CALL)
    {
      unsigned i, n;
      tree ptr, type;
      unsigned int align;

      ref.is_read = false;
      if (gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_LOAD:
	    if (gimple_call_lhs (stmt) == NULL_TREE)
	      break;
	    ref.is_read = true;
	    /* FALLTHRU */
	  case IFN_MASK_STORE:
	    ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
	    align = tree_to_shwi (gimple_call_arg (stmt, 1));
	    if (ref.is_read)
	      type = TREE_TYPE (gimple_call_lhs (stmt));
	    else
	      type = TREE_TYPE (gimple_call_arg (stmt, 3));
	    if (TYPE_ALIGN (type) != align)
	      type = build_aligned_type (type, align);
	    ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
				   ptr);
	    references->safe_push (ref);
	    return false;
	  default:
	    break;
	  }

      op0 = gimple_call_lhs (stmt);
      n = gimple_call_num_args (stmt);
      for (i = 0; i < n; i++)
	{
	  op1 = gimple_call_arg (stmt, i);

	  if (DECL_P (op1)
	      || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
	    {
	      ref.ref = op1;
	      ref.is_read = true;
	      references->safe_push (ref);
	    }
	}
    }
  else
    return clobbers_memory;

  if (op0
      && (DECL_P (op0)
	  || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
    {
      ref.ref = op0;
      ref.is_read = false;
      references->safe_push (ref);
    }
  return clobbers_memory;
}
/* Returns true if the loop-nest has any data reference.  */

bool
loop_nest_has_data_refs (loop_p loop)
{
  basic_block *bbs = get_loop_body (loop);
  auto_vec<data_ref_loc, 3> references;

  for (unsigned i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator bsi;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);
	  get_references_in_stmt (stmt, &references);
	  if (references.length ())
	    {
	      free (bbs);
	      return true;
	    }
	}
    }
  free (bbs);

  if (loop->inner)
    {
      loop = loop->inner;
      while (loop)
	{
	  if (loop_nest_has_data_refs (loop))
	    return true;
	  loop = loop->next;
	}
    }
  return false;
}
/* Stores the data references in STMT to DATAREFS.  If there is an unanalyzable
   reference, returns false, otherwise returns true.  NEST is the outermost
   loop of the loop nest in which the references should be analyzed.  */

bool
find_data_references_in_stmt (struct loop *nest, gimple *stmt,
			      vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  bool ret = true;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return false;

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest, loop_containing_stmt (stmt),
			    ref->ref, stmt, ref->is_read);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return ret;
}
/* Stores the data references in STMT to DATAREFS.  If there is an
   unanalyzable reference, returns false, otherwise returns true.
   NEST is the outermost loop of the loop nest in which the references
   should be instantiated, LOOP is the loop in which the references
   should be analyzed.  */

bool
graphite_find_data_references_in_stmt (loop_p nest, loop_p loop, gimple *stmt,
				       vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  bool ret = true;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return false;

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return ret;
}
/* Search the data references in basic block BB of LOOP, and record the
   information into DATAREFS.  Returns chrec_dont_know when failing to
   analyze a difficult case, returns NULL_TREE otherwise.  */

tree
find_data_references_in_bb (struct loop *loop, basic_block bb,
			    vec<data_reference_p> *datarefs)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple *stmt = gsi_stmt (bsi);

      if (!find_data_references_in_stmt (loop, stmt, datarefs))
	{
	  struct data_reference *res;
	  res = XCNEW (struct data_reference);
	  datarefs->safe_push (res);

	  return chrec_dont_know;
	}
    }

  return NULL_TREE;
}
/* Search the data references in LOOP, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.

   TODO: This function should be made smarter so that it can handle address
   arithmetic as if they were array accesses, etc.  */

tree
find_data_references_in_loop (struct loop *loop,
			      vec<data_reference_p> *datarefs)
{
  basic_block bb, *bbs;
  unsigned int i;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
	{
	  free (bbs);
	  return chrec_dont_know;
	}
    }
  free (bbs);

  return NULL_TREE;
}
/* Recursive helper function.  */

static bool
find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
{
  /* Inner loops of the nest should not contain siblings.  Example:
     when there are two consecutive loops at the same depth, the
     dependence relation cannot be captured by the distance
     abstraction.  */
  if (!loop)
    return true;

  if (loop->next)
    return false;

  loop_nest->safe_push (loop);
  return find_loop_nest_1 (loop->inner, loop_nest);
}

/* Return false when the LOOP is not well nested.  Otherwise return
   true and insert in LOOP_NEST the loops of the nest.  LOOP_NEST will
   contain the loops from the outermost to the innermost, as they will
   appear in the classic distance vector.  */

bool
find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
{
  loop_nest->safe_push (loop);
  return find_loop_nest_1 (loop->inner, loop_nest);
}
/* Returns true when the data dependences have been computed, false otherwise.
   Given a loop nest LOOP, the following vectors are returned:
   DATAREFS is initialized to all the array elements contained in this loop,
   DEPENDENCE_RELATIONS contains the relations between the data references.
   Compute read-read and self relations if
   COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE.  */

bool
compute_data_dependences_for_loop (struct loop *loop,
				   bool compute_self_and_read_read_dependences,
				   vec<loop_p> *loop_nest,
				   vec<data_reference_p> *datarefs,
				   vec<ddr_p> *dependence_relations)
{
  bool res = true;

  memset (&dependence_stats, 0, sizeof (dependence_stats));

  /* If the loop nest is not well formed, or one of the data references
     is not computable, give up without spending time to compute other
     dependences.  */
  if (!loop
      || !find_loop_nest (loop, loop_nest)
      || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
      || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
				   compute_self_and_read_read_dependences))
    res = false;

  if (dump_file && (dump_flags & TDF_STATS))
    {
      fprintf (dump_file, "Dependence tester statistics:\n");

      fprintf (dump_file, "Number of dependence tests: %d\n",
	       dependence_stats.num_dependence_tests);
      fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
	       dependence_stats.num_dependence_dependent);
      fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
	       dependence_stats.num_dependence_independent);
      fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
	       dependence_stats.num_dependence_undetermined);

      fprintf (dump_file, "Number of subscript tests: %d\n",
	       dependence_stats.num_subscript_tests);
      fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
	       dependence_stats.num_subscript_undetermined);
      fprintf (dump_file, "Number of same subscript function: %d\n",
	       dependence_stats.num_same_subscript_function);

      fprintf (dump_file, "Number of ziv tests: %d\n",
	       dependence_stats.num_ziv);
      fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
	       dependence_stats.num_ziv_dependent);
      fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
	       dependence_stats.num_ziv_independent);
      fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
	       dependence_stats.num_ziv_unimplemented);

      fprintf (dump_file, "Number of siv tests: %d\n",
	       dependence_stats.num_siv);
      fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
	       dependence_stats.num_siv_dependent);
      fprintf (dump_file, "Number of siv tests returning independent: %d\n",
	       dependence_stats.num_siv_independent);
      fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
	       dependence_stats.num_siv_unimplemented);

      fprintf (dump_file, "Number of miv tests: %d\n",
	       dependence_stats.num_miv);
      fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
	       dependence_stats.num_miv_dependent);
      fprintf (dump_file, "Number of miv tests returning independent: %d\n",
	       dependence_stats.num_miv_independent);
      fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
	       dependence_stats.num_miv_unimplemented);
    }

  return res;
}
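
/* Editorial sketch of how a client pass typically drives the entry
   point above.  LOOP is assumed to come from the caller; error
   handling and dump gating are elided; the function name is ours and
   this block is illustrative only, not part of the pass:  */
#if 0
static void
example_analyze_loop_dependences (struct loop *loop)
{
  vec<loop_p> loop_nest = vNULL;
  vec<data_reference_p> datarefs = vNULL;
  vec<ddr_p> dependence_relations = vNULL;

  if (compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
					 &dependence_relations)
      && dump_file)
    dump_data_dependence_relations (dump_file, dependence_relations);

  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);
  loop_nest.release ();
}
#endif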
/* Free the memory used by a data dependence relation DDR.  */

void
free_dependence_relation (struct data_dependence_relation *ddr)
{
  if (ddr == NULL)
    return;

  if (DDR_SUBSCRIPTS (ddr).exists ())
    free_subscripts (DDR_SUBSCRIPTS (ddr));
  DDR_DIST_VECTS (ddr).release ();
  DDR_DIR_VECTS (ddr).release ();

  free (ddr);
}

/* Free the memory used by the data dependence relations from
   DEPENDENCE_RELATIONS.  */

void
free_dependence_relations (vec<ddr_p> dependence_relations)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
    if (ddr)
      free_dependence_relation (ddr);

  dependence_relations.release ();
}

/* Free the memory used by the data references from DATAREFS.  */

void
free_data_refs (vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    free_data_ref (dr);
  datarefs.release ();
}