/* Conversion of SESE regions to Polyhedra.
   Copyright (C) 2009-2013 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <sebastian.pop@amd.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"

#include <isl/set.h>
#include <isl/map.h>
#include <isl/union_map.h>
#include <isl/constraint.h>
#include <isl/aff.h>
#include <cloog/cloog.h>
#include <cloog/isl/domain.h>

#include "system.h"
#include "coretypes.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "domwalk.h"
#include "sese.h"
#include "graphite-poly.h"
#include "graphite-sese-to-poly.h"
/* Assigns to RES the value of the INTEGER_CST T.  */

static void
tree_int_to_gmp (tree t, mpz_t res)
{
  double_int di = tree_to_double_int (t);
  mpz_set_double_int (res, di, TYPE_UNSIGNED (TREE_TYPE (t)));
}
/* Returns the index of the PHI argument defined in the outermost
   loop.  */

static size_t
phi_arg_in_outermost_loop (gimple phi)
{
  loop_p loop = gimple_bb (phi)->loop_father;
  size_t i, res = 0;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (!flow_bb_inside_loop_p (loop, gimple_phi_arg_edge (phi, i)->src))
      {
        loop = gimple_phi_arg_edge (phi, i)->src->loop_father;
        res = i;
      }

  return res;
}
/* Removes a simple copy phi node "RES = phi (INIT, RES)" at position
   PSI by inserting on the loop ENTRY edge the assignment "RES = INIT".  */

static void
remove_simple_copy_phi (gimple_stmt_iterator *psi)
{
  gimple phi = gsi_stmt (*psi);
  tree res = gimple_phi_result (phi);
  size_t entry = phi_arg_in_outermost_loop (phi);
  tree init = gimple_phi_arg_def (phi, entry);
  gimple stmt = gimple_build_assign (res, init);
  edge e = gimple_phi_arg_edge (phi, entry);

  remove_phi_node (psi, false);
  gsi_insert_on_edge_immediate (e, stmt);
  SSA_NAME_DEF_STMT (res) = stmt;
}
/* Removes an invariant phi node at position PSI by inserting on the
   loop ENTRY edge the assignment RES = INIT.  */

static void
remove_invariant_phi (sese region, gimple_stmt_iterator *psi)
{
  gimple phi = gsi_stmt (*psi);
  loop_p loop = loop_containing_stmt (phi);
  tree res = gimple_phi_result (phi);
  tree scev = scalar_evolution_in_region (region, loop, res);
  size_t entry = phi_arg_in_outermost_loop (phi);
  edge e = gimple_phi_arg_edge (phi, entry);
  tree var;
  gimple stmt;
  gimple_seq stmts = NULL;

  if (tree_contains_chrecs (scev, NULL))
    scev = gimple_phi_arg_def (phi, entry);

  var = force_gimple_operand (scev, &stmts, true, NULL_TREE);
  stmt = gimple_build_assign (res, var);
  remove_phi_node (psi, false);

  gimple_seq_add_stmt (&stmts, stmt);
  gsi_insert_seq_on_edge (e, stmts);
  gsi_commit_edge_inserts ();
  SSA_NAME_DEF_STMT (res) = stmt;
}
/* Returns true when the phi node at PSI is of the form "a = phi (a, x)".  */

static bool
simple_copy_phi_p (gimple phi)
{
  tree res;

  if (gimple_phi_num_args (phi) != 2)
    return false;

  res = gimple_phi_result (phi);
  return (res == gimple_phi_arg_def (phi, 0)
          || res == gimple_phi_arg_def (phi, 1));
}
/* Returns true when the phi node at position PSI is a reduction phi
   node in REGION.  Otherwise moves the pointer PSI to the next phi to
   be considered.  */

static bool
reduction_phi_p (sese region, gimple_stmt_iterator *psi)
{
  loop_p loop;
  gimple phi = gsi_stmt (*psi);
  tree res = gimple_phi_result (phi);

  loop = loop_containing_stmt (phi);

  if (simple_copy_phi_p (phi))
    {
      /* PRE introduces phi nodes like these, for an example,
         see id-5.f in the fortran graphite testsuite:

         # prephitmp.85_265 = PHI <prephitmp.85_258(33), prephitmp.85_265(18)>
      */
      remove_simple_copy_phi (psi);
      return false;
    }

  if (scev_analyzable_p (res, region))
    {
      tree scev = scalar_evolution_in_region (region, loop, res);

      if (evolution_function_is_invariant_p (scev, loop->num))
        remove_invariant_phi (region, psi);
      else
        gsi_next (psi);

      return false;
    }

  /* All the other cases are considered reductions.  */
  return true;
}
/* Store the GRAPHITE representation of BB.  */

static gimple_bb_p
new_gimple_bb (basic_block bb, vec<data_reference_p> drs)
{
  struct gimple_bb *gbb;

  gbb = XNEW (struct gimple_bb);
  bb->aux = gbb;
  GBB_BB (gbb) = bb;
  GBB_DATA_REFS (gbb) = drs;
  GBB_CONDITIONS (gbb).create (0);
  GBB_CONDITION_CASES (gbb).create (0);

  return gbb;
}
/* Frees the aux field of all data references in DATAREFS.  */

static void
free_data_refs_aux (vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (dr->aux)
      {
        base_alias_pair *bap = (base_alias_pair *)(dr->aux);

        free (bap->alias_set);

        free (bap);
        dr->aux = NULL;
      }
}
/* Frees GBB.  */

static void
free_gimple_bb (struct gimple_bb *gbb)
{
  free_data_refs_aux (GBB_DATA_REFS (gbb));
  free_data_refs (GBB_DATA_REFS (gbb));

  GBB_CONDITIONS (gbb).release ();
  GBB_CONDITION_CASES (gbb).release ();
  GBB_BB (gbb)->aux = 0;
  XDELETE (gbb);
}
/* Deletes all gimple bbs in SCOP.  */

static void
remove_gbbs_in_scop (scop_p scop)
{
  int i;
  poly_bb_p pbb;

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    free_gimple_bb (PBB_BLACK_BOX (pbb));
}
/* Deletes all scops in SCOPS.  */

void
free_scops (vec<scop_p> scops)
{
  int i;
  scop_p scop;

  FOR_EACH_VEC_ELT (scops, i, scop)
    {
      remove_gbbs_in_scop (scop);
      free_sese (SCOP_REGION (scop));
      free_scop (scop);
    }

  scops.release ();
}
/* Same as outermost_loop_in_sese, returns the outermost loop
   containing BB in REGION, but makes sure that the returned loop
   belongs to the REGION, and so this returns the first loop in the
   REGION when the loop containing BB does not belong to REGION.  */

static loop_p
outermost_loop_in_sese_1 (sese region, basic_block bb)
{
  loop_p nest = outermost_loop_in_sese (region, bb);

  if (loop_in_sese_p (nest, region))
    return nest;

  /* When the basic block BB does not belong to a loop in the region,
     return the first loop in the region.  */
  nest = nest->inner;
  while (nest)
    if (loop_in_sese_p (nest, region))
      break;
    else
      nest = nest->next;

  gcc_assert (nest);
  return nest;
}
/* Generates a polyhedral black box only if the bb contains interesting
   information.  */

static gimple_bb_p
try_generate_gimple_bb (scop_p scop, basic_block bb)
{
  vec<data_reference_p> drs;
  drs.create (5);
  sese region = SCOP_REGION (scop);
  loop_p nest = outermost_loop_in_sese_1 (region, bb);
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      loop_p loop;

      if (is_gimple_debug (stmt))
        continue;

      loop = loop_containing_stmt (stmt);
      if (!loop_in_sese_p (loop, region))
        loop = nest;

      graphite_find_data_references_in_stmt (nest, loop, stmt, &drs);
    }

  return new_gimple_bb (bb, drs);
}
/* Returns true if all predecessors of BB, that are not dominated by BB, are
   marked in MAP.  The predecessors dominated by BB are loop latches and will
   be handled after BB.  */

static bool
all_non_dominated_preds_marked_p (basic_block bb, sbitmap map)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!bitmap_bit_p (map, e->src->index)
        && !dominated_by_p (CDI_DOMINATORS, e->src, bb))
      return false;

  return true;
}
/* Compare the depth of two basic_block's P1 and P2.  */

static int
compare_bb_depths (const void *p1, const void *p2)
{
  const_basic_block const bb1 = *(const_basic_block const *)p1;
  const_basic_block const bb2 = *(const_basic_block const *)p2;
  int d1 = loop_depth (bb1->loop_father);
  int d2 = loop_depth (bb2->loop_father);

  if (d1 < d2)
    return 1;

  if (d1 > d2)
    return -1;

  return 0;
}
/* Sort the basic blocks from DOM such that the first are the ones at
   a deepest loop level.  */

static void
graphite_sort_dominated_info (vec<basic_block> dom)
{
  dom.qsort (compare_bb_depths);
}
/* Recursive helper function for build_scop_bbs.  */

static void
build_scop_bbs_1 (scop_p scop, sbitmap visited, basic_block bb)
{
  sese region = SCOP_REGION (scop);
  vec<basic_block> dom;
  poly_bb_p pbb;

  if (bitmap_bit_p (visited, bb->index)
      || !bb_in_sese_p (bb, region))
    return;

  pbb = new_poly_bb (scop, try_generate_gimple_bb (scop, bb));
  SCOP_BBS (scop).safe_push (pbb);
  bitmap_set_bit (visited, bb->index);

  dom = get_dominated_by (CDI_DOMINATORS, bb);

  if (!dom.exists ())
    return;

  graphite_sort_dominated_info (dom);

  while (!dom.is_empty ())
    {
      int i;
      basic_block dom_bb;

      FOR_EACH_VEC_ELT (dom, i, dom_bb)
        if (all_non_dominated_preds_marked_p (dom_bb, visited))
          {
            build_scop_bbs_1 (scop, visited, dom_bb);
            dom.unordered_remove (i);
            break;
          }
    }

  dom.release ();
}
/* Gather the basic blocks belonging to the SCOP.  */

static void
build_scop_bbs (scop_p scop)
{
  sbitmap visited = sbitmap_alloc (last_basic_block);
  sese region = SCOP_REGION (scop);

  bitmap_clear (visited);
  build_scop_bbs_1 (scop, visited, SESE_ENTRY_BB (region));
  sbitmap_free (visited);
}
/* Return an ISL identifier for the polyhedral basic block PBB.  */

static isl_id *
isl_id_for_pbb (scop_p s, poly_bb_p pbb)
{
  char name[50];
  snprintf (name, sizeof (name), "S_%d", pbb_index (pbb));
  return isl_id_alloc (s->ctx, name, pbb);
}
/* Converts the STATIC_SCHEDULE of PBB into a scattering polyhedron.
   We generate SCATTERING_DIMENSIONS scattering dimensions.

   CLooG 0.15.0 and previous versions require that all
   scattering functions of one CloogProgram have the same number of
   scattering dimensions, therefore we allow it to be specified.  This
   should be removed in future versions of CLooG.

   The scattering polyhedron consists of these dimensions: scattering,
   loop_iterators, parameters.

   Example:

   | scattering_dimensions = 5
   | used_scattering_dimensions = 3

   | Scattering polyhedron:

   | scattering: {s1, s2, s3, s4, s5}
   | loop_iterators: {i}
   | parameters: {p1, p2}

   | s1  s2  s3  s4  s5  i   p1  p2  1
   | 1   0   0   0   0   0   0   0  -4  = 0
   | 0   1   0   0   0  -1   0   0   0  = 0
   | 0   0   1   0   0   0   0   0  -5  = 0  */
static void
build_pbb_scattering_polyhedrons (isl_aff *static_sched,
                                  poly_bb_p pbb, int scattering_dimensions)
{
  int i;
  int nb_iterators = pbb_dim_iter_domain (pbb);
  int used_scattering_dimensions = nb_iterators * 2 + 1;
  isl_int val;
  isl_space *dc, *dm;

  gcc_assert (scattering_dimensions >= used_scattering_dimensions);

  isl_int_init (val);

  dc = isl_set_get_space (pbb->domain);
  dm = isl_space_add_dims (isl_space_from_domain (dc),
                           isl_dim_out, scattering_dimensions);
  pbb->schedule = isl_map_universe (dm);

  for (i = 0; i < scattering_dimensions; i++)
    {
      /* Textual order inside this loop.  */
      if ((i % 2) == 0)
        {
          isl_constraint *c = isl_equality_alloc
            (isl_local_space_from_space (isl_map_get_space (pbb->schedule)));

          if (0 != isl_aff_get_coefficient (static_sched, isl_dim_in,
                                            i / 2, &val))
            gcc_unreachable ();

          isl_int_neg (val, val);
          c = isl_constraint_set_constant (c, val);
          c = isl_constraint_set_coefficient_si (c, isl_dim_out, i, 1);
          pbb->schedule = isl_map_add_constraint (pbb->schedule, c);
        }

      /* Iterations of this loop.  */
      else /* if ((i % 2) == 1) */
        {
          int loop = (i - 1) / 2;

          pbb->schedule = isl_map_equate (pbb->schedule, isl_dim_in, loop,
                                          isl_dim_out, i);
        }
    }

  isl_int_clear (val);

  pbb->transformed = isl_map_copy (pbb->schedule);
}
/* Build for BB the static schedule.

   The static schedule is a Dewey numbering of the abstract syntax
   tree: http://en.wikipedia.org/wiki/Dewey_Decimal_Classification

   The following example informally defines the static schedule:
   each statement receives one number per surrounding loop depth,
   recording its textual position at that depth (the "Static schedules
   for A to F" table).  */
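/* One possible shape of that example (a hedged sketch, using the A-to-F
   statement names the comment above refers to, not necessarily the
   exact figure from the original text):

   | A
   | for (i: ...)
   |   {
   |     for (j: ...)
   |       {
   |         B
   |         C
   |       }
   |     for (k: ...)
   |       {
   |         D
   |         E
   |       }
   |   }
   | F

   |     DEPTH
   |     0 1 2
   |   A 0
   |   B 1 0 0
   |   C 1 0 1
   |   D 1 1 0
   |   E 1 1 1
   |   F 2  */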
static void
build_scop_scattering (scop_p scop)
{
  int i;
  poly_bb_p pbb;
  gimple_bb_p previous_gbb = NULL;
  isl_space *dc = isl_set_get_space (scop->context);
  isl_aff *static_sched;

  dc = isl_space_add_dims (dc, isl_dim_set, number_of_loops (cfun));
  static_sched = isl_aff_zero_on_domain (isl_local_space_from_space (dc));

  /* We have to start schedules at 0 on the first component and
     because we cannot compare_prefix_loops against a previous loop,
     prefix will be equal to zero, and that index will be
     incremented before copying.  */
  static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in, 0, -1);

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    {
      gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
      int prefix;
      int nb_scat_dims = pbb_dim_iter_domain (pbb) * 2 + 1;

      if (previous_gbb)
        prefix = nb_common_loops (SCOP_REGION (scop), previous_gbb, gbb);
      else
        prefix = 0;

      previous_gbb = gbb;

      static_sched = isl_aff_add_coefficient_si (static_sched, isl_dim_in,
                                                 prefix, 1);
      build_pbb_scattering_polyhedrons (static_sched, pbb, nb_scat_dims);
    }

  isl_aff_free (static_sched);
}
static isl_pw_aff *extract_affine (scop_p, tree, __isl_take isl_space *space);
/* Extract an affine expression from the chain of recurrence E.  */

static isl_pw_aff *
extract_affine_chrec (scop_p s, tree e, __isl_take isl_space *space)
{
  isl_pw_aff *lhs = extract_affine (s, CHREC_LEFT (e), isl_space_copy (space));
  isl_pw_aff *rhs = extract_affine (s, CHREC_RIGHT (e), isl_space_copy (space));
  isl_local_space *ls = isl_local_space_from_space (space);
  unsigned pos = sese_loop_depth ((sese) s->region, get_chrec_loop (e)) - 1;
  isl_aff *loop = isl_aff_set_coefficient_si
    (isl_aff_zero_on_domain (ls), isl_dim_in, pos, 1);
  isl_pw_aff *l = isl_pw_aff_from_aff (loop);

  /* Before multiplying, make sure that the result is affine.  */
  gcc_assert (isl_pw_aff_is_cst (rhs)
              || isl_pw_aff_is_cst (l));

  return isl_pw_aff_add (lhs, isl_pw_aff_mul (rhs, l));
}
/* Extract an affine expression from the mult_expr E.  */

static isl_pw_aff *
extract_affine_mul (scop_p s, tree e, __isl_take isl_space *space)
{
  isl_pw_aff *lhs = extract_affine (s, TREE_OPERAND (e, 0),
                                    isl_space_copy (space));
  isl_pw_aff *rhs = extract_affine (s, TREE_OPERAND (e, 1), space);

  if (!isl_pw_aff_is_cst (lhs)
      && !isl_pw_aff_is_cst (rhs))
    {
      isl_pw_aff_free (lhs);
      isl_pw_aff_free (rhs);
      return NULL;
    }

  return isl_pw_aff_mul (lhs, rhs);
}
/* Return an ISL identifier from the name of the ssa_name E.  */

static isl_id *
isl_id_for_ssa_name (scop_p s, tree e)
{
  const char *name = get_name (e);
  isl_id *id;

  if (name)
    id = isl_id_alloc (s->ctx, name, e);
  else
    {
      char name1[50];
      snprintf (name1, sizeof (name1), "P_%d", SSA_NAME_VERSION (e));
      id = isl_id_alloc (s->ctx, name1, e);
    }

  return id;
}
/* Return an ISL identifier for the data reference DR.  */

static isl_id *
isl_id_for_dr (scop_p s, data_reference_p dr ATTRIBUTE_UNUSED)
{
  /* Data references all get the same isl_id.  They need to be comparable
     and are distinguished through the first dimension, which contains the
     alias set number.  */
  return isl_id_alloc (s->ctx, "", 0);
}
/* Extract an affine expression from the ssa_name E.  */

static isl_pw_aff *
extract_affine_name (scop_p s, tree e, __isl_take isl_space *space)
{
  isl_aff *aff;
  isl_set *dom;
  isl_id *id;
  int dimension;

  id = isl_id_for_ssa_name (s, e);
  dimension = isl_space_find_dim_by_id (space, isl_dim_param, id);
  isl_id_free (id);
  dom = isl_set_universe (isl_space_copy (space));
  aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
  aff = isl_aff_add_coefficient_si (aff, isl_dim_param, dimension, 1);
  return isl_pw_aff_alloc (dom, aff);
}
/* Extract an affine expression from the gmp constant G.  */

static isl_pw_aff *
extract_affine_gmp (mpz_t g, __isl_take isl_space *space)
{
  isl_local_space *ls = isl_local_space_from_space (isl_space_copy (space));
  isl_aff *aff = isl_aff_zero_on_domain (ls);
  isl_set *dom = isl_set_universe (space);
  isl_int v;

  isl_int_init (v);
  isl_int_set_gmp (v, g);
  aff = isl_aff_add_constant (aff, v);
  isl_int_clear (v);

  return isl_pw_aff_alloc (dom, aff);
}
/* Extract an affine expression from the integer_cst E.  */

static isl_pw_aff *
extract_affine_int (tree e, __isl_take isl_space *space)
{
  isl_pw_aff *res;
  mpz_t g;

  mpz_init (g);
  tree_int_to_gmp (e, g);
  res = extract_affine_gmp (g, space);
  mpz_clear (g);

  return res;
}
/* Compute pwaff mod 2^width.  */

static isl_pw_aff *
wrap (isl_pw_aff *pwaff, unsigned width)
{
  isl_int mod;

  isl_int_init (mod);
  isl_int_set_si (mod, 1);
  isl_int_mul_2exp (mod, mod, width);

  pwaff = isl_pw_aff_mod (pwaff, mod);

  isl_int_clear (mod);

  return pwaff;
}
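/* For instance, with WIDTH = 8 an expression such as p + 250 becomes
   (p + 250) mod 256, so for p = 10 the wrapped value is 4, matching the
   overflow behavior of an 8-bit unsigned type.  */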
/* When parameter NAME is in REGION, returns its index in SESE_PARAMS.
   Otherwise returns -1.  */

static inline int
parameter_index_in_region_1 (tree name, sese region)
{
  int i;
  tree p;

  gcc_assert (TREE_CODE (name) == SSA_NAME);

  FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, p)
    if (p == name)
      return i;

  return -1;
}
/* When the parameter NAME is in REGION, returns its index in
   SESE_PARAMS.  Otherwise this function inserts NAME in SESE_PARAMS
   and returns the index of NAME.  */

static int
parameter_index_in_region (tree name, sese region)
{
  int i;

  gcc_assert (TREE_CODE (name) == SSA_NAME);

  i = parameter_index_in_region_1 (name, region);
  if (i != -1)
    return i;

  gcc_assert (SESE_ADD_PARAMS (region));

  i = SESE_PARAMS (region).length ();
  SESE_PARAMS (region).safe_push (name);
  return i;
}
/* Extract an affine expression from the tree E in the scop S.  */

static isl_pw_aff *
extract_affine (scop_p s, tree e, __isl_take isl_space *space)
{
  isl_pw_aff *lhs, *rhs, *res;
  tree type;

  if (e == chrec_dont_know)
    {
      isl_space_free (space);
      return NULL;
    }

  switch (TREE_CODE (e))
    {
    case POLYNOMIAL_CHREC:
      res = extract_affine_chrec (s, e, space);
      break;

    case MULT_EXPR:
      res = extract_affine_mul (s, e, space);
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
      rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
      res = isl_pw_aff_add (lhs, rhs);
      break;

    case MINUS_EXPR:
      lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
      rhs = extract_affine (s, TREE_OPERAND (e, 1), space);
      res = isl_pw_aff_sub (lhs, rhs);
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      lhs = extract_affine (s, TREE_OPERAND (e, 0), isl_space_copy (space));
      rhs = extract_affine (s, integer_minus_one_node, space);
      res = isl_pw_aff_mul (lhs, rhs);
      break;

    case SSA_NAME:
      gcc_assert (-1 != parameter_index_in_region_1 (e, SCOP_REGION (s)));
      res = extract_affine_name (s, e, space);
      break;

    case INTEGER_CST:
      res = extract_affine_int (e, space);
      /* No need to wrap a single integer.  */
      return res;

    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      res = extract_affine (s, TREE_OPERAND (e, 0), space);
      break;

    default:
      gcc_unreachable ();
      break;
    }

  type = TREE_TYPE (e);
  if (TYPE_UNSIGNED (type))
    res = wrap (res, TYPE_PRECISION (type));

  return res;
}
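/* Informal example: for the chrec {2, +, 3}_1, where loop 1 maps to the
   scop dimension i, the extracted expression is 2 + 3 * i; if the tree
   type is an unsigned 8-bit type, the result is further wrapped to
   (2 + 3 * i) mod 256.  */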
/* In the context of sese S, scan the expression E and translate it to
   a linear expression C.  When parsing a symbolic multiplication, K
   represents the constant multiplier of an expression containing
   parameters.  */

static void
scan_tree_for_params (sese s, tree e)
{
  if (e == chrec_dont_know)
    return;

  switch (TREE_CODE (e))
    {
    case POLYNOMIAL_CHREC:
      scan_tree_for_params (s, CHREC_LEFT (e));
      break;

    case MULT_EXPR:
      if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
        scan_tree_for_params (s, TREE_OPERAND (e, 0));
      else
        scan_tree_for_params (s, TREE_OPERAND (e, 1));
      break;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      scan_tree_for_params (s, TREE_OPERAND (e, 0));
      scan_tree_for_params (s, TREE_OPERAND (e, 1));
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
    CASE_CONVERT:
    case NON_LVALUE_EXPR:
      scan_tree_for_params (s, TREE_OPERAND (e, 0));
      break;

    case SSA_NAME:
      parameter_index_in_region (e, s);
      break;

    case INTEGER_CST:
    case ADDR_EXPR:
      break;

    default:
      gcc_unreachable ();
      break;
    }
}
/* Find parameters with respect to REGION in BB.  We are looking in memory
   access functions, conditions and loop bounds.  */

static void
find_params_in_bb (sese region, gimple_bb_p gbb)
{
  int i;
  unsigned j;
  data_reference_p dr;
  gimple stmt;
  loop_p loop = GBB_BB (gbb)->loop_father;

  /* Find parameters in the access functions of data references.  */
  FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
    for (j = 0; j < DR_NUM_DIMENSIONS (dr); j++)
      scan_tree_for_params (region, DR_ACCESS_FN (dr, j));

  /* Find parameters in conditional statements.  */
  FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
    {
      tree lhs = scalar_evolution_in_region (region, loop,
                                             gimple_cond_lhs (stmt));
      tree rhs = scalar_evolution_in_region (region, loop,
                                             gimple_cond_rhs (stmt));

      scan_tree_for_params (region, lhs);
      scan_tree_for_params (region, rhs);
    }
}
/* Record the parameters used in the SCOP.  A variable is a parameter
   in a scop if it does not vary during the execution of that scop.  */

static void
find_scop_parameters (scop_p scop)
{
  poly_bb_p pbb;
  unsigned i;
  sese region = SCOP_REGION (scop);
  struct loop *loop;
  int nbp;

  /* Find the parameters used in the loop bounds.  */
  FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
    {
      tree nb_iters = number_of_latch_executions (loop);

      if (!chrec_contains_symbols (nb_iters))
        continue;

      nb_iters = scalar_evolution_in_region (region, loop, nb_iters);
      scan_tree_for_params (region, nb_iters);
    }

  /* Find the parameters used in data accesses.  */
  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    find_params_in_bb (region, PBB_BLACK_BOX (pbb));

  nbp = sese_nb_params (region);
  scop_set_nb_params (scop, nbp);
  SESE_ADD_PARAMS (region) = false;

  {
    tree e;
    isl_space *space = isl_space_set_alloc (scop->ctx, nbp, 0);

    FOR_EACH_VEC_ELT (SESE_PARAMS (region), i, e)
      space = isl_space_set_dim_id (space, isl_dim_param, i,
                                    isl_id_for_ssa_name (scop, e));

    scop->context = isl_set_universe (space);
  }
}
/* Builds the constraint polyhedra for LOOP in SCOP.  OUTER gives
   the constraints for the surrounding loops.  */

static void
build_loop_iteration_domains (scop_p scop, struct loop *loop,
                              int nb,
                              isl_set *outer, isl_set **doms)
{
  tree nb_iters = number_of_latch_executions (loop);
  sese region = SCOP_REGION (scop);

  isl_set *inner = isl_set_copy (outer);
  isl_space *space;
  isl_constraint *c;
  int pos = isl_set_dim (outer, isl_dim_set);
  isl_int v;
  mpz_t g;

  mpz_init (g);
  isl_int_init (v);

  inner = isl_set_add_dims (inner, isl_dim_set, 1);
  space = isl_set_get_space (inner);

  /* 0 <= loop_i */
  c = isl_inequality_alloc
      (isl_local_space_from_space (isl_space_copy (space)));
  c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, 1);
  inner = isl_set_add_constraint (inner, c);

  /* loop_i <= cst_nb_iters */
  if (TREE_CODE (nb_iters) == INTEGER_CST)
    {
      c = isl_inequality_alloc
          (isl_local_space_from_space (isl_space_copy (space)));
      c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
      tree_int_to_gmp (nb_iters, g);
      isl_int_set_gmp (v, g);
      c = isl_constraint_set_constant (c, v);
      inner = isl_set_add_constraint (inner, c);
    }

  /* loop_i <= expr_nb_iters */
  else if (!chrec_contains_undetermined (nb_iters))
    {
      double_int nit;
      isl_pw_aff *aff;
      isl_set *valid;
      isl_local_space *ls;
      isl_aff *al;
      isl_set *le;

      nb_iters = scalar_evolution_in_region (region, loop, nb_iters);

      aff = extract_affine (scop, nb_iters, isl_set_get_space (inner));
      valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (aff));
      valid = isl_set_project_out (valid, isl_dim_set, 0,
                                   isl_set_dim (valid, isl_dim_set));
      scop->context = isl_set_intersect (scop->context, valid);

      ls = isl_local_space_from_space (isl_space_copy (space));
      al = isl_aff_set_coefficient_si (isl_aff_zero_on_domain (ls),
                                       isl_dim_in, pos, 1);
      le = isl_pw_aff_le_set (isl_pw_aff_from_aff (al),
                              isl_pw_aff_copy (aff));
      inner = isl_set_intersect (inner, le);

      if (max_stmt_executions (loop, &nit))
        {
          /* Insert in the context the constraints from the
             estimation of the number of iterations NIT and the
             symbolic number of iterations (involving parameter
             names) NB_ITERS.  First, build the affine expression
             "NIT - NB_ITERS" and then say that it is positive,
             i.e., NIT approximates NB_ITERS: "NIT >= NB_ITERS".  */
          isl_pw_aff *approx;
          isl_set *x;

          mpz_set_double_int (g, nit, false);
          mpz_sub_ui (g, g, 1);
          approx = extract_affine_gmp (g, isl_set_get_space (inner));
          x = isl_pw_aff_ge_set (approx, aff);
          x = isl_set_project_out (x, isl_dim_set, 0,
                                   isl_set_dim (x, isl_dim_set));
          scop->context = isl_set_intersect (scop->context, x);

          c = isl_inequality_alloc
              (isl_local_space_from_space (isl_space_copy (space)));
          c = isl_constraint_set_coefficient_si (c, isl_dim_set, pos, -1);
          isl_int_set_gmp (v, g);
          c = isl_constraint_set_constant (c, v);
          inner = isl_set_add_constraint (inner, c);
        }
      else
        isl_pw_aff_free (aff);
    }
  else
    gcc_unreachable ();

  if (loop->inner && loop_in_sese_p (loop->inner, region))
    build_loop_iteration_domains (scop, loop->inner, nb + 1,
                                  isl_set_copy (inner), doms);

  if (nb != 0
      && loop->next
      && loop_in_sese_p (loop->next, region))
    build_loop_iteration_domains (scop, loop->next, nb,
                                  isl_set_copy (outer), doms);

  doms[loop->num] = inner;

  isl_set_free (outer);
  isl_space_free (space);
  isl_int_clear (v);
  mpz_clear (g);
}
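/* Informal example: for "for (i = 0; i < N; i++)" with N a parameter,
   nb_iters is N - 1 and the domain built here is roughly
   | [N] -> { [i] : 0 <= i and i <= N - 1 }
   while the context receives the validity constraint N - 1 >= 0 coming
   from the non-negativity of the number of latch executions.  */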
/* Returns a linear expression for tree T evaluated in PBB.  */

static isl_pw_aff *
create_pw_aff_from_tree (poly_bb_p pbb, tree t)
{
  scop_p scop = PBB_SCOP (pbb);

  t = scalar_evolution_in_region (SCOP_REGION (scop), pbb_loop (pbb), t);
  gcc_assert (!automatically_generated_chrec_p (t));

  return extract_affine (scop, t, isl_set_get_space (pbb->domain));
}
/* Add conditional statement STMT to PBB.  CODE is used as the comparison
   operator.  This allows us to invert the condition or to handle
   inequalities.  */

static void
add_condition_to_pbb (poly_bb_p pbb, gimple stmt, enum tree_code code)
{
  isl_pw_aff *lhs = create_pw_aff_from_tree (pbb, gimple_cond_lhs (stmt));
  isl_pw_aff *rhs = create_pw_aff_from_tree (pbb, gimple_cond_rhs (stmt));
  isl_set *cond;

  switch (code)
    {
    case LT_EXPR:
      cond = isl_pw_aff_lt_set (lhs, rhs);
      break;

    case GT_EXPR:
      cond = isl_pw_aff_gt_set (lhs, rhs);
      break;

    case LE_EXPR:
      cond = isl_pw_aff_le_set (lhs, rhs);
      break;

    case GE_EXPR:
      cond = isl_pw_aff_ge_set (lhs, rhs);
      break;

    case EQ_EXPR:
      cond = isl_pw_aff_eq_set (lhs, rhs);
      break;

    case NE_EXPR:
      cond = isl_pw_aff_ne_set (lhs, rhs);
      break;

    default:
      isl_pw_aff_free (lhs);
      isl_pw_aff_free (rhs);
      return;
    }

  cond = isl_set_coalesce (cond);
  cond = isl_set_set_tuple_id (cond, isl_set_get_tuple_id (pbb->domain));
  pbb->domain = isl_set_intersect (pbb->domain, cond);
}
/* Add conditions to the domain of PBB.  */

static void
add_conditions_to_domain (poly_bb_p pbb)
{
  unsigned int i;
  gimple stmt;
  gimple_bb_p gbb = PBB_BLACK_BOX (pbb);

  if (GBB_CONDITIONS (gbb).is_empty ())
    return;

  FOR_EACH_VEC_ELT (GBB_CONDITIONS (gbb), i, stmt)
    switch (gimple_code (stmt))
      {
      case GIMPLE_COND:
        {
          enum tree_code code = gimple_cond_code (stmt);

          /* The conditions for ELSE-branches are inverted.  */
          if (!GBB_CONDITION_CASES (gbb)[i])
            code = invert_tree_comparison (code, false);

          add_condition_to_pbb (pbb, stmt, code);
          break;
        }

      case GIMPLE_SWITCH:
        /* Switch statements are not supported right now - fall through.  */

      default:
        gcc_unreachable ();
        break;
      }
}
/* Traverses all the GBBs of the SCOP and add their constraints to the
   iteration domains.  */

static void
add_conditions_to_constraints (scop_p scop)
{
  int i;
  poly_bb_p pbb;

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    add_conditions_to_domain (pbb);
}
/* Structure used to pass data to dom_walk.  */

struct bsc
{
  vec<gimple> *conditions, *cases;
  sese region;
};
/* Returns a COND_EXPR statement when BB has a single predecessor, the
   edge between BB and its predecessor is not a loop exit edge, and
   the last statement of the single predecessor is a COND_EXPR.  */

static gimple
single_pred_cond_non_loop_exit (basic_block bb)
{
  if (single_pred_p (bb))
    {
      edge e = single_pred_edge (bb);
      basic_block pred = e->src;
      gimple stmt;

      if (loop_depth (pred->loop_father) > loop_depth (bb->loop_father))
        return NULL;

      stmt = last_stmt (pred);

      if (stmt && gimple_code (stmt) == GIMPLE_COND)
        return stmt;
    }

  return NULL;
}
/* Call-back for dom_walk executed before visiting the dominated
   blocks.  */

static void
build_sese_conditions_before (struct dom_walk_data *dw_data,
                              basic_block bb)
{
  struct bsc *data = (struct bsc *) dw_data->global_data;
  vec<gimple> *conditions = data->conditions;
  vec<gimple> *cases = data->cases;
  gimple_bb_p gbb;
  gimple stmt;

  if (!bb_in_sese_p (bb, data->region))
    return;

  stmt = single_pred_cond_non_loop_exit (bb);

  if (stmt)
    {
      edge e = single_pred_edge (bb);

      conditions->safe_push (stmt);

      if (e->flags & EDGE_TRUE_VALUE)
        cases->safe_push (stmt);
      else
        cases->safe_push (NULL);
    }

  gbb = gbb_from_bb (bb);

  if (gbb)
    {
      GBB_CONDITIONS (gbb) = conditions->copy ();
      GBB_CONDITION_CASES (gbb) = cases->copy ();
    }
}
/* Call-back for dom_walk executed after visiting the dominated
   blocks.  */

static void
build_sese_conditions_after (struct dom_walk_data *dw_data,
                             basic_block bb)
{
  struct bsc *data = (struct bsc *) dw_data->global_data;
  vec<gimple> *conditions = data->conditions;
  vec<gimple> *cases = data->cases;

  if (!bb_in_sese_p (bb, data->region))
    return;

  if (single_pred_cond_non_loop_exit (bb))
    {
      conditions->pop ();
      cases->pop ();
    }
}
/* Record all conditions in REGION.  */

static void
build_sese_conditions (sese region)
{
  struct dom_walk_data walk_data;
  vec<gimple> conditions;
  conditions.create (3);
  vec<gimple> cases;
  cases.create (3);
  struct bsc data;

  data.conditions = &conditions;
  data.cases = &cases;
  data.region = region;

  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = build_sese_conditions_before;
  walk_data.after_dom_children = build_sese_conditions_after;
  walk_data.global_data = &data;
  walk_data.block_local_data_size = 0;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, SESE_ENTRY_BB (region));
  fini_walk_dominator_tree (&walk_data);

  conditions.release ();
  cases.release ();
}
/* Add constraints on the possible values of parameter P from the type
   of P.  */

static void
add_param_constraints (scop_p scop, graphite_dim_t p)
{
  tree parameter = SESE_PARAMS (SCOP_REGION (scop))[p];
  tree type = TREE_TYPE (parameter);
  tree lb = NULL_TREE;
  tree ub = NULL_TREE;

  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    lb = lower_bound_in_type (type, type);
  else
    lb = TYPE_MIN_VALUE (type);

  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    ub = upper_bound_in_type (type, type);
  else
    ub = TYPE_MAX_VALUE (type);

  if (lb)
    {
      isl_space *space = isl_set_get_space (scop->context);
      isl_constraint *c;
      mpz_t g;
      isl_int v;

      c = isl_inequality_alloc (isl_local_space_from_space (space));
      mpz_init (g);
      isl_int_init (v);
      tree_int_to_gmp (lb, g);
      isl_int_set_gmp (v, g);
      isl_int_neg (v, v);
      mpz_clear (g);
      c = isl_constraint_set_constant (c, v);
      isl_int_clear (v);
      c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, 1);

      scop->context = isl_set_add_constraint (scop->context, c);
    }

  if (ub)
    {
      isl_space *space = isl_set_get_space (scop->context);
      isl_constraint *c;
      mpz_t g;
      isl_int v;

      c = isl_inequality_alloc (isl_local_space_from_space (space));

      mpz_init (g);
      isl_int_init (v);
      tree_int_to_gmp (ub, g);
      isl_int_set_gmp (v, g);
      mpz_clear (g);
      c = isl_constraint_set_constant (c, v);
      isl_int_clear (v);
      c = isl_constraint_set_coefficient_si (c, isl_dim_param, p, -1);

      scop->context = isl_set_add_constraint (scop->context, c);
    }
}
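/* Informal example: for a parameter P of type unsigned char, LB = 0 and
   UB = 255, so the two inequalities added to the context are
   | P - 0 >= 0
   | -P + 255 >= 0
   i.e. 0 <= P <= 255.  */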
/* Build the context of the SCOP.  The context usually contains extra
   constraints that are added to the iteration domains that constrain
   some parameters.  */

static void
build_scop_context (scop_p scop)
{
  graphite_dim_t p, n = scop_nb_params (scop);

  for (p = 0; p < n; p++)
    add_param_constraints (scop, p);
}
/* Build the iteration domains: the loops belonging to the current
   SCOP, and that vary for the execution of the current basic block.  */

static void
build_scop_iteration_domain (scop_p scop)
{
  struct loop *loop;
  sese region = SCOP_REGION (scop);
  int i;
  poly_bb_p pbb;
  int nb_loops = number_of_loops (cfun);
  isl_set **doms = XCNEWVEC (isl_set *, nb_loops);

  FOR_EACH_VEC_ELT (SESE_LOOP_NEST (region), i, loop)
    if (!loop_in_sese_p (loop_outer (loop), region))
      build_loop_iteration_domains (scop, loop, 0,
                                    isl_set_copy (scop->context), doms);

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    {
      loop = pbb_loop (pbb);

      if (doms[loop->num])
        pbb->domain = isl_set_copy (doms[loop->num]);
      else
        pbb->domain = isl_set_copy (scop->context);

      pbb->domain = isl_set_set_tuple_id (pbb->domain,
                                          isl_id_for_pbb (scop, pbb));
    }

  for (i = 0; i < nb_loops; i++)
    if (doms[i])
      isl_set_free (doms[i]);

  free (doms);
}
/* Add a constraint to the ACCESSES polyhedron for the alias set of
   data reference DR.  ACCESSP_NB_DIMS is the dimension of the
   ACCESSES polyhedron, DOM_NB_DIMS is the dimension of the iteration
   domain.  */

static isl_map *
pdr_add_alias_set (isl_map *acc, data_reference_p dr)
{
  isl_constraint *c;
  int alias_set_num = 0;
  base_alias_pair *bap = (base_alias_pair *)(dr->aux);

  if (bap && bap->alias_set)
    alias_set_num = *(bap->alias_set);

  c = isl_equality_alloc
      (isl_local_space_from_space (isl_map_get_space (acc)));
  c = isl_constraint_set_constant_si (c, -alias_set_num);
  c = isl_constraint_set_coefficient_si (c, isl_dim_out, 0, 1);

  return isl_map_add_constraint (acc, c);
}
/* Assign the affine expression INDEX to the output dimension POS of
   MAP and return the result.  */

static isl_map *
set_index (isl_map *map, int pos, isl_pw_aff *index)
{
  isl_map *index_map;
  int len = isl_map_dim (map, isl_dim_out);
  isl_id *id;

  index_map = isl_map_from_pw_aff (index);
  index_map = isl_map_insert_dims (index_map, isl_dim_out, 0, pos);
  index_map = isl_map_add_dims (index_map, isl_dim_out, len - pos - 1);

  id = isl_map_get_tuple_id (map, isl_dim_out);
  index_map = isl_map_set_tuple_id (index_map, isl_dim_out, id);
  id = isl_map_get_tuple_id (map, isl_dim_in);
  index_map = isl_map_set_tuple_id (index_map, isl_dim_in, id);

  return isl_map_intersect (map, index_map);
}
/* Add to ACCESSES polyhedron equalities defining the access functions
   to the memory.  ACCESSP_NB_DIMS is the dimension of the ACCESSES
   polyhedron, DOM_NB_DIMS is the dimension of the iteration domain.
   PBB is the poly_bb_p that contains the data reference DR.  */

static isl_map *
pdr_add_memory_accesses (isl_map *acc, data_reference_p dr, poly_bb_p pbb)
{
  int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);
  scop_p scop = PBB_SCOP (pbb);

  for (i = 0; i < nb_subscripts; i++)
    {
      isl_pw_aff *aff;
      tree afn = DR_ACCESS_FN (dr, nb_subscripts - 1 - i);

      aff = extract_affine (scop, afn,
                            isl_space_domain (isl_map_get_space (acc)));
      acc = set_index (acc, i + 1, aff);
    }

  return acc;
}
/* Add constraints representing the size of the accessed data to the
   ACCESSES polyhedron.  ACCESSP_NB_DIMS is the dimension of the
   ACCESSES polyhedron, DOM_NB_DIMS is the dimension of the iteration
   domain.  */

static isl_set *
pdr_add_data_dimensions (isl_set *extent, scop_p scop, data_reference_p dr)
{
  tree ref = DR_REF (dr);
  int i, nb_subscripts = DR_NUM_DIMENSIONS (dr);

  for (i = nb_subscripts - 1; i >= 0; i--, ref = TREE_OPERAND (ref, 0))
    {
      tree low, high;

      if (TREE_CODE (ref) != ARRAY_REF)
        break;

      low = array_ref_low_bound (ref);
      high = array_ref_up_bound (ref);

      /* XXX The PPL code dealt separately with
         subscript - low >= 0 and high - subscript >= 0 in case one of
         the two bounds isn't known.  Do the same here?  */

      if (host_integerp (low, 0)
          && high
          && host_integerp (high, 0)
          /* 1-element arrays at end of structures may extend over
             their declared size.  */
          && !(array_at_struct_end_p (ref)
               && operand_equal_p (low, high, 0)))
        {
          isl_id *id;
          isl_aff *aff;
          isl_set *univ, *lbs, *ubs;
          isl_pw_aff *index;
          isl_space *space;
          isl_set *valid;
          isl_pw_aff *lb = extract_affine_int (low, isl_set_get_space (extent));
          isl_pw_aff *ub = extract_affine_int (high, isl_set_get_space (extent));

          /* high >= 0 */
          valid = isl_pw_aff_nonneg_set (isl_pw_aff_copy (ub));
          valid = isl_set_project_out (valid, isl_dim_set, 0,
                                       isl_set_dim (valid, isl_dim_set));
          scop->context = isl_set_intersect (scop->context, valid);

          space = isl_set_get_space (extent);
          aff = isl_aff_zero_on_domain (isl_local_space_from_space (space));
          aff = isl_aff_add_coefficient_si (aff, isl_dim_in, i + 1, 1);
          univ = isl_set_universe (isl_space_domain (isl_aff_get_space (aff)));
          index = isl_pw_aff_alloc (univ, aff);

          id = isl_set_get_tuple_id (extent);
          lb = isl_pw_aff_set_tuple_id (lb, isl_dim_in, isl_id_copy (id));
          ub = isl_pw_aff_set_tuple_id (ub, isl_dim_in, id);

          /* low <= sub_i <= high */
          lbs = isl_pw_aff_ge_set (isl_pw_aff_copy (index), lb);
          ubs = isl_pw_aff_le_set (index, ub);
          extent = isl_set_intersect (extent, lbs);
          extent = isl_set_intersect (extent, ubs);
        }
    }

  return extent;
}
/* Build data accesses for DR in PBB.  */

static void
build_poly_dr (data_reference_p dr, poly_bb_p pbb)
{
  int dr_base_object_set;
  isl_map *acc;
  isl_set *extent;
  scop_p scop = PBB_SCOP (pbb);

  {
    isl_space *dc = isl_set_get_space (pbb->domain);
    int nb_out = 1 + DR_NUM_DIMENSIONS (dr);
    isl_space *space = isl_space_add_dims (isl_space_from_domain (dc),
                                           isl_dim_out, nb_out);

    acc = isl_map_universe (space);
    acc = isl_map_set_tuple_id (acc, isl_dim_out, isl_id_for_dr (scop, dr));
  }

  acc = pdr_add_alias_set (acc, dr);
  acc = pdr_add_memory_accesses (acc, dr, pbb);

  {
    isl_id *id = isl_id_for_dr (scop, dr);
    int nb = 1 + DR_NUM_DIMENSIONS (dr);
    isl_space *space = isl_space_set_alloc (scop->ctx, 0, nb);
    int alias_set_num = 0;
    base_alias_pair *bap = (base_alias_pair *)(dr->aux);

    if (bap && bap->alias_set)
      alias_set_num = *(bap->alias_set);

    space = isl_space_set_tuple_id (space, isl_dim_set, id);
    extent = isl_set_nat_universe (space);
    extent = isl_set_fix_si (extent, isl_dim_set, 0, alias_set_num);
    extent = pdr_add_data_dimensions (extent, scop, dr);
  }

  gcc_assert (dr->aux);
  dr_base_object_set = ((base_alias_pair *)(dr->aux))->base_obj_set;

  new_poly_dr (pbb, dr_base_object_set,
               DR_IS_READ (dr) ? PDR_READ : PDR_WRITE,
               dr, DR_NUM_DIMENSIONS (dr), acc, extent);
}
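/* Informal sketch: for a read of A[i+1] whose alias set number is 1,
   performed in a statement S_5 with domain { S_5[i] }, the access
   relation built above is roughly
   | { S_5[i] -> [1, i + 1] }
   where output dimension 0 carries the alias set number and dimension 1
   the subscript; the extent set fixes dimension 0 to the alias set and
   bounds the remaining dimensions by the array extents.  */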
/* Write to FILE the alias graph of data references in DIMACS format.  */

static inline bool
write_alias_graph_to_ascii_dimacs (FILE *file, char *comment,
                                   vec<data_reference_p> drs)
{
  int num_vertex = drs.length ();
  int edge_num = 0;
  data_reference_p dr1, dr2;
  int i, j;

  if (num_vertex == 0)
    return true;

  FOR_EACH_VEC_ELT (drs, i, dr1)
    for (j = i + 1; drs.iterate (j, &dr2); j++)
      if (dr_may_alias_p (dr1, dr2, true))
        edge_num++;

  fprintf (file, "$\n");

  if (comment)
    fprintf (file, "c %s\n", comment);

  fprintf (file, "p edge %d %d\n", num_vertex, edge_num);

  FOR_EACH_VEC_ELT (drs, i, dr1)
    for (j = i + 1; drs.iterate (j, &dr2); j++)
      if (dr_may_alias_p (dr1, dr2, true))
        fprintf (file, "e %d %d\n", i + 1, j + 1);

  return true;
}
/* Write to FILE the alias graph of data references in DOT format.  */

static inline bool
write_alias_graph_to_ascii_dot (FILE *file, char *comment,
                                vec<data_reference_p> drs)
{
  int num_vertex = drs.length ();
  data_reference_p dr1, dr2;
  int i, j;

  if (num_vertex == 0)
    return true;

  fprintf (file, "$\n");

  if (comment)
    fprintf (file, "c %s\n", comment);

  /* First print all the vertices.  */
  FOR_EACH_VEC_ELT (drs, i, dr1)
    fprintf (file, "n%d;\n", i);

  FOR_EACH_VEC_ELT (drs, i, dr1)
    for (j = i + 1; drs.iterate (j, &dr2); j++)
      if (dr_may_alias_p (dr1, dr2, true))
        fprintf (file, "n%d n%d\n", i, j);

  return true;
}
/* Write to FILE the alias graph of data references in ECC format.  */

static inline bool
write_alias_graph_to_ascii_ecc (FILE *file, char *comment,
                                vec<data_reference_p> drs)
{
  int num_vertex = drs.length ();
  data_reference_p dr1, dr2;
  int i, j;

  if (num_vertex == 0)
    return true;

  fprintf (file, "$\n");

  if (comment)
    fprintf (file, "c %s\n", comment);

  FOR_EACH_VEC_ELT (drs, i, dr1)
    for (j = i + 1; drs.iterate (j, &dr2); j++)
      if (dr_may_alias_p (dr1, dr2, true))
        fprintf (file, "%d %d\n", i, j);

  return true;
}
/* Check if DR1 and DR2 are in the same object set.  */

static bool
dr_same_base_object_p (const struct data_reference *dr1,
                       const struct data_reference *dr2)
{
  return operand_equal_p (DR_BASE_OBJECT (dr1), DR_BASE_OBJECT (dr2), 0);
}
/* Uses DFS component number as representative of alias-sets.  Also tests for
   optimality by verifying if every connected component is a clique.  Returns
   true (1) if the above test is true, and false (0) otherwise.  */

static int
build_alias_set_optimal_p (vec<data_reference_p> drs)
{
  int num_vertices = drs.length ();
  struct graph *g = new_graph (num_vertices);
  data_reference_p dr1, dr2;
  int i, j;
  int num_connected_components;
  int v_indx1, v_indx2, num_vertices_in_component;
  int *all_vertices;
  int *vertices;
  struct graph_edge *e;
  int this_component_is_clique;
  int all_components_are_cliques = 1;

  FOR_EACH_VEC_ELT (drs, i, dr1)
    for (j = i + 1; drs.iterate (j, &dr2); j++)
      if (dr_may_alias_p (dr1, dr2, true))
        {
          add_edge (g, i, j);
          add_edge (g, j, i);
        }

  all_vertices = XNEWVEC (int, num_vertices);
  vertices = XNEWVEC (int, num_vertices);
  for (i = 0; i < num_vertices; i++)
    all_vertices[i] = i;

  num_connected_components = graphds_dfs (g, all_vertices, num_vertices,
                                          NULL, true, NULL);
  for (i = 0; i < g->n_vertices; i++)
    {
      data_reference_p dr = drs[i];
      base_alias_pair *bap;

      gcc_assert (dr->aux);
      bap = (base_alias_pair *)(dr->aux);

      bap->alias_set = XNEW (int);
      *(bap->alias_set) = g->vertices[i].component + 1;
    }

  /* Verify if the DFS numbering results in optimal solution.  */
  for (i = 0; i < num_connected_components; i++)
    {
      num_vertices_in_component = 0;
      /* Get all vertices whose DFS component number is the same as i.  */
      for (j = 0; j < num_vertices; j++)
        if (g->vertices[j].component == i)
          vertices[num_vertices_in_component++] = j;

      /* Now test if the vertices in 'vertices' form a clique, by testing
         for edges among each pair.  */
      this_component_is_clique = 1;
      for (v_indx1 = 0; v_indx1 < num_vertices_in_component; v_indx1++)
        {
          for (v_indx2 = v_indx1 + 1; v_indx2 < num_vertices_in_component; v_indx2++)
            {
              /* Check if the two vertices are connected by iterating
                 through all the edges which have one of these as source.  */
              e = g->vertices[vertices[v_indx2]].pred;
              while (e)
                {
                  if (e->src == vertices[v_indx1])
                    break;
                  e = e->pred_next;
                }
              if (!e)
                {
                  this_component_is_clique = 0;
                  break;
                }
            }
          if (!this_component_is_clique)
            break;
        }

      if (!this_component_is_clique)
        all_components_are_cliques = 0;
    }

  free (all_vertices);
  free (vertices);
  free_graph (g);
  return all_components_are_cliques;
}
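/* Informal example: with three references A, B and C where A may alias
   B and B may alias C, but A and C do not alias, the DFS puts all three
   in one connected component, so they all receive alias set 1; the
   component is not a clique (there is no A-C edge), hence the function
   returns 0.  */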
/* Group each data reference in DRS with its base object set num.  */

static void
build_base_obj_set_for_drs (vec<data_reference_p> drs)
{
  int num_vertex = drs.length ();
  struct graph *g = new_graph (num_vertex);
  data_reference_p dr1, dr2;
  int i, j;
  int *queue;

  FOR_EACH_VEC_ELT (drs, i, dr1)
    for (j = i + 1; drs.iterate (j, &dr2); j++)
      if (dr_same_base_object_p (dr1, dr2))
        {
          add_edge (g, i, j);
          add_edge (g, j, i);
        }

  queue = XNEWVEC (int, num_vertex);
  for (i = 0; i < num_vertex; i++)
    queue[i] = i;

  graphds_dfs (g, queue, num_vertex, NULL, true, NULL);

  for (i = 0; i < g->n_vertices; i++)
    {
      data_reference_p dr = drs[i];
      base_alias_pair *bap;

      gcc_assert (dr->aux);
      bap = (base_alias_pair *)(dr->aux);

      bap->base_obj_set = g->vertices[i].component + 1;
    }

  free (queue);
  free_graph (g);
}
/* Build the data references for PBB.  */

static void
build_pbb_drs (poly_bb_p pbb)
{
  int j;
  data_reference_p dr;
  vec<data_reference_p> gbb_drs = GBB_DATA_REFS (PBB_BLACK_BOX (pbb));

  FOR_EACH_VEC_ELT (gbb_drs, j, dr)
    build_poly_dr (dr, pbb);
}
/* Dump to file the alias graphs for the data references in DRS.  */

static void
dump_alias_graphs (vec<data_reference_p> drs)
{
  char comment[100];
  FILE *file_dimacs, *file_ecc, *file_dot;

  file_dimacs = fopen ("/tmp/dr_alias_graph_dimacs", "ab");
  if (file_dimacs)
    {
      snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
                current_function_name ());
      write_alias_graph_to_ascii_dimacs (file_dimacs, comment, drs);
      fclose (file_dimacs);
    }

  file_ecc = fopen ("/tmp/dr_alias_graph_ecc", "ab");
  if (file_ecc)
    {
      snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
                current_function_name ());
      write_alias_graph_to_ascii_ecc (file_ecc, comment, drs);
      fclose (file_ecc);
    }

  file_dot = fopen ("/tmp/dr_alias_graph_dot", "ab");
  if (file_dot)
    {
      snprintf (comment, sizeof (comment), "%s %s", main_input_filename,
                current_function_name ());
      write_alias_graph_to_ascii_dot (file_dot, comment, drs);
      fclose (file_dot);
    }
}
/* Build data references in SCOP.  */

static void
build_scop_drs (scop_p scop)
{
  int i, j;
  poly_bb_p pbb;
  data_reference_p dr;
  vec<data_reference_p> drs;
  drs.create (3);

  /* Remove all the PBBs that do not have data references: these basic
     blocks are not handled in the polyhedral representation.  */
  for (i = 0; SCOP_BBS (scop).iterate (i, &pbb); i++)
    if (GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).is_empty ())
      {
        free_gimple_bb (PBB_BLACK_BOX (pbb));
        free_poly_bb (pbb);
        SCOP_BBS (scop).ordered_remove (i);
        i--;
      }

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    for (j = 0; GBB_DATA_REFS (PBB_BLACK_BOX (pbb)).iterate (j, &dr); j++)
      drs.safe_push (dr);

  FOR_EACH_VEC_ELT (drs, i, dr)
    dr->aux = XNEW (base_alias_pair);

  if (!build_alias_set_optimal_p (drs))
    {
      /* TODO: Add support when building alias set is not optimal.  */
      ;
    }

  build_base_obj_set_for_drs (drs);

  /* When debugging, enable the following code.  This cannot be used
     in production compilers.  */
  if (0)
    dump_alias_graphs (drs);

  drs.release ();

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    build_pbb_drs (pbb);
}
/* Return a gsi at the position of the phi node STMT.  */

static gimple_stmt_iterator
gsi_for_phi_node (gimple stmt)
{
  gimple_stmt_iterator psi;
  basic_block bb = gimple_bb (stmt);

  for (psi = gsi_start_phis (bb); !gsi_end_p (psi); gsi_next (&psi))
    if (stmt == gsi_stmt (psi))
      return psi;

  gcc_unreachable ();
  return psi;
}
/* Analyze all the data references of STMTS and add them to the
   GBB_DATA_REFS vector of BB.  */

static void
analyze_drs_in_stmts (scop_p scop, basic_block bb, vec<gimple> stmts)
{
  loop_p nest;
  gimple_bb_p gbb;
  gimple stmt;
  int i;
  sese region = SCOP_REGION (scop);

  if (!bb_in_sese_p (bb, region))
    return;

  nest = outermost_loop_in_sese_1 (region, bb);
  gbb = gbb_from_bb (bb);

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      loop_p loop;

      if (is_gimple_debug (stmt))
        continue;

      loop = loop_containing_stmt (stmt);
      if (!loop_in_sese_p (loop, region))
        loop = nest;

      graphite_find_data_references_in_stmt (nest, loop, stmt,
                                             &GBB_DATA_REFS (gbb));
    }
}
/* Insert STMT at the end of the STMTS sequence and then insert the
   statements from STMTS at INSERT_GSI and call analyze_drs_in_stmts
   on the inserted statements.  */

static void
insert_stmts (scop_p scop, gimple stmt, gimple_seq stmts,
              gimple_stmt_iterator insert_gsi)
{
  gimple_stmt_iterator gsi;
  vec<gimple> x;
  x.create (3);

  gimple_seq_add_stmt (&stmts, stmt);
  for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
    x.safe_push (gsi_stmt (gsi));

  gsi_insert_seq_before (&insert_gsi, stmts, GSI_SAME_STMT);
  analyze_drs_in_stmts (scop, gsi_bb (insert_gsi), x);
  x.release ();
}
/* Insert the assignment "RES := EXPR" just after AFTER_STMT.  */

static void
insert_out_of_ssa_copy (scop_p scop, tree res, tree expr, gimple after_stmt)
{
  gimple_seq stmts;
  gimple_stmt_iterator gsi;
  tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
  gimple stmt = gimple_build_assign (unshare_expr (res), var);
  vec<gimple> x;
  x.create (3);

  gimple_seq_add_stmt (&stmts, stmt);
  for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
    x.safe_push (gsi_stmt (gsi));

  if (gimple_code (after_stmt) == GIMPLE_PHI)
    {
      gsi = gsi_after_labels (gimple_bb (after_stmt));
      gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
    }
  else
    {
      gsi = gsi_for_stmt (after_stmt);
      gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
    }

  analyze_drs_in_stmts (scop, gimple_bb (after_stmt), x);
  x.release ();
}
/* Creates a poly_bb_p for basic_block BB from the existing PBB.  */

static void
new_pbb_from_pbb (scop_p scop, poly_bb_p pbb, basic_block bb)
{
  vec<data_reference_p> drs;
  drs.create (3);
  gimple_bb_p gbb = PBB_BLACK_BOX (pbb);
  gimple_bb_p gbb1 = new_gimple_bb (bb, drs);
  poly_bb_p pbb1 = new_poly_bb (scop, gbb1);
  int index, n = SCOP_BBS (scop).length ();

  /* The INDEX of PBB in SCOP_BBS.  */
  for (index = 0; index < n; index++)
    if (SCOP_BBS (scop)[index] == pbb)
      break;

  pbb1->domain = isl_set_copy (pbb->domain);

  GBB_PBB (gbb1) = pbb1;
  GBB_CONDITIONS (gbb1) = GBB_CONDITIONS (gbb).copy ();
  GBB_CONDITION_CASES (gbb1) = GBB_CONDITION_CASES (gbb).copy ();
  SCOP_BBS (scop).safe_insert (index + 1, pbb1);
}
/* Insert on edge E the assignment "RES := EXPR".  */

static void
insert_out_of_ssa_copy_on_edge (scop_p scop, edge e, tree res, tree expr)
{
  gimple_stmt_iterator gsi;
  gimple_seq stmts = NULL;
  tree var = force_gimple_operand (expr, &stmts, true, NULL_TREE);
  gimple stmt = gimple_build_assign (unshare_expr (res), var);
  basic_block bb;
  vec<gimple> x;
  x.create (3);

  gimple_seq_add_stmt (&stmts, stmt);
  for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
    x.safe_push (gsi_stmt (gsi));

  gsi_insert_seq_on_edge (e, stmts);
  gsi_commit_edge_inserts ();
  bb = gimple_bb (stmt);

  if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
    return;

  if (!gbb_from_bb (bb))
    new_pbb_from_pbb (scop, pbb_from_bb (e->src), bb);

  analyze_drs_in_stmts (scop, bb, x);
  x.release ();
}
/* Creates a zero dimension array of the same type as VAR.  */

static tree
create_zero_dim_array (tree var, const char *base_name)
{
  tree index_type = build_index_type (integer_zero_node);
  tree elt_type = TREE_TYPE (var);
  tree array_type = build_array_type (elt_type, index_type);
  tree base = create_tmp_var (array_type, base_name);

  return build4 (ARRAY_REF, elt_type, base, integer_zero_node, NULL_TREE,
                 NULL_TREE);
}
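/* Informal sketch: for an SSA name of type int this creates a temporary
   such as "int Close_Phi[1];" (BASE_NAME picks the prefix) and returns
   the ARRAY_REF Close_Phi[0], so a scalar that must cross basic block
   boundaries can be rewritten as a load/store of this one-element
   array.  */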
/* Returns true when PHI is a loop close phi node.  */

static bool
scalar_close_phi_node_p (gimple phi)
{
  if (gimple_code (phi) != GIMPLE_PHI
      || virtual_operand_p (gimple_phi_result (phi)))
    return false;

  /* Note that loop close phi nodes should have a single argument
     because we translated the representation into a canonical form
     before Graphite: see canonicalize_loop_closed_ssa_form.  */
  return (gimple_phi_num_args (phi) == 1);
}
/* For a definition DEF in REGION, propagates the expression EXPR in
   all the uses of DEF outside REGION.  */

static void
propagate_expr_outside_region (tree def, tree expr, sese region)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  gimple_seq stmts;
  bool replaced_once = false;

  gcc_assert (TREE_CODE (def) == SSA_NAME);

  expr = force_gimple_operand (unshare_expr (expr), &stmts, true,
                               NULL_TREE);

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    if (!is_gimple_debug (use_stmt)
        && !bb_in_sese_p (gimple_bb (use_stmt), region))
      {
        ssa_op_iter iter;
        use_operand_p use_p;

        FOR_EACH_PHI_OR_STMT_USE (use_p, use_stmt, iter, SSA_OP_ALL_USES)
          if (operand_equal_p (def, USE_FROM_PTR (use_p), 0)
              && (replaced_once = true))
            replace_exp (use_p, expr);

        update_stmt (use_stmt);
      }

  if (replaced_once)
    {
      gsi_insert_seq_on_edge (SESE_ENTRY (region), stmts);
      gsi_commit_edge_inserts ();
    }
}
/* Rewrite out of SSA the reduction phi node at PSI by creating a zero
   dimension array for it.  */

static void
rewrite_close_phi_out_of_ssa (scop_p scop, gimple_stmt_iterator *psi)
{
  sese region = SCOP_REGION (scop);
  gimple phi = gsi_stmt (*psi);
  tree res = gimple_phi_result (phi);
  basic_block bb = gimple_bb (phi);
  gimple_stmt_iterator gsi = gsi_after_labels (bb);
  tree arg = gimple_phi_arg_def (phi, 0);
  gimple stmt;

  /* Note that loop close phi nodes should have a single argument
     because we translated the representation into a canonical form
     before Graphite: see canonicalize_loop_closed_ssa_form.  */
  gcc_assert (gimple_phi_num_args (phi) == 1);

  /* The phi node can be a non close phi node, when its argument is
     invariant, or a default definition.  */
  if (is_gimple_min_invariant (arg)
      || SSA_NAME_IS_DEFAULT_DEF (arg))
    {
      propagate_expr_outside_region (res, arg, region);
      gsi_next (psi);
      return;
    }

  else if (gimple_bb (SSA_NAME_DEF_STMT (arg))->loop_father == bb->loop_father)
    {
      propagate_expr_outside_region (res, arg, region);
      stmt = gimple_build_assign (res, arg);
      remove_phi_node (psi, false);
      gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
      SSA_NAME_DEF_STMT (res) = stmt;
      return;
    }

  /* If res is scev analyzable and is not a scalar value, it is safe
     to ignore the close phi node: it will be code generated in the
     out of Graphite pass.  */
  else if (scev_analyzable_p (res, region))
    {
      loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (res));
      tree scev;

      if (!loop_in_sese_p (loop, region))
        {
          loop = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
          scev = scalar_evolution_in_region (region, loop, arg);
          scev = compute_overall_effect_of_inner_loop (loop, scev);
        }
      else
        scev = scalar_evolution_in_region (region, loop, res);

      if (tree_does_not_contain_chrecs (scev))
        propagate_expr_outside_region (res, scev, region);

      gsi_next (psi);
      return;
    }
  else
    {
      tree zero_dim_array = create_zero_dim_array (res, "Close_Phi");

      stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));

      if (TREE_CODE (arg) == SSA_NAME)
        insert_out_of_ssa_copy (scop, zero_dim_array, arg,
                                SSA_NAME_DEF_STMT (arg));
      else
        insert_out_of_ssa_copy_on_edge (scop, single_pred_edge (bb),
                                        zero_dim_array, arg);
    }

  remove_phi_node (psi, false);
  SSA_NAME_DEF_STMT (res) = stmt;

  insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
}
/* Rewrite out of SSA the reduction phi node at PSI by creating a zero
   dimension array for it.  */

static void
rewrite_phi_out_of_ssa (scop_p scop, gimple_stmt_iterator *psi)
{
  size_t i;
  gimple phi = gsi_stmt (*psi);
  basic_block bb = gimple_bb (phi);
  tree res = gimple_phi_result (phi);
  tree zero_dim_array = create_zero_dim_array (res, "phi_out_of_ssa");
  gimple stmt;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      edge e = gimple_phi_arg_edge (phi, i);

      /* Avoid the insertion of code in the loop latch to please the
         pattern matching of the vectorizer.  */
      if (TREE_CODE (arg) == SSA_NAME
          && e->src == bb->loop_father->latch)
        insert_out_of_ssa_copy (scop, zero_dim_array, arg,
                                SSA_NAME_DEF_STMT (arg));
      else
        insert_out_of_ssa_copy_on_edge (scop, e, zero_dim_array, arg);
    }

  stmt = gimple_build_assign (res, unshare_expr (zero_dim_array));
  remove_phi_node (psi, false);
  SSA_NAME_DEF_STMT (res) = stmt;
  insert_stmts (scop, stmt, NULL, gsi_after_labels (bb));
}
/* Rewrite the degenerate phi node at position PSI from the degenerate
   form "x = phi (y, y, ..., y)" to "x = y".  */

static void
rewrite_degenerate_phi (gimple_stmt_iterator *psi)
{
  tree rhs;
  gimple stmt;
  gimple_stmt_iterator gsi;
  gimple phi = gsi_stmt (*psi);
  tree res = gimple_phi_result (phi);
  basic_block bb;

  bb = gimple_bb (phi);
  rhs = degenerate_phi_result (phi);
  gcc_assert (rhs);

  stmt = gimple_build_assign (res, rhs);
  remove_phi_node (psi, false);
  SSA_NAME_DEF_STMT (res) = stmt;

  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
}
/* Rewrite out of SSA all the reduction phi nodes of SCOP.  */

static void
rewrite_reductions_out_of_ssa (scop_p scop)
{
  basic_block bb;
  gimple_stmt_iterator psi;
  sese region = SCOP_REGION (scop);

  FOR_EACH_BB (bb)
    if (bb_in_sese_p (bb, region))
      for (psi = gsi_start_phis (bb); !gsi_end_p (psi);)
        {
          gimple phi = gsi_stmt (psi);

          if (virtual_operand_p (gimple_phi_result (phi)))
            {
              gsi_next (&psi);
              continue;
            }

          if (gimple_phi_num_args (phi) > 1
              && degenerate_phi_result (phi))
            rewrite_degenerate_phi (&psi);

          else if (scalar_close_phi_node_p (phi))
            rewrite_close_phi_out_of_ssa (scop, &psi);

          else if (reduction_phi_p (region, &psi))
            rewrite_phi_out_of_ssa (scop, &psi);
        }

  update_ssa (TODO_update_ssa);
#ifdef ENABLE_CHECKING
  verify_loop_closed_ssa (true);
#endif
}
/* Rewrite the scalar dependence of DEF used in USE_STMT with a memory
   read from ZERO_DIM_ARRAY.  */

static void
rewrite_cross_bb_scalar_dependence (scop_p scop, tree zero_dim_array,
                                    tree def, gimple use_stmt)
{
  gimple name_stmt;
  tree name;
  ssa_op_iter iter;
  use_operand_p use_p;

  gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);

  name = copy_ssa_name (def, NULL);
  name_stmt = gimple_build_assign (name, zero_dim_array);

  gimple_assign_set_lhs (name_stmt, name);
  insert_stmts (scop, name_stmt, NULL, gsi_for_stmt (use_stmt));

  FOR_EACH_SSA_USE_OPERAND (use_p, use_stmt, iter, SSA_OP_ALL_USES)
    if (operand_equal_p (def, USE_FROM_PTR (use_p), 0))
      replace_exp (use_p, name);

  update_stmt (use_stmt);
}
/* For every definition DEF in the SCOP that is used outside the scop,
   insert a closing-scop definition in the basic block just after this
   SCOP.  */

static void
handle_scalar_deps_crossing_scop_limits (scop_p scop, tree def, gimple stmt)
{
  tree var = create_tmp_reg (TREE_TYPE (def), NULL);
  tree new_name = make_ssa_name (var, stmt);
  bool needs_copy = false;
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  gimple use_stmt;
  sese region = SCOP_REGION (scop);

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    {
      if (!bb_in_sese_p (gimple_bb (use_stmt), region))
        {
          FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
            {
              SET_USE (use_p, new_name);
            }
          update_stmt (use_stmt);
          needs_copy = true;
        }
    }

  /* Insert in the empty BB just after the scop a use of DEF such
     that the rewrite of cross_bb_scalar_dependences won't insert
     arrays everywhere else.  */
  if (needs_copy)
    {
      gimple assign = gimple_build_assign (new_name, def);
      gimple_stmt_iterator psi = gsi_after_labels (SESE_EXIT (region)->dest);

      SSA_NAME_DEF_STMT (new_name) = assign;
      update_stmt (assign);
      gsi_insert_before (&psi, assign, GSI_SAME_STMT);
    }
}
/* Rewrite the scalar dependences crossing the boundary of the BB
   containing STMT with an array.  Return true when something has been
   changed.  */

static bool
rewrite_cross_bb_scalar_deps (scop_p scop, gimple_stmt_iterator *gsi)
{
  sese region = SCOP_REGION (scop);
  gimple stmt = gsi_stmt (*gsi);
  imm_use_iterator imm_iter;
  tree def;
  basic_block def_bb;
  tree zero_dim_array = NULL_TREE;
  gimple use_stmt;
  bool res = false;

  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      def = gimple_assign_lhs (stmt);
      break;

    case GIMPLE_CALL:
      def = gimple_call_lhs (stmt);
      break;

    default:
      return false;
    }

  if (!def
      || !is_gimple_reg (def))
    return false;

  if (scev_analyzable_p (def, region))
    {
      loop_p loop = loop_containing_stmt (SSA_NAME_DEF_STMT (def));
      tree scev = scalar_evolution_in_region (region, loop, def);

      if (tree_contains_chrecs (scev, NULL))
        return false;

      propagate_expr_outside_region (def, scev, region);
      return true;
    }

  def_bb = gimple_bb (stmt);

  handle_scalar_deps_crossing_scop_limits (scop, def, stmt);

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    if (gimple_code (use_stmt) == GIMPLE_PHI
        && (res = true))
      {
        gimple_stmt_iterator psi = gsi_for_stmt (use_stmt);

        if (scalar_close_phi_node_p (gsi_stmt (psi)))
          rewrite_close_phi_out_of_ssa (scop, &psi);
        else
          rewrite_phi_out_of_ssa (scop, &psi);
      }

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, def)
    if (gimple_code (use_stmt) != GIMPLE_PHI
        && def_bb != gimple_bb (use_stmt)
        && !is_gimple_debug (use_stmt)
        && (res = true))
      {
        if (!zero_dim_array)
          {
            zero_dim_array = create_zero_dim_array
              (def, "Cross_BB_scalar_dependence");
            insert_out_of_ssa_copy (scop, zero_dim_array, def,
                                    SSA_NAME_DEF_STMT (def));
            gsi_next (gsi);
          }

        rewrite_cross_bb_scalar_dependence (scop, zero_dim_array,
                                            def, use_stmt);
      }

  return res;
}
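
/* A minimal sketch of the whole rewrite, assuming the SCOP contains

     bb_1:  d_5 = a_2 + b_3;
     bb_2:  x_7 = d_5 * 2;

   with the two statements in different basic blocks: a store

     Cross_BB_scalar_dependence[0] = d_5;

   is inserted after the definition and the use in bb_2 is rewritten
   to read the value back from the same zero-dimensional array, which
   removes the scalar dependence between the two blocks.  */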
/* Rewrite out of SSA all the cross basic block scalar dependences of
   SCOP.  */

static void
rewrite_cross_bb_scalar_deps_out_of_ssa (scop_p scop)
{
  basic_block bb;
  gimple_stmt_iterator psi;
  sese region = SCOP_REGION (scop);
  bool changed = false;

  /* Create an extra empty BB after the scop.  */
  split_edge (SESE_EXIT (region));

  FOR_EACH_BB (bb)
    if (bb_in_sese_p (bb, region))
      for (psi = gsi_start_bb (bb); !gsi_end_p (psi); gsi_next (&psi))
        changed |= rewrite_cross_bb_scalar_deps (scop, &psi);

  if (changed)
    {
      scev_reset_htab ();
      update_ssa (TODO_update_ssa);
#ifdef ENABLE_CHECKING
      verify_loop_closed_ssa (true);
#endif
    }
}
/* Returns the number of pbbs that are in loops contained in SCOP.  */

static int
nb_pbbs_in_loops (scop_p scop)
{
  int i;
  poly_bb_p pbb;
  int res = 0;

  FOR_EACH_VEC_ELT (SCOP_BBS (scop), i, pbb)
    if (loop_in_sese_p (gbb_loop (PBB_BLACK_BOX (pbb)), SCOP_REGION (scop)))
      res++;

  return res;
}
/* Return the number of data references in BB that write in
   memory.  */

static int
nb_data_writes_in_bb (basic_block bb)
{
  int res = 0;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    if (gimple_vdef (gsi_stmt (gsi)))
      res++;

  return res;
}
/* Splits at STMT the basic block BB represented as PBB in the
   polyhedral form.  */

static edge
split_pbb (scop_p scop, poly_bb_p pbb, basic_block bb, gimple stmt)
{
  edge e1 = split_block (bb, stmt);
  new_pbb_from_pbb (scop, pbb, e1->dest);
  return e1;
}
/* Splits STMT out of its current BB.  This is done for reduction
   statements for which we want to ignore data dependences.  */

static basic_block
split_reduction_stmt (scop_p scop, gimple stmt)
{
  basic_block bb = gimple_bb (stmt);
  poly_bb_p pbb = pbb_from_bb (bb);
  gimple_bb_p gbb = gbb_from_bb (bb);
  edge e1;
  int i;
  data_reference_p dr;

  /* Do not split basic blocks with no writes to memory: the reduction
     will be the only write to memory.  */
  if (nb_data_writes_in_bb (bb) == 0
      /* Or if we have already marked BB as a reduction.  */
      || PBB_IS_REDUCTION (pbb_from_bb (bb)))
    return bb;

  e1 = split_pbb (scop, pbb, bb, stmt);

  /* Split once more only when the reduction stmt is not the only one
     left in the original BB.  */
  if (!gsi_one_before_end_p (gsi_start_nondebug_bb (bb)))
    {
      gimple_stmt_iterator gsi = gsi_last_bb (bb);
      gsi_prev (&gsi);
      e1 = split_pbb (scop, pbb, bb, gsi_stmt (gsi));
    }

  /* A part of the data references will end in a different basic block
     after the split: move the DRs from the original GBB to the newly
     created GBB.  */
  FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
    {
      basic_block bb1 = gimple_bb (DR_STMT (dr));

      if (bb1 != bb)
        {
          gimple_bb_p gbb1 = gbb_from_bb (bb1);
          GBB_DATA_REFS (gbb1).safe_push (dr);
          GBB_DATA_REFS (gbb).ordered_remove (i);
          i--;
        }
    }

  return e1->dest;
}
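
/* A minimal sketch, assuming BB contains

     A[i_1] = t_2;
     sum_3 = sum_1 + x_4;   <- STMT, the reduction statement
     B[i_1] = u_5;

   the block is split around STMT so that the reduction ends up alone
   in its own pbb; that pbb can then be flagged with PBB_IS_REDUCTION
   and have its data dependences relaxed, while the data references
   that moved to the new blocks are re-attached to the matching gbbs.  */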
/* Return true when stmt is a reduction operation.  */

static inline bool
is_reduction_operation_p (gimple stmt)
{
  enum tree_code code;

  gcc_assert (is_gimple_assign (stmt));
  code = gimple_assign_rhs_code (stmt);

  return flag_associative_math
    && commutative_tree_code (code)
    && associative_tree_code (code);
}
/* Returns true when PHI contains an argument ARG.  */

static bool
phi_contains_arg (gimple phi, tree arg)
{
  size_t i;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (operand_equal_p (arg, gimple_phi_arg_def (phi, i), 0))
      return true;

  return false;
}
/* Return a loop phi node that corresponds to a reduction containing LHS.  */

static gimple
follow_ssa_with_commutative_ops (tree arg, tree lhs)
{
  gimple stmt;

  if (TREE_CODE (arg) != SSA_NAME)
    return NULL;

  stmt = SSA_NAME_DEF_STMT (arg);

  if (gimple_code (stmt) == GIMPLE_NOP
      || gimple_code (stmt) == GIMPLE_CALL)
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      if (phi_contains_arg (stmt, lhs))
        return stmt;
      return NULL;
    }

  if (!is_gimple_assign (stmt))
    return NULL;

  if (gimple_num_ops (stmt) == 2)
    return follow_ssa_with_commutative_ops (gimple_assign_rhs1 (stmt), lhs);

  if (is_reduction_operation_p (stmt))
    {
      gimple res = follow_ssa_with_commutative_ops (gimple_assign_rhs1 (stmt),
                                                    lhs);

      return res ? res :
        follow_ssa_with_commutative_ops (gimple_assign_rhs2 (stmt), lhs);
    }

  return NULL;
}
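
/* A minimal sketch, assuming LHS is sum_5 and ARG is sum_3 in

     sum_3 = sum_1 + x_4;
     sum_5 = sum_3 + y_6;

   the walk follows the right-hand sides through the commutative and
   associative operations until it reaches the loop phi node
   sum_1 = PHI <init_7, sum_5>, which contains LHS and is returned as
   the head of the reduction cycle.  */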
/* Detect commutative and associative scalar reductions starting at
   the STMT.  Return the phi node of the reduction cycle, or NULL.  */

static gimple
detect_commutative_reduction_arg (tree lhs, gimple stmt, tree arg,
                                  vec<gimple> *in, vec<gimple> *out)
{
  gimple phi = follow_ssa_with_commutative_ops (arg, lhs);

  if (!phi)
    return NULL;

  in->safe_push (stmt);
  out->safe_push (stmt);
  return phi;
}
/* Detect commutative and associative scalar reductions starting at
   STMT.  Return the phi node of the reduction cycle, or NULL.  */

static gimple
detect_commutative_reduction_assign (gimple stmt, vec<gimple> *in,
                                     vec<gimple> *out)
{
  tree lhs = gimple_assign_lhs (stmt);

  if (gimple_num_ops (stmt) == 2)
    return detect_commutative_reduction_arg (lhs, stmt,
                                             gimple_assign_rhs1 (stmt),
                                             in, out);

  if (is_reduction_operation_p (stmt))
    {
      gimple res = detect_commutative_reduction_arg (lhs, stmt,
                                                     gimple_assign_rhs1 (stmt),
                                                     in, out);
      return res ? res
        : detect_commutative_reduction_arg (lhs, stmt,
                                            gimple_assign_rhs2 (stmt),
                                            in, out);
    }

  return NULL;
}
/* Return a loop phi node that corresponds to a reduction containing LHS.  */

static gimple
follow_inital_value_to_phi (tree arg, tree lhs)
{
  gimple stmt;

  if (!arg || TREE_CODE (arg) != SSA_NAME)
    return NULL;

  stmt = SSA_NAME_DEF_STMT (arg);

  if (gimple_code (stmt) == GIMPLE_PHI
      && phi_contains_arg (stmt, lhs))
    return stmt;

  return NULL;
}
/* Return the edge of the loop PHI argument that carries the initial
   value coming from outside the loop.  */

static edge
edge_initial_value_for_loop_phi (gimple phi)
{
  size_t i;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (loop_depth (e->src->loop_father)
          < loop_depth (e->dest->loop_father))
        return e;
    }

  return NULL;
}
/* Return the argument of the loop PHI that is the initial value coming
   from outside the loop.  */

static tree
initial_value_for_loop_phi (gimple phi)
{
  size_t i;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (loop_depth (e->src->loop_father)
          < loop_depth (e->dest->loop_father))
        return gimple_phi_arg_def (phi, i);
    }

  return NULL_TREE;
}
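
/* A minimal sketch for both functions above, assuming the loop phi

     sum_1 = PHI <0.0 (preheader), sum_5 (latch)>

   the preheader edge comes from a block whose loop_father is
   shallower than the loop containing the phi, so the edge variant
   returns that edge and the value variant returns 0.0.  */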
/* Returns true when DEF is used outside the reduction cycle of
   LOOP_PHI.  */

static bool
used_outside_reduction (tree def, gimple loop_phi)
{
  use_operand_p use_p;
  imm_use_iterator imm_iter;
  loop_p loop = loop_containing_stmt (loop_phi);

  /* In LOOP, DEF should be used only in LOOP_PHI.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
    {
      gimple stmt = USE_STMT (use_p);

      if (stmt != loop_phi
          && !is_gimple_debug (stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
        return true;
    }

  return false;
}
/* Detect commutative and associative scalar reductions belonging to
   the SCOP starting at the loop closed phi node STMT.  Return the phi
   node of the reduction cycle, or NULL.  */

static gimple
detect_commutative_reduction (scop_p scop, gimple stmt, vec<gimple> *in,
                              vec<gimple> *out)
{
  if (scalar_close_phi_node_p (stmt))
    {
      gimple def, loop_phi, phi, close_phi = stmt;
      tree init, lhs, arg = gimple_phi_arg_def (close_phi, 0);

      if (TREE_CODE (arg) != SSA_NAME)
        return NULL;

      /* Note that loop close phi nodes should have a single argument
         because we translated the representation into a canonical form
         before Graphite: see canonicalize_loop_closed_ssa_form.  */
      gcc_assert (gimple_phi_num_args (close_phi) == 1);

      def = SSA_NAME_DEF_STMT (arg);
      if (!stmt_in_sese_p (def, SCOP_REGION (scop))
          || !(loop_phi = detect_commutative_reduction (scop, def, in, out)))
        return NULL;

      lhs = gimple_phi_result (close_phi);
      init = initial_value_for_loop_phi (loop_phi);
      phi = follow_inital_value_to_phi (init, lhs);

      if (phi && (used_outside_reduction (lhs, phi)
                  || !has_single_use (gimple_phi_result (phi))))
        return NULL;

      in->safe_push (loop_phi);
      out->safe_push (close_phi);
      return phi;
    }

  if (gimple_code (stmt) == GIMPLE_ASSIGN)
    return detect_commutative_reduction_assign (stmt, in, out);

  return NULL;
}
/* Translate the scalar reduction statement STMT to an array RED
   knowing that its recursive phi node is LOOP_PHI.  */

static void
translate_scalar_reduction_to_array_for_stmt (scop_p scop, tree red,
                                              gimple stmt, gimple loop_phi)
{
  tree res = gimple_phi_result (loop_phi);
  gimple assign = gimple_build_assign (res, unshare_expr (red));
  gimple_stmt_iterator gsi;

  insert_stmts (scop, assign, NULL, gsi_after_labels (gimple_bb (loop_phi)));

  assign = gimple_build_assign (unshare_expr (red), gimple_assign_lhs (stmt));
  gsi = gsi_for_stmt (stmt);
  gsi_next (&gsi);
  insert_stmts (scop, assign, NULL, gsi);
}
/* Removes the PHI node and resets all the debug stmts that are using
   the PHI_RESULT.  */

static void
remove_phi (gimple phi)
{
  imm_use_iterator imm_iter;
  tree def;
  use_operand_p use_p;
  gimple_stmt_iterator gsi;
  gimple stmt;
  vec<gimple> update;
  unsigned int i;

  update.create (3);

  def = PHI_RESULT (phi);
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
    {
      stmt = USE_STMT (use_p);

      if (is_gimple_debug (stmt))
        {
          gimple_debug_bind_reset_value (stmt);
          update.safe_push (stmt);
        }
    }

  FOR_EACH_VEC_ELT (update, i, stmt)
    update_stmt (stmt);

  update.release ();

  gsi = gsi_for_phi_node (phi);
  remove_phi_node (&gsi, false);
}
/* Helper function for for_each_index.  For each INDEX of the data
   reference REF, returns true when its indices are valid in the loop
   nest LOOP passed in as DATA.  */

static bool
dr_indices_valid_in_loop (tree ref ATTRIBUTE_UNUSED, tree *index, void *data)
{
  loop_p loop;
  basic_block header, def_bb;
  gimple stmt;

  if (TREE_CODE (*index) != SSA_NAME)
    return true;

  loop = *((loop_p *) data);
  header = loop->header;
  stmt = SSA_NAME_DEF_STMT (*index);

  if (!stmt)
    return true;

  def_bb = gimple_bb (stmt);

  if (!def_bb)
    return true;

  return dominated_by_p (CDI_DOMINATORS, header, def_bb);
}
/* When the result of a CLOSE_PHI is written to a memory location,
   return a pointer to that memory reference, otherwise return
   NULL_TREE.  */

static tree
close_phi_written_to_memory (gimple close_phi)
{
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple stmt;
  tree res, def = gimple_phi_result (close_phi);

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
    if ((stmt = USE_STMT (use_p))
        && gimple_code (stmt) == GIMPLE_ASSIGN
        && (res = gimple_assign_lhs (stmt)))
      {
        switch (TREE_CODE (res))
          {
          case VAR_DECL:
          case PARM_DECL:
          case RESULT_DECL:
            return res;

          case ARRAY_REF:
          case MEM_REF:
            {
              tree arg = gimple_phi_arg_def (close_phi, 0);
              loop_p nest = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));

              /* FIXME: this restriction is for id-{24,25}.f and
                 could be handled by duplicating the computation of
                 array indices before the loop of the close_phi.  */
              if (for_each_index (&res, dr_indices_valid_in_loop, &nest))
                return res;

              return NULL_TREE;
            }

          default:
            continue;
          }
      }

  return NULL_TREE;
}
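
/* A minimal sketch, assuming the close phi result sum_6 is only used
   by the store "s = sum_6;" to a user variable: the VAR_DECL s is
   returned and later reused as the reduction location, so no extra
   zero-dimensional array has to be created.  For an ARRAY_REF such as
   A[j_2], the reference is returned only when all of its indices are
   computed before the loop of the close phi.  */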
/* Rewrite out of SSA the reduction described by the loop phi nodes
   IN, and the close phi nodes OUT.  IN and OUT are structured by loop
   levels like this:

   IN: stmt, loop_n, ..., loop_0
   OUT: stmt, close_n, ..., close_0

   the first element is the reduction statement, and the next elements
   are the loop and close phi nodes of each of the outer loops.  */

static void
translate_scalar_reduction_to_array (scop_p scop,
                                     vec<gimple> in,
                                     vec<gimple> out)
{
  gimple loop_phi;
  unsigned int i = out.length () - 1;
  tree red = close_phi_written_to_memory (out[i]);

  FOR_EACH_VEC_ELT (in, i, loop_phi)
    {
      gimple close_phi = out[i];

      if (i == 0)
        {
          gimple stmt = loop_phi;
          basic_block bb = split_reduction_stmt (scop, stmt);
          poly_bb_p pbb = pbb_from_bb (bb);
          PBB_IS_REDUCTION (pbb) = true;
          gcc_assert (close_phi == loop_phi);

          if (!red)
            red = create_zero_dim_array
              (gimple_assign_lhs (stmt), "Commutative_Associative_Reduction");

          translate_scalar_reduction_to_array_for_stmt (scop, red, stmt, in[1]);
          continue;
        }

      if (i == in.length () - 1)
        {
          insert_out_of_ssa_copy (scop, gimple_phi_result (close_phi),
                                  unshare_expr (red), close_phi);
          insert_out_of_ssa_copy_on_edge
            (scop, edge_initial_value_for_loop_phi (loop_phi),
             unshare_expr (red), initial_value_for_loop_phi (loop_phi));
        }

      remove_phi (loop_phi);
      remove_phi (close_phi);
    }
}
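
/* A minimal sketch of the overall effect, assuming the source loop

     for (i = 0; i < n; i++)
       sum += a[i];

   after the rewrite the reduction value is carried through memory:

     red[0] = sum;                   <- on the loop entry edge
     for (i = 0; i < n; i++)
       {
         sum = red[0];
         sum = sum + a[i];
         red[0] = sum;
       }
     sum = red[0];                   <- after the close phi

   where red stands for the Commutative_Associative_Reduction array,
   or for the memory location found by close_phi_written_to_memory.
   The loop and close phi nodes of the reduction cycle are removed.  */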
/* Rewrites out of SSA a commutative reduction at CLOSE_PHI.  Returns
   true when something has been changed.  */

static bool
rewrite_commutative_reductions_out_of_ssa_close_phi (scop_p scop,
                                                     gimple close_phi)
{
  bool res;
  vec<gimple> in;
  vec<gimple> out;

  in.create (10);
  out.create (10);

  detect_commutative_reduction (scop, close_phi, &in, &out);
  res = in.length () > 1;
  if (res)
    translate_scalar_reduction_to_array (scop, in, out);

  in.release ();
  out.release ();
  return res;
}
/* Rewrites all the commutative reductions from LOOP out of SSA.
   Returns true when something has been changed.  */

static bool
rewrite_commutative_reductions_out_of_ssa_loop (scop_p scop,
                                                loop_p loop)
{
  gimple_stmt_iterator gsi;
  edge exit = single_exit (loop);
  tree res;
  bool changed = false;

  if (!exit)
    return false;

  for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    if ((res = gimple_phi_result (gsi_stmt (gsi)))
        && !virtual_operand_p (res)
        && !scev_analyzable_p (res, SCOP_REGION (scop)))
      changed |= rewrite_commutative_reductions_out_of_ssa_close_phi
        (scop, gsi_stmt (gsi));

  return changed;
}
/* Rewrites all the commutative reductions from SCOP out of SSA.  */

static void
rewrite_commutative_reductions_out_of_ssa (scop_p scop)
{
  loop_iterator li;
  loop_p loop;
  bool changed = false;
  sese region = SCOP_REGION (scop);

  FOR_EACH_LOOP (li, loop, 0)
    if (loop_in_sese_p (loop, region))
      changed |= rewrite_commutative_reductions_out_of_ssa_loop (scop, loop);

  if (changed)
    {
      scev_reset_htab ();
      gsi_commit_edge_inserts ();
      update_ssa (TODO_update_ssa);
#ifdef ENABLE_CHECKING
      verify_loop_closed_ssa (true);
#endif
    }
}
/* Can all ivs be represented by a signed integer?
   As CLooG might generate negative values in its expressions, signed loop ivs
   are required in the backend.  */

static bool
scop_ivs_can_be_represented (scop_p scop)
{
  loop_iterator li;
  loop_p loop;
  gimple_stmt_iterator psi;
  bool result = true;

  FOR_EACH_LOOP (li, loop, 0)
    {
      if (!loop_in_sese_p (loop, SCOP_REGION (scop)))
        continue;

      for (psi = gsi_start_phis (loop->header);
           !gsi_end_p (psi); gsi_next (&psi))
        {
          gimple phi = gsi_stmt (psi);
          tree res = PHI_RESULT (phi);
          tree type = TREE_TYPE (res);

          if (TYPE_UNSIGNED (type)
              && TYPE_PRECISION (type) >= TYPE_PRECISION (long_long_integer_type_node))
            {
              result = false;
              break;
            }
        }

      if (!result)
        {
          FOR_EACH_LOOP_BREAK (li);
          break;
        }
    }

  return result;
}
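
/* A minimal sketch of a rejected case, assuming a loop controlled by

     unsigned long long i_1 = PHI <0, i_7>

   on a target where long long is 64 bits wide: CLooG computes on
   signed values, so an unsigned iv of at least that precision cannot
   be safely narrowed to a signed type and the SCOP is not translated
   to the polyhedral representation.  */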
/* Builds the polyhedral representation for a SESE region.  */

void
build_poly_scop (scop_p scop)
{
  sese region = SCOP_REGION (scop);
  graphite_dim_t max_dim;

  build_scop_bbs (scop);

  /* FIXME: This restriction is needed to avoid a problem in CLooG.
     Once CLooG is fixed, remove this guard.  Anyways, it makes no
     sense to optimize a scop containing only PBBs that do not belong
     to any loops.  */
  if (nb_pbbs_in_loops (scop) == 0)
    return;

  if (!scop_ivs_can_be_represented (scop))
    return;

  if (flag_associative_math)
    rewrite_commutative_reductions_out_of_ssa (scop);

  build_sese_loop_nests (region);
  build_sese_conditions (region);
  find_scop_parameters (scop);

  max_dim = PARAM_VALUE (PARAM_GRAPHITE_MAX_NB_SCOP_PARAMS);
  if (scop_nb_params (scop) > max_dim)
    return;

  build_scop_iteration_domain (scop);
  build_scop_context (scop);
  add_conditions_to_constraints (scop);

  /* Rewrite out of SSA only after having translated the
     representation to the polyhedral representation to avoid scev
     analysis failures.  That means that these functions will insert
     new data references that they create in the right place.  */
  rewrite_reductions_out_of_ssa (scop);
  rewrite_cross_bb_scalar_deps_out_of_ssa (scop);

  build_scop_drs (scop);
  scop_to_lst (scop);
  build_scop_scattering (scop);

  /* This SCoP has been translated to the polyhedral
     representation.  */
  POLY_SCOP_P (scop) = true;
}