1 /* Gimple Represented as Polyhedra.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@inria.fr>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass converts GIMPLE to GRAPHITE, performs some loop
22 transformations and then converts the resulting representation back
25 An early description of this pass can be found in the GCC Summit'06
26 paper "GRAPHITE: Polyhedral Analyses and Optimizations for GCC".
27 The wiki page http://gcc.gnu.org/wiki/Graphite contains pointers to
30 One important document to read is CLooG's internal manual:
31 http://repo.or.cz/w/cloog-ppl.git?a=blob_plain;f=doc/cloog.texi;hb=HEAD
32 that describes the data structure of loops used in this file, and
33 the functions that are used for transforming the code. */
37 #include "coretypes.h"
42 #include "basic-block.h"
43 #include "diagnostic.h"
44 #include "tree-flow.h"
46 #include "tree-dump.h"
49 #include "tree-chrec.h"
50 #include "tree-data-ref.h"
51 #include "tree-scalar-evolution.h"
52 #include "tree-pass.h"
54 #include "value-prof.h"
55 #include "pointer-set.h"
59 #include "cloog/cloog.h"
62 static VEC (scop_p
, heap
) *current_scops
;
64 /* Converts a GMP constant V to a tree and returns it. */
67 gmp_cst_to_tree (tree type
, Value v
)
69 return build_int_cst (type
, value_get_si (v
));
72 /* Debug the list of old induction variables for this SCOP. */
75 debug_oldivs (scop_p scop
)
80 fprintf (stderr
, "Old IVs:");
82 for (i
= 0; VEC_iterate (name_tree
, SCOP_OLDIVS (scop
), i
, oldiv
); i
++)
84 fprintf (stderr
, "(");
85 print_generic_expr (stderr
, oldiv
->t
, 0);
86 fprintf (stderr
, ", %s, %d)\n", oldiv
->name
, oldiv
->loop
->num
);
88 fprintf (stderr
, "\n");
91 /* Debug the loops around basic block GB. */
94 debug_loop_vec (graphite_bb_p gb
)
99 fprintf (stderr
, "Loop Vec:");
101 for (i
= 0; VEC_iterate (loop_p
, GBB_LOOPS (gb
), i
, loop
); i
++)
102 fprintf (stderr
, "%d: %d, ", i
, loop
? loop
->num
: -1);
104 fprintf (stderr
, "\n");
107 /* Returns true if stack ENTRY is a constant. */
110 iv_stack_entry_is_constant (iv_stack_entry
*entry
)
112 return entry
->kind
== iv_stack_entry_const
;
115 /* Returns true if stack ENTRY is an induction variable. */
118 iv_stack_entry_is_iv (iv_stack_entry
*entry
)
120 return entry
->kind
== iv_stack_entry_iv
;
123 /* Push (IV, NAME) on STACK. */
126 loop_iv_stack_push_iv (loop_iv_stack stack
, tree iv
, const char *name
)
128 iv_stack_entry
*entry
= XNEW (iv_stack_entry
);
129 name_tree named_iv
= XNEW (struct name_tree
);
132 named_iv
->name
= name
;
134 entry
->kind
= iv_stack_entry_iv
;
135 entry
->data
.iv
= named_iv
;
137 VEC_safe_push (iv_stack_entry_p
, heap
, *stack
, entry
);
140 /* Inserts a CONSTANT in STACK at INDEX. */
143 loop_iv_stack_insert_constant (loop_iv_stack stack
, int index
,
146 iv_stack_entry
*entry
= XNEW (iv_stack_entry
);
148 entry
->kind
= iv_stack_entry_const
;
149 entry
->data
.constant
= constant
;
151 VEC_safe_insert (iv_stack_entry_p
, heap
, *stack
, index
, entry
);
154 /* Pops and frees an element out of STACK. */
157 loop_iv_stack_pop (loop_iv_stack stack
)
159 iv_stack_entry_p entry
= VEC_pop (iv_stack_entry_p
, *stack
);
161 free (entry
->data
.iv
);
165 /* Get the IV at INDEX in STACK. */
168 loop_iv_stack_get_iv (loop_iv_stack stack
, int index
)
170 iv_stack_entry_p entry
= VEC_index (iv_stack_entry_p
, *stack
, index
);
171 iv_stack_entry_data data
= entry
->data
;
173 return iv_stack_entry_is_iv (entry
) ? data
.iv
->t
: data
.constant
;
176 /* Get the IV from its NAME in STACK. */
179 loop_iv_stack_get_iv_from_name (loop_iv_stack stack
, const char* name
)
182 iv_stack_entry_p entry
;
184 for (i
= 0; VEC_iterate (iv_stack_entry_p
, *stack
, i
, entry
); i
++)
186 name_tree iv
= entry
->data
.iv
;
187 if (!strcmp (name
, iv
->name
))
194 /* Prints on stderr the contents of STACK. */
197 debug_loop_iv_stack (loop_iv_stack stack
)
200 iv_stack_entry_p entry
;
203 fprintf (stderr
, "(");
205 for (i
= 0; VEC_iterate (iv_stack_entry_p
, *stack
, i
, entry
); i
++)
210 fprintf (stderr
, " ");
212 if (iv_stack_entry_is_iv (entry
))
214 name_tree iv
= entry
->data
.iv
;
215 fprintf (stderr
, "%s:", iv
->name
);
216 print_generic_expr (stderr
, iv
->t
, 0);
220 tree constant
= entry
->data
.constant
;
221 print_generic_expr (stderr
, constant
, 0);
222 fprintf (stderr
, ":");
223 print_generic_expr (stderr
, constant
, 0);
227 fprintf (stderr
, ")\n");
233 free_loop_iv_stack (loop_iv_stack stack
)
236 iv_stack_entry_p entry
;
238 for (i
= 0; VEC_iterate (iv_stack_entry_p
, *stack
, i
, entry
); i
++)
240 free (entry
->data
.iv
);
244 VEC_free (iv_stack_entry_p
, heap
, *stack
);
249 /* Structure containing the mapping between the CLooG's induction
250 variable and the type of the old induction variable. */
251 typedef struct ivtype_map_elt
254 const char *cloog_iv
;
257 /* Print to stderr the element ELT. */
260 debug_ivtype_elt (ivtype_map_elt elt
)
262 fprintf (stderr
, "(%s, ", elt
->cloog_iv
);
263 print_generic_expr (stderr
, elt
->type
, 0);
264 fprintf (stderr
, ")\n");
267 /* Helper function for debug_ivtype_map. */
270 debug_ivtype_map_1 (void **slot
, void *s ATTRIBUTE_UNUSED
)
272 struct ivtype_map_elt
*entry
= (struct ivtype_map_elt
*) *slot
;
273 debug_ivtype_elt (entry
);
277 /* Print to stderr all the elements of MAP. */
280 debug_ivtype_map (htab_t map
)
282 htab_traverse (map
, debug_ivtype_map_1
, NULL
);
285 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
287 static inline ivtype_map_elt
288 new_ivtype_map_elt (const char *cloog_iv
, tree type
)
292 res
= XNEW (struct ivtype_map_elt
);
293 res
->cloog_iv
= cloog_iv
;
299 /* Computes a hash function for database element ELT. */
302 ivtype_map_elt_info (const void *elt
)
304 return htab_hash_pointer (((const struct ivtype_map_elt
*) elt
)->cloog_iv
);
307 /* Compares database elements E1 and E2. */
310 eq_ivtype_map_elts (const void *e1
, const void *e2
)
312 const struct ivtype_map_elt
*elt1
= (const struct ivtype_map_elt
*) e1
;
313 const struct ivtype_map_elt
*elt2
= (const struct ivtype_map_elt
*) e2
;
315 return (elt1
->cloog_iv
== elt2
->cloog_iv
);
320 /* Given a CLOOG_IV, returns the type that it should have in GCC land.
321 If the information is not available, i.e. in the case one of the
322 transforms created the loop, just return integer_type_node. */
325 gcc_type_for_cloog_iv (const char *cloog_iv
, graphite_bb_p gbb
)
327 struct ivtype_map_elt tmp
;
330 tmp
.cloog_iv
= cloog_iv
;
331 slot
= htab_find_slot (GBB_CLOOG_IV_TYPES (gbb
), &tmp
, NO_INSERT
);
334 return ((ivtype_map_elt
) *slot
)->type
;
336 return integer_type_node
;
339 /* Inserts constants derived from the USER_STMT argument list into the
340 STACK. This is needed to map old ivs to constants when loops have
344 loop_iv_stack_patch_for_consts (loop_iv_stack stack
,
345 struct clast_user_stmt
*user_stmt
)
347 struct clast_stmt
*t
;
349 CloogStatement
*cs
= user_stmt
->statement
;
350 graphite_bb_p gbb
= (graphite_bb_p
) cloog_statement_usr (cs
);
352 for (t
= user_stmt
->substitutions
; t
; t
= t
->next
)
354 struct clast_expr
*expr
= (struct clast_expr
*)
355 ((struct clast_assignment
*)t
)->RHS
;
356 struct clast_term
*term
= (struct clast_term
*) expr
;
358 /* FIXME: What should be done with expr_bin, expr_red? */
359 if (expr
->type
== expr_term
362 loop_p loop
= gbb_loop_at_index (gbb
, index
);
363 tree oldiv
= oldiv_for_loop (GBB_SCOP (gbb
), loop
);
364 tree type
= oldiv
? TREE_TYPE (oldiv
) : integer_type_node
;
365 tree value
= gmp_cst_to_tree (type
, term
->val
);
366 loop_iv_stack_insert_constant (stack
, index
, value
);
372 /* Removes all constants in the iv STACK. */
375 loop_iv_stack_remove_constants (loop_iv_stack stack
)
378 iv_stack_entry
*entry
;
380 for (i
= 0; VEC_iterate (iv_stack_entry_p
, *stack
, i
, entry
);)
382 if (iv_stack_entry_is_constant (entry
))
384 free (VEC_index (iv_stack_entry_p
, *stack
, i
));
385 VEC_ordered_remove (iv_stack_entry_p
, *stack
, i
);
392 /* Returns a new loop_to_cloog_loop_str structure. */
394 static inline struct loop_to_cloog_loop_str
*
395 new_loop_to_cloog_loop_str (int loop_num
,
397 CloogLoop
*cloog_loop
)
399 struct loop_to_cloog_loop_str
*result
;
401 result
= XNEW (struct loop_to_cloog_loop_str
);
402 result
->loop_num
= loop_num
;
403 result
->cloog_loop
= cloog_loop
;
404 result
->loop_position
= loop_position
;
409 /* Hash function for SCOP_LOOP2CLOOG_LOOP hash table. */
412 hash_loop_to_cloog_loop (const void *elt
)
414 return ((const struct loop_to_cloog_loop_str
*) elt
)->loop_num
;
417 /* Equality function for SCOP_LOOP2CLOOG_LOOP hash table. */
420 eq_loop_to_cloog_loop (const void *el1
, const void *el2
)
422 const struct loop_to_cloog_loop_str
*elt1
, *elt2
;
424 elt1
= (const struct loop_to_cloog_loop_str
*) el1
;
425 elt2
= (const struct loop_to_cloog_loop_str
*) el2
;
426 return elt1
->loop_num
== elt2
->loop_num
;
/* Compares two graphite bbs and returns an integer less than, equal to, or
   greater than zero if the first argument is considered to be respectively
   less than, equal to, or greater than the second.
   We compare using the lexicographic order of the static schedules.  */

static int
gbb_compare (const void *p_1, const void *p_2)
{
  const struct graphite_bb *const gbb_1
    = *(const struct graphite_bb *const*) p_1;
  const struct graphite_bb *const gbb_2
    = *(const struct graphite_bb *const*) p_2;

  /* A static schedule has one entry per loop level plus one for the
     textual order, hence nb_loops + 1 components.  */
  return lambda_vector_compare (GBB_STATIC_SCHEDULE (gbb_1),
				gbb_nb_loops (gbb_1) + 1,
				GBB_STATIC_SCHEDULE (gbb_2),
				gbb_nb_loops (gbb_2) + 1);
}
448 /* Sort graphite bbs in SCOP. */
451 graphite_sort_gbbs (scop_p scop
)
453 VEC (graphite_bb_p
, heap
) *bbs
= SCOP_BBS (scop
);
455 qsort (VEC_address (graphite_bb_p
, bbs
),
456 VEC_length (graphite_bb_p
, bbs
),
457 sizeof (graphite_bb_p
), gbb_compare
);
460 /* Dump conditions of a graphite basic block GBB on FILE. */
463 dump_gbb_conditions (FILE *file
, graphite_bb_p gbb
)
467 VEC (gimple
, heap
) *conditions
= GBB_CONDITIONS (gbb
);
469 if (VEC_empty (gimple
, conditions
))
472 fprintf (file
, "\tbb %d\t: cond = {", GBB_BB (gbb
)->index
);
474 for (i
= 0; VEC_iterate (gimple
, conditions
, i
, stmt
); i
++)
475 print_gimple_stmt (file
, stmt
, 0, 0);
477 fprintf (file
, "}\n");
480 /* Converts the graphite scheduling function into a cloog scattering
481 matrix. This scattering matrix is used to limit the possible cloog
482 output to valid programs in respect to the scheduling function.
484 SCATTERING_DIMENSIONS specifies the dimensionality of the scattering
485 matrix. CLooG 0.14.0 and previous versions require, that all scattering
486 functions of one CloogProgram have the same dimensionality, therefore we
487 allow to specify it. (Should be removed in future versions) */
490 schedule_to_scattering (graphite_bb_p gb
, int scattering_dimensions
)
493 scop_p scop
= GBB_SCOP (gb
);
495 int nb_iterators
= gbb_nb_loops (gb
);
497 /* The cloog scattering matrix consists of these colums:
499 scattering_dimensions cols = Scattering dimensions,
500 nb_iterators cols = bb's iterators,
501 scop_nb_params cols = Parameters,
506 scattering_dimensions = 5
516 s1 s2 s3 s4 s5 i p1 p2 1
517 1 0 0 0 0 0 0 0 -4 = 0
518 0 1 0 0 0 -1 0 0 0 = 0
519 0 0 1 0 0 0 0 0 -5 = 0 */
520 int nb_params
= scop_nb_params (scop
);
521 int nb_cols
= 1 + scattering_dimensions
+ nb_iterators
+ nb_params
+ 1;
522 int col_const
= nb_cols
- 1;
523 int col_iter_offset
= 1 + scattering_dimensions
;
525 CloogMatrix
*scat
= cloog_matrix_alloc (scattering_dimensions
, nb_cols
);
527 gcc_assert (scattering_dimensions
>= nb_iterators
* 2 + 1);
529 /* Initialize the identity matrix. */
530 for (i
= 0; i
< scattering_dimensions
; i
++)
531 value_set_si (scat
->p
[i
][i
+ 1], 1);
533 /* Textual order outside the first loop */
534 value_set_si (scat
->p
[0][col_const
], -GBB_STATIC_SCHEDULE (gb
)[0]);
536 /* For all surrounding loops. */
537 for (i
= 0; i
< nb_iterators
; i
++)
539 int schedule
= GBB_STATIC_SCHEDULE (gb
)[i
+ 1];
541 /* Iterations of this loop. */
542 value_set_si (scat
->p
[2 * i
+ 1][col_iter_offset
+ i
], -1);
544 /* Textual order inside this loop. */
545 value_set_si (scat
->p
[2 * i
+ 2][col_const
], -schedule
);
551 /* Print the schedules of GB to FILE with INDENT white spaces before.
552 VERBOSITY determines how verbose the code pretty printers are. */
555 print_graphite_bb (FILE *file
, graphite_bb_p gb
, int indent
, int verbosity
)
557 CloogMatrix
*scattering
;
560 fprintf (file
, "\nGBB (\n");
562 print_loops_bb (file
, GBB_BB (gb
), indent
+2, verbosity
);
566 fprintf (file
, " (domain: \n");
567 cloog_matrix_print (file
, GBB_DOMAIN (gb
));
568 fprintf (file
, " )\n");
571 if (GBB_STATIC_SCHEDULE (gb
))
573 fprintf (file
, " (static schedule: ");
574 print_lambda_vector (file
, GBB_STATIC_SCHEDULE (gb
),
575 gbb_nb_loops (gb
) + 1);
576 fprintf (file
, " )\n");
581 fprintf (file
, " (contained loops: \n");
582 for (i
= 0; VEC_iterate (loop_p
, GBB_LOOPS (gb
), i
, loop
); i
++)
584 fprintf (file
, " iterator %d => NULL \n", i
);
586 fprintf (file
, " iterator %d => loop %d \n", i
,
588 fprintf (file
, " )\n");
591 if (GBB_DATA_REFS (gb
))
592 dump_data_references (file
, GBB_DATA_REFS (gb
));
594 if (GBB_CONDITIONS (gb
))
596 fprintf (file
, " (conditions: \n");
597 dump_gbb_conditions (file
, gb
);
598 fprintf (file
, " )\n");
602 && GBB_STATIC_SCHEDULE (gb
))
604 fprintf (file
, " (scattering: \n");
605 scattering
= schedule_to_scattering (gb
, 2 * gbb_nb_loops (gb
) + 1);
606 cloog_matrix_print (file
, scattering
);
607 cloog_matrix_free (scattering
);
608 fprintf (file
, " )\n");
611 fprintf (file
, ")\n");
614 /* Print to STDERR the schedules of GB with VERBOSITY level. */
617 debug_gbb (graphite_bb_p gb
, int verbosity
)
619 print_graphite_bb (stderr
, gb
, 0, verbosity
);
623 /* Print SCOP to FILE. VERBOSITY determines how verbose the pretty
627 print_scop (FILE *file
, scop_p scop
, int verbosity
)
632 fprintf (file
, "\nSCoP_%d_%d (\n",
633 SCOP_ENTRY (scop
)->index
, SCOP_EXIT (scop
)->index
);
635 fprintf (file
, " (cloog: \n");
636 cloog_program_print (file
, SCOP_PROG (scop
));
637 fprintf (file
, " )\n");
644 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
645 print_graphite_bb (file
, gb
, 0, verbosity
);
648 fprintf (file
, ")\n");
651 /* Print all the SCOPs to FILE. VERBOSITY determines how verbose the
652 code pretty printers are. */
655 print_scops (FILE *file
, int verbosity
)
660 for (i
= 0; VEC_iterate (scop_p
, current_scops
, i
, scop
); i
++)
661 print_scop (file
, scop
, verbosity
);
664 /* Debug SCOP. VERBOSITY determines how verbose the code pretty
668 debug_scop (scop_p scop
, int verbosity
)
670 print_scop (stderr
, scop
, verbosity
);
/* Debug all SCOPs from CURRENT_SCOPS.  VERBOSITY determines how
   verbose the code pretty printers are.  */

void
debug_scops (int verbosity)
{
  print_scops (stderr, verbosity);
}
682 /* Pretty print to FILE the SCOP in DOT format. */
685 dot_scop_1 (FILE *file
, scop_p scop
)
690 basic_block entry
= SCOP_ENTRY (scop
);
691 basic_block exit
= SCOP_EXIT (scop
);
693 fprintf (file
, "digraph SCoP_%d_%d {\n", entry
->index
,
699 fprintf (file
, "%d [shape=triangle];\n", bb
->index
);
702 fprintf (file
, "%d [shape=box];\n", bb
->index
);
704 if (bb_in_scop_p (bb
, scop
))
705 fprintf (file
, "%d [color=red];\n", bb
->index
);
707 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
708 fprintf (file
, "%d -> %d;\n", bb
->index
, e
->dest
->index
);
711 fputs ("}\n\n", file
);
714 /* Display SCOP using dotty. */
717 dot_scop (scop_p scop
)
719 dot_scop_1 (stderr
, scop
);
722 /* Pretty print all SCoPs in DOT format and mark them with different colors.
723 If there are not enough colors, paint later SCoPs gray.
725 - "*" after the node number: entry of a SCoP,
726 - "#" after the node number: exit of a SCoP,
727 - "()" entry or exit not part of SCoP. */
730 dot_all_scops_1 (FILE *file
)
739 /* Disable debugging while printing graph. */
740 int tmp_dump_flags
= dump_flags
;
743 fprintf (file
, "digraph all {\n");
747 int part_of_scop
= false;
749 /* Use HTML for every bb label. So we are able to print bbs
750 which are part of two different SCoPs, with two different
751 background colors. */
752 fprintf (file
, "%d [label=<\n <TABLE BORDER=\"0\" CELLBORDER=\"1\" ",
754 fprintf (file
, "CELLSPACING=\"0\">\n");
756 /* Select color for SCoP. */
757 for (i
= 0; VEC_iterate (scop_p
, current_scops
, i
, scop
); i
++)
758 if (bb_in_scop_p (bb
, scop
)
759 || (SCOP_EXIT (scop
) == bb
)
760 || (SCOP_ENTRY (scop
) == bb
))
819 fprintf (file
, " <TR><TD WIDTH=\"50\" BGCOLOR=\"%s\">", color
);
821 if (!bb_in_scop_p (bb
, scop
))
822 fprintf (file
, " (");
824 if (bb
== SCOP_ENTRY (scop
)
825 && bb
== SCOP_EXIT (scop
))
826 fprintf (file
, " %d*# ", bb
->index
);
827 else if (bb
== SCOP_ENTRY (scop
))
828 fprintf (file
, " %d* ", bb
->index
);
829 else if (bb
== SCOP_EXIT (scop
))
830 fprintf (file
, " %d# ", bb
->index
);
832 fprintf (file
, " %d ", bb
->index
);
834 if (!bb_in_scop_p (bb
, scop
))
837 fprintf (file
, "</TD></TR>\n");
843 fprintf (file
, " <TR><TD WIDTH=\"50\" BGCOLOR=\"#ffffff\">");
844 fprintf (file
, " %d </TD></TR>\n", bb
->index
);
847 fprintf (file
, " </TABLE>>, shape=box, style=\"setlinewidth(0)\"]\n");
852 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
853 fprintf (file
, "%d -> %d;\n", bb
->index
, e
->dest
->index
);
856 fputs ("}\n\n", file
);
858 /* Enable debugging again. */
859 dump_flags
= tmp_dump_flags
;
862 /* Display all SCoPs using dotty. */
867 /* When debugging, enable the following code. This cannot be used
868 in production compilers because it calls "system". */
870 FILE *stream
= fopen ("/tmp/allscops.dot", "w");
873 dot_all_scops_1 (stream
);
876 system ("dotty /tmp/allscops.dot");
878 dot_all_scops_1 (stderr
);
882 /* Returns the outermost loop in SCOP that contains BB. */
885 outermost_loop_in_scop (scop_p scop
, basic_block bb
)
889 nest
= bb
->loop_father
;
890 while (loop_outer (nest
) && loop_in_scop_p (loop_outer (nest
), scop
))
891 nest
= loop_outer (nest
);
896 /* Returns the block preceding the entry of SCOP. */
899 block_before_scop (scop_p scop
)
901 return SESE_ENTRY (SCOP_REGION (scop
))->src
;
904 /* Return true when EXPR is an affine function in LOOP with parameters
905 instantiated relative to SCOP_ENTRY. */
908 loop_affine_expr (basic_block scop_entry
, struct loop
*loop
, tree expr
)
911 tree scev
= analyze_scalar_evolution (loop
, expr
);
913 scev
= instantiate_scev (scop_entry
, loop
, scev
);
915 return (evolution_function_is_invariant_p (scev
, n
)
916 || evolution_function_is_affine_multivariate_p (scev
, n
));
919 /* Return false if the tree_code of the operand OP or any of its operands
923 exclude_component_ref (tree op
)
930 if (TREE_CODE (op
) == COMPONENT_REF
)
934 len
= TREE_OPERAND_LENGTH (op
);
935 for (i
= 0; i
< len
; ++i
)
937 if (!exclude_component_ref (TREE_OPERAND (op
, i
)))
946 /* Return true if the operand OP is simple. */
949 is_simple_operand (loop_p loop
, gimple stmt
, tree op
)
951 /* It is not a simple operand when it is a declaration, */
953 /* or a structure, */
954 || AGGREGATE_TYPE_P (TREE_TYPE (op
))
955 /* or a memory access that cannot be analyzed by the data
956 reference analysis. */
957 || ((handled_component_p (op
) || INDIRECT_REF_P (op
))
958 && !stmt_simple_memref_p (loop
, stmt
, op
)))
961 return exclude_component_ref (op
);
964 /* Return true only when STMT is simple enough for being handled by
965 Graphite. This depends on SCOP_ENTRY, as the parametetrs are
966 initialized relatively to this basic block. */
969 stmt_simple_for_scop_p (basic_block scop_entry
, gimple stmt
)
971 basic_block bb
= gimple_bb (stmt
);
972 struct loop
*loop
= bb
->loop_father
;
974 /* GIMPLE_ASM and GIMPLE_CALL may embed arbitrary side effects.
975 Calls have side-effects, except those to const or pure
977 if (gimple_has_volatile_ops (stmt
)
978 || (gimple_code (stmt
) == GIMPLE_CALL
979 && !(gimple_call_flags (stmt
) & (ECF_CONST
| ECF_PURE
)))
980 || (gimple_code (stmt
) == GIMPLE_ASM
))
983 switch (gimple_code (stmt
))
993 enum tree_code code
= gimple_cond_code (stmt
);
995 /* We can only handle this kind of conditional expressions.
996 For inequalities like "if (i != 3 * k)" we need unions of
997 polyhedrons. Expressions like "if (a)" or "if (a == 15)" need
998 them for the else branch. */
999 if (!(code
== LT_EXPR
1002 || code
== GE_EXPR
))
1008 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, op_iter
, SSA_OP_ALL_USES
)
1009 if (!loop_affine_expr (scop_entry
, loop
, op
))
1017 enum tree_code code
= gimple_assign_rhs_code (stmt
);
1019 switch (get_gimple_rhs_class (code
))
1021 case GIMPLE_UNARY_RHS
:
1022 case GIMPLE_SINGLE_RHS
:
1023 return (is_simple_operand (loop
, stmt
, gimple_assign_lhs (stmt
))
1024 && is_simple_operand (loop
, stmt
, gimple_assign_rhs1 (stmt
)));
1026 case GIMPLE_BINARY_RHS
:
1027 return (is_simple_operand (loop
, stmt
, gimple_assign_lhs (stmt
))
1028 && is_simple_operand (loop
, stmt
, gimple_assign_rhs1 (stmt
))
1029 && is_simple_operand (loop
, stmt
, gimple_assign_rhs2 (stmt
)));
1031 case GIMPLE_INVALID_RHS
:
1040 size_t n
= gimple_call_num_args (stmt
);
1041 tree lhs
= gimple_call_lhs (stmt
);
1043 if (lhs
&& !is_simple_operand (loop
, stmt
, lhs
))
1046 for (i
= 0; i
< n
; i
++)
1047 if (!is_simple_operand (loop
, stmt
, gimple_call_arg (stmt
, i
)))
1054 /* These nodes cut a new scope. */
1061 /* Returns the statement of BB that contains a harmful operation: that
1062 can be a function call with side effects, the induction variables
1063 are not linear with respect to SCOP_ENTRY, etc. The current open
1064 scop should end before this statement. */
1067 harmful_stmt_in_bb (basic_block scop_entry
, basic_block bb
)
1069 gimple_stmt_iterator gsi
;
1071 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1072 if (!stmt_simple_for_scop_p (scop_entry
, gsi_stmt (gsi
)))
1073 return gsi_stmt (gsi
);
1078 /* Returns true when BB will be represented in graphite. Return false
1079 for the basic blocks that contain code eliminated in the code
1080 generation pass: i.e. induction variables and exit conditions. */
1083 graphite_stmt_p (scop_p scop
, basic_block bb
,
1084 VEC (data_reference_p
, heap
) *drs
)
1086 gimple_stmt_iterator gsi
;
1087 loop_p loop
= bb
->loop_father
;
1089 if (VEC_length (data_reference_p
, drs
) > 0)
1092 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1094 gimple stmt
= gsi_stmt (gsi
);
1096 switch (gimple_code (stmt
))
1098 /* Control flow expressions can be ignored, as they are
1099 represented in the iteration domains and will be
1100 regenerated by graphite. */
1108 tree var
= gimple_assign_lhs (stmt
);
1109 var
= analyze_scalar_evolution (loop
, var
);
1110 var
= instantiate_scev (block_before_scop (scop
), loop
, var
);
1112 if (chrec_contains_undetermined (var
))
1126 /* Store the GRAPHITE representation of BB. */
1129 new_graphite_bb (scop_p scop
, basic_block bb
)
1131 struct graphite_bb
*gbb
;
1132 VEC (data_reference_p
, heap
) *drs
= VEC_alloc (data_reference_p
, heap
, 5);
1133 struct loop
*nest
= outermost_loop_in_scop (scop
, bb
);
1134 gimple_stmt_iterator gsi
;
1136 bitmap_set_bit (SCOP_BBS_B (scop
), bb
->index
);
1138 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
1139 find_data_references_in_stmt (nest
, gsi_stmt (gsi
), &drs
);
1141 if (!graphite_stmt_p (scop
, bb
, drs
))
1143 free_data_refs (drs
);
1147 gbb
= XNEW (struct graphite_bb
);
1150 GBB_SCOP (gbb
) = scop
;
1151 GBB_DATA_REFS (gbb
) = drs
;
1152 GBB_DOMAIN (gbb
) = NULL
;
1153 GBB_CONDITIONS (gbb
) = NULL
;
1154 GBB_CONDITION_CASES (gbb
) = NULL
;
1155 GBB_LOOPS (gbb
) = NULL
;
1156 GBB_STATIC_SCHEDULE (gbb
) = NULL
;
1157 GBB_CLOOG_IV_TYPES (gbb
) = NULL
;
1158 VEC_safe_push (graphite_bb_p
, heap
, SCOP_BBS (scop
), gbb
);
1164 free_graphite_bb (struct graphite_bb
*gbb
)
1166 if (GBB_DOMAIN (gbb
))
1167 cloog_matrix_free (GBB_DOMAIN (gbb
));
1169 if (GBB_CLOOG_IV_TYPES (gbb
))
1170 htab_delete (GBB_CLOOG_IV_TYPES (gbb
));
1172 /* FIXME: free_data_refs is disabled for the moment, but should be
1175 free_data_refs (GBB_DATA_REFS (gbb)); */
1177 VEC_free (gimple
, heap
, GBB_CONDITIONS (gbb
));
1178 VEC_free (gimple
, heap
, GBB_CONDITION_CASES (gbb
));
1179 VEC_free (loop_p
, heap
, GBB_LOOPS (gbb
));
1180 GBB_BB (gbb
)->aux
= 0;
1186 /* Structure containing the mapping between the old names and the new
1187 names used after block copy in the new loop context. */
1188 typedef struct rename_map_elt
1190 tree old_name
, new_name
;
1194 /* Print to stderr the element ELT. */
1197 debug_rename_elt (rename_map_elt elt
)
1199 fprintf (stderr
, "(");
1200 print_generic_expr (stderr
, elt
->old_name
, 0);
1201 fprintf (stderr
, ", ");
1202 print_generic_expr (stderr
, elt
->new_name
, 0);
1203 fprintf (stderr
, ")\n");
1206 /* Helper function for debug_rename_map. */
1209 debug_rename_map_1 (void **slot
, void *s ATTRIBUTE_UNUSED
)
1211 struct rename_map_elt
*entry
= (struct rename_map_elt
*) *slot
;
1212 debug_rename_elt (entry
);
1216 /* Print to stderr all the elements of MAP. */
1219 debug_rename_map (htab_t map
)
1221 htab_traverse (map
, debug_rename_map_1
, NULL
);
1224 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
1226 static inline rename_map_elt
1227 new_rename_map_elt (tree old_name
, tree new_name
)
1231 res
= XNEW (struct rename_map_elt
);
1232 res
->old_name
= old_name
;
1233 res
->new_name
= new_name
;
1238 /* Computes a hash function for database element ELT. */
1241 rename_map_elt_info (const void *elt
)
1243 return htab_hash_pointer (((const struct rename_map_elt
*) elt
)->old_name
);
1246 /* Compares database elements E1 and E2. */
1249 eq_rename_map_elts (const void *e1
, const void *e2
)
1251 const struct rename_map_elt
*elt1
= (const struct rename_map_elt
*) e1
;
1252 const struct rename_map_elt
*elt2
= (const struct rename_map_elt
*) e2
;
1254 return (elt1
->old_name
== elt2
->old_name
);
1257 /* Returns the new name associated to OLD_NAME in MAP. */
1260 get_new_name_from_old_name (htab_t map
, tree old_name
)
1262 struct rename_map_elt tmp
;
1265 tmp
.old_name
= old_name
;
1266 slot
= htab_find_slot (map
, &tmp
, NO_INSERT
);
1269 return ((rename_map_elt
) *slot
)->new_name
;
1276 /* Returns true when BB is in REGION. */
1279 bb_in_sese_p (basic_block bb
, sese region
)
1281 return pointer_set_contains (SESE_REGION_BBS (region
), bb
);
1284 /* For a USE in BB, if BB is outside REGION, mark the USE in the
1285 SESE_LIVEIN and SESE_LIVEOUT sets. */
1288 sese_build_livein_liveouts_use (sese region
, basic_block bb
, tree use
)
1293 if (TREE_CODE (use
) != SSA_NAME
)
1296 ver
= SSA_NAME_VERSION (use
);
1297 def_bb
= gimple_bb (SSA_NAME_DEF_STMT (use
));
1299 || !bb_in_sese_p (def_bb
, region
)
1300 || bb_in_sese_p (bb
, region
))
1303 if (!SESE_LIVEIN_VER (region
, ver
))
1304 SESE_LIVEIN_VER (region
, ver
) = BITMAP_ALLOC (NULL
);
1306 bitmap_set_bit (SESE_LIVEIN_VER (region
, ver
), bb
->index
);
1307 bitmap_set_bit (SESE_LIVEOUT (region
), ver
);
1310 /* Marks for rewrite all the SSA_NAMES defined in REGION and that are
1311 used in BB that is outside of the REGION. */
1314 sese_build_livein_liveouts_bb (sese region
, basic_block bb
)
1316 gimple_stmt_iterator bsi
;
1322 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1323 for (bsi
= gsi_start_phis (e
->dest
); !gsi_end_p (bsi
); gsi_next (&bsi
))
1324 sese_build_livein_liveouts_use (region
, bb
,
1325 PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi
), e
));
1327 for (bsi
= gsi_start_bb (bb
); !gsi_end_p (bsi
); gsi_next (&bsi
))
1328 FOR_EACH_SSA_TREE_OPERAND (var
, gsi_stmt (bsi
), iter
, SSA_OP_ALL_USES
)
1329 sese_build_livein_liveouts_use (region
, bb
, var
);
1332 /* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
1335 sese_build_livein_liveouts (sese region
)
1339 SESE_LIVEOUT (region
) = BITMAP_ALLOC (NULL
);
1340 SESE_NUM_VER (region
) = num_ssa_names
;
1341 SESE_LIVEIN (region
) = XCNEWVEC (bitmap
, SESE_NUM_VER (region
));
1344 sese_build_livein_liveouts_bb (region
, bb
);
1347 /* Register basic blocks belonging to a region in a pointer set. */
1350 register_bb_in_sese (basic_block entry_bb
, basic_block exit_bb
, sese region
)
1354 basic_block bb
= entry_bb
;
1356 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
1358 if (!pointer_set_contains (SESE_REGION_BBS (region
), e
->dest
) &&
1359 e
->dest
->index
!= exit_bb
->index
)
1361 pointer_set_insert (SESE_REGION_BBS (region
), e
->dest
);
1362 register_bb_in_sese (e
->dest
, exit_bb
, region
);
1367 /* Builds a new SESE region from edges ENTRY and EXIT. */
1370 new_sese (edge entry
, edge exit
)
1372 sese res
= XNEW (struct sese
);
1374 SESE_ENTRY (res
) = entry
;
1375 SESE_EXIT (res
) = exit
;
1376 SESE_REGION_BBS (res
) = pointer_set_create ();
1377 register_bb_in_sese (entry
->dest
, exit
->dest
, res
);
1379 SESE_LIVEOUT (res
) = NULL
;
1380 SESE_NUM_VER (res
) = 0;
1381 SESE_LIVEIN (res
) = NULL
;
1386 /* Deletes REGION. */
1389 free_sese (sese region
)
1393 for (i
= 0; i
< SESE_NUM_VER (region
); i
++)
1394 BITMAP_FREE (SESE_LIVEIN_VER (region
, i
));
1396 if (SESE_LIVEIN (region
))
1397 free (SESE_LIVEIN (region
));
1399 if (SESE_LIVEOUT (region
))
1400 BITMAP_FREE (SESE_LIVEOUT (region
));
1402 pointer_set_destroy (SESE_REGION_BBS (region
));
1408 /* Creates a new scop starting with ENTRY. */
1411 new_scop (edge entry
, edge exit
)
1413 scop_p scop
= XNEW (struct scop
);
1415 gcc_assert (entry
&& exit
);
1417 SCOP_REGION (scop
) = new_sese (entry
, exit
);
1418 SCOP_BBS (scop
) = VEC_alloc (graphite_bb_p
, heap
, 3);
1419 SCOP_OLDIVS (scop
) = VEC_alloc (name_tree
, heap
, 3);
1420 SCOP_BBS_B (scop
) = BITMAP_ALLOC (NULL
);
1421 SCOP_LOOPS (scop
) = BITMAP_ALLOC (NULL
);
1422 SCOP_LOOP_NEST (scop
) = VEC_alloc (loop_p
, heap
, 3);
1423 SCOP_ADD_PARAMS (scop
) = true;
1424 SCOP_PARAMS (scop
) = VEC_alloc (name_tree
, heap
, 3);
1425 SCOP_PROG (scop
) = cloog_program_malloc ();
1426 cloog_program_set_names (SCOP_PROG (scop
), cloog_names_malloc ());
1427 SCOP_LOOP2CLOOG_LOOP (scop
) = htab_create (10, hash_loop_to_cloog_loop
,
1428 eq_loop_to_cloog_loop
,
1430 SCOP_LIVEOUT_RENAMES (scop
) = htab_create (10, rename_map_elt_info
,
1431 eq_rename_map_elts
, free
);
1438 free_scop (scop_p scop
)
1442 struct graphite_bb
*gb
;
1445 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
1446 free_graphite_bb (gb
);
1448 VEC_free (graphite_bb_p
, heap
, SCOP_BBS (scop
));
1449 BITMAP_FREE (SCOP_BBS_B (scop
));
1450 BITMAP_FREE (SCOP_LOOPS (scop
));
1451 VEC_free (loop_p
, heap
, SCOP_LOOP_NEST (scop
));
1453 for (i
= 0; VEC_iterate (name_tree
, SCOP_OLDIVS (scop
), i
, iv
); i
++)
1455 VEC_free (name_tree
, heap
, SCOP_OLDIVS (scop
));
1457 for (i
= 0; VEC_iterate (name_tree
, SCOP_PARAMS (scop
), i
, p
); i
++)
1460 VEC_free (name_tree
, heap
, SCOP_PARAMS (scop
));
1461 cloog_program_free (SCOP_PROG (scop
));
1462 htab_delete (SCOP_LOOP2CLOOG_LOOP (scop
));
1463 htab_delete (SCOP_LIVEOUT_RENAMES (scop
));
1464 free_sese (SCOP_REGION (scop
));
1468 /* Deletes all scops in SCOPS. */
1471 free_scops (VEC (scop_p
, heap
) *scops
)
1476 for (i
= 0; VEC_iterate (scop_p
, scops
, i
, scop
); i
++)
1479 VEC_free (scop_p
, heap
, scops
);
/* Classification of a basic block used by the SCoP detection; see
   get_bb_type.  NOTE(review): the extraction dropped several
   enumerators; the set below is reconstructed from the uses in
   get_bb_type and should be checked against the original.  */
typedef enum gbb_type {
  GBB_UNKNOWN,
  GBB_LOOP_SING_EXIT_HEADER,
  GBB_LOOP_MULT_EXIT_HEADER,
  GBB_LOOP_EXIT,
  GBB_COND_HEADER,
  GBB_SIMPLE,
  GBB_LAST
} gbb_type;
1492 /* Detect the type of BB. Loop headers are only marked, if they are
1493 new. This means their loop_father is different to LAST_LOOP.
1494 Otherwise they are treated like any other bb and their type can be
1498 get_bb_type (basic_block bb
, struct loop
*last_loop
)
1500 VEC (basic_block
, heap
) *dom
;
1502 struct loop
*loop
= bb
->loop_father
;
1504 /* Check, if we entry into a new loop. */
1505 if (loop
!= last_loop
)
1507 if (single_exit (loop
) != NULL
)
1508 return GBB_LOOP_SING_EXIT_HEADER
;
1509 else if (loop
->num
!= 0)
1510 return GBB_LOOP_MULT_EXIT_HEADER
;
1512 return GBB_COND_HEADER
;
1515 dom
= get_dominated_by (CDI_DOMINATORS
, bb
);
1516 nb_dom
= VEC_length (basic_block
, dom
);
1517 VEC_free (basic_block
, heap
, dom
);
1522 nb_suc
= VEC_length (edge
, bb
->succs
);
1524 if (nb_dom
== 1 && nb_suc
== 1)
1527 return GBB_COND_HEADER
;
1530 /* A SCoP detection region, defined using bbs as borders.
1531 All control flow touching this region, comes in passing basic_block ENTRY and
1532 leaves passing basic_block EXIT. By using bbs instead of edges for the
1533 borders we are able to represent also regions that do not have a single
1535 But as they have a single entry basic_block and a single exit basic_block, we
1536 are able to generate for every sd_region a single entry and exit edge.
1543 / \ This region contains: {3, 4, 5, 6, 7, 8}
1551 typedef struct sd_region_p
1553 /* The entry bb dominates all bbs in the sd_region. It is part of the
1557 /* The exit bb postdominates all bbs in the sd_region, but is not
1558 part of the region. */
1562 DEF_VEC_O(sd_region
);
1563 DEF_VEC_ALLOC_O(sd_region
, heap
);
1566 /* Moves the scops from SOURCE to TARGET and clean up SOURCE. */
1569 move_sd_regions (VEC (sd_region
, heap
) **source
, VEC (sd_region
, heap
) **target
)
1574 for (i
= 0; VEC_iterate (sd_region
, *source
, i
, s
); i
++)
1575 VEC_safe_push (sd_region
, heap
, *target
, s
);
1577 VEC_free (sd_region
, heap
, *source
);
1580 /* Return true when it is not possible to represent the upper bound of
1581 LOOP in the polyhedral representation. */
1584 graphite_cannot_represent_loop_niter (loop_p loop
)
1586 tree niter
= number_of_latch_executions (loop
);
1588 return chrec_contains_undetermined (niter
)
1589 || !scev_is_linear_expression (niter
);
1591 /* Store information needed by scopdet_* functions. */
1595 /* Where the last open scop would stop if the current BB is harmful. */
1598 /* Where the next scop would start if the current BB is harmful. */
1601 /* The bb or one of its children contains open loop exits. That means
1602 loop exit nodes that are not surrounded by a loop dominated by bb. */
1605 /* The bb or one of its children contains only structures we can handle. */
1610 static struct scopdet_info
build_scops_1 (basic_block
, VEC (sd_region
, heap
) **,
1613 /* Calculates BB infos. If bb is difficult we add valid SCoPs dominated by BB
1614 to SCOPS. TYPE is the gbb_type of BB. */
1616 static struct scopdet_info
1617 scopdet_basic_block_info (basic_block bb
, VEC (sd_region
, heap
) **scops
,
1620 struct loop
*loop
= bb
->loop_father
;
1621 struct scopdet_info result
;
1624 /* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
1625 stmt
= harmful_stmt_in_bb (ENTRY_BLOCK_PTR
, bb
);
1626 result
.difficult
= (stmt
!= NULL
);
1633 result
.exits
= false;
1638 result
.next
= single_succ (bb
);
1639 result
.exits
= false;
1643 case GBB_LOOP_SING_EXIT_HEADER
:
1645 VEC (sd_region
, heap
) *tmp_scops
= VEC_alloc (sd_region
, heap
,3);
1646 struct scopdet_info sinfo
;
1648 sinfo
= build_scops_1 (bb
, &tmp_scops
, loop
);
1650 result
.last
= single_exit (bb
->loop_father
)->src
;
1651 result
.next
= single_exit (bb
->loop_father
)->dest
;
1653 /* If we do not dominate result.next, remove it. It's either
1654 the EXIT_BLOCK_PTR, or another bb dominates it and will
1655 call the scop detection for this bb. */
1656 if (!dominated_by_p (CDI_DOMINATORS
, result
.next
, bb
))
1659 if (result
.last
->loop_father
!= loop
)
1662 if (graphite_cannot_represent_loop_niter (loop
))
1663 result
.difficult
= true;
1665 if (sinfo
.difficult
)
1666 move_sd_regions (&tmp_scops
, scops
);
1668 VEC_free (sd_region
, heap
, tmp_scops
);
1670 result
.exits
= false;
1671 result
.difficult
|= sinfo
.difficult
;
1675 case GBB_LOOP_MULT_EXIT_HEADER
:
1677 /* XXX: For now we just do not join loops with multiple exits. If the
1678 exits lead to the same bb it may be possible to join the loop. */
1679 VEC (sd_region
, heap
) *tmp_scops
= VEC_alloc (sd_region
, heap
, 3);
1680 VEC (edge
, heap
) *exits
= get_loop_exit_edges (loop
);
1683 build_scops_1 (bb
, &tmp_scops
, loop
);
1685 /* Scan the code dominated by this loop. This means all bbs, that are
1686 are dominated by a bb in this loop, but are not part of this loop.
1689 - The loop exit destination is dominated by the exit sources.
1691 TODO: We miss here the more complex cases:
1692 - The exit destinations are dominated by another bb inside the
1694 - The loop dominates bbs, that are not exit destinations. */
1695 for (i
= 0; VEC_iterate (edge
, exits
, i
, e
); i
++)
1696 if (e
->src
->loop_father
== loop
1697 && dominated_by_p (CDI_DOMINATORS
, e
->dest
, e
->src
))
1699 /* Pass loop_outer to recognize e->dest as loop header in
1701 if (e
->dest
->loop_father
->header
== e
->dest
)
1702 build_scops_1 (e
->dest
, &tmp_scops
,
1703 loop_outer (e
->dest
->loop_father
));
1705 build_scops_1 (e
->dest
, &tmp_scops
, e
->dest
->loop_father
);
1710 result
.difficult
= true;
1711 result
.exits
= false;
1712 move_sd_regions (&tmp_scops
, scops
);
1713 VEC_free (edge
, heap
, exits
);
1716 case GBB_COND_HEADER
:
1718 VEC (sd_region
, heap
) *tmp_scops
= VEC_alloc (sd_region
, heap
, 3);
1719 struct scopdet_info sinfo
;
1720 VEC (basic_block
, heap
) *dominated
;
1723 basic_block last_bb
= NULL
;
1725 result
.exits
= false;
1727 /* First check the successors of BB, and check if it is possible to join
1728 the different branches. */
1729 for (i
= 0; VEC_iterate (edge
, bb
->succs
, i
, e
); i
++)
1731 /* Ignore loop exits. They will be handled after the loop body. */
1732 if (is_loop_exit (loop
, e
->dest
))
1734 result
.exits
= true;
1738 /* Do not follow edges that lead to the end of the
1739 conditions block. For example, in
1749 the edge from 0 => 6. Only check if all paths lead to
1752 if (!single_pred_p (e
->dest
))
1754 /* Check, if edge leads directly to the end of this
1761 if (e
->dest
!= last_bb
)
1762 result
.difficult
= true;
1767 if (!dominated_by_p (CDI_DOMINATORS
, e
->dest
, bb
))
1769 result
.difficult
= true;
1773 sinfo
= build_scops_1 (e
->dest
, &tmp_scops
, loop
);
1775 result
.exits
|= sinfo
.exits
;
1776 result
.last
= sinfo
.last
;
1777 result
.difficult
|= sinfo
.difficult
;
1779 /* Checks, if all branches end at the same point.
1780 If that is true, the condition stays joinable.
1781 Have a look at the example above. */
1782 if (sinfo
.last
&& single_succ_p (sinfo
.last
))
1784 basic_block next_tmp
= single_succ (sinfo
.last
);
1789 if (next_tmp
!= last_bb
)
1790 result
.difficult
= true;
1793 result
.difficult
= true;
1796 /* If the condition is joinable. */
1797 if (!result
.exits
&& !result
.difficult
)
1799 /* Only return a next pointer if we dominate this pointer.
1800 Otherwise it will be handled by the bb dominating it. */
1801 if (dominated_by_p (CDI_DOMINATORS
, last_bb
, bb
) && last_bb
!= bb
)
1802 result
.next
= last_bb
;
1806 VEC_free (sd_region
, heap
, tmp_scops
);
1810 /* Scan remaining bbs dominated by BB. */
1811 dominated
= get_dominated_by (CDI_DOMINATORS
, bb
);
1813 for (i
= 0; VEC_iterate (basic_block
, dominated
, i
, dom_bb
); i
++)
1815 /* Ignore loop exits: they will be handled after the loop body. */
1816 if (loop_depth (find_common_loop (loop
, dom_bb
->loop_father
))
1817 < loop_depth (loop
))
1819 result
.exits
= true;
1823 /* Ignore the bbs processed above. */
1824 if (single_pred_p (dom_bb
) && single_pred (dom_bb
) == bb
)
1827 if (loop_depth (loop
) > loop_depth (dom_bb
->loop_father
))
1828 sinfo
= build_scops_1 (dom_bb
, &tmp_scops
, loop_outer (loop
));
1830 sinfo
= build_scops_1 (dom_bb
, &tmp_scops
, loop
);
1833 result
.exits
|= sinfo
.exits
;
1834 result
.difficult
= true;
1838 VEC_free (basic_block
, heap
, dominated
);
1841 move_sd_regions (&tmp_scops
, scops
);
1853 /* Creates the SCoPs and writes entry and exit points for every SCoP. */
1855 static struct scopdet_info
1856 build_scops_1 (basic_block current
, VEC (sd_region
, heap
) **scops
, loop_p loop
)
1858 bool in_scop
= false;
1859 sd_region open_scop
;
1860 struct scopdet_info sinfo
;
1862 /* Initialize result. */
1863 struct scopdet_info result
;
1864 result
.exits
= false;
1865 result
.difficult
= false;
1868 open_scop
.entry
= NULL
;
1869 open_scop
.exit
= NULL
;
1872 /* Loop over the dominance tree. If we meet a difficult bb, close
1873 the current SCoP. Loop and condition header start a new layer,
1874 and can only be added if all bbs in deeper layers are simple. */
1875 while (current
!= NULL
)
1877 sinfo
= scopdet_basic_block_info (current
, scops
, get_bb_type (current
,
1880 if (!in_scop
&& !(sinfo
.exits
|| sinfo
.difficult
))
1882 open_scop
.entry
= current
;
1883 open_scop
.exit
= NULL
;
1886 else if (in_scop
&& (sinfo
.exits
|| sinfo
.difficult
))
1888 open_scop
.exit
= current
;
1889 VEC_safe_push (sd_region
, heap
, *scops
, &open_scop
);
1893 result
.difficult
|= sinfo
.difficult
;
1894 result
.exits
|= sinfo
.exits
;
1896 current
= sinfo
.next
;
1899 /* Try to close open_scop, if we are still in an open SCoP. */
1905 for (i
= 0; VEC_iterate (edge
, sinfo
.last
->succs
, i
, e
); i
++)
1906 if (dominated_by_p (CDI_POST_DOMINATORS
, sinfo
.last
, e
->dest
))
1907 open_scop
.exit
= e
->dest
;
1909 if (!open_scop
.exit
&& open_scop
.entry
!= sinfo
.last
)
1910 open_scop
.exit
= sinfo
.last
;
1913 VEC_safe_push (sd_region
, heap
, *scops
, &open_scop
);
1917 result
.last
= sinfo
.last
;
1921 /* Checks if a bb is contained in REGION. */
1924 bb_in_sd_region (basic_block bb
, sd_region
*region
)
1926 return dominated_by_p (CDI_DOMINATORS
, bb
, region
->entry
)
1927 && !(dominated_by_p (CDI_DOMINATORS
, bb
, region
->exit
)
1928 && !dominated_by_p (CDI_DOMINATORS
, region
->entry
,
1932 /* Returns the single entry edge of REGION, if it does not exits NULL. */
1935 find_single_entry_edge (sd_region
*region
)
1941 FOR_EACH_EDGE (e
, ei
, region
->entry
->preds
)
1942 if (!bb_in_sd_region (e
->src
, region
))
1957 /* Returns the single exit edge of REGION, if it does not exits NULL. */
1960 find_single_exit_edge (sd_region
*region
)
1966 FOR_EACH_EDGE (e
, ei
, region
->exit
->preds
)
1967 if (bb_in_sd_region (e
->src
, region
))
1982 /* Create a single entry edge for REGION. */
1985 create_single_entry_edge (sd_region
*region
)
1987 if (find_single_entry_edge (region
))
1990 /* There are multiple predecessors for bb_3
2003 There are two edges (1->3, 2->3), that point from outside into the region,
2004 and another one (5->3), a loop latch, lead to bb_3.
2012 | |\ (3.0 -> 3.1) = single entry edge
2021 If the loop is part of the SCoP, we have to redirect the loop latches.
2027 | | (3.0 -> 3.1) = entry edge
2036 if (region
->entry
->loop_father
->header
!= region
->entry
2037 || dominated_by_p (CDI_DOMINATORS
,
2038 loop_latch_edge (region
->entry
->loop_father
)->src
,
2041 edge forwarder
= split_block_after_labels (region
->entry
);
2042 region
->entry
= forwarder
->dest
;
2045 /* This case is never executed, as the loop headers seem always to have a
2046 single edge pointing from outside into the loop. */
2049 #ifdef ENABLE_CHECKING
2050 gcc_assert (find_single_entry_edge (region
));
2054 /* Check if the sd_region, mentioned in EDGE, has no exit bb. */
2057 sd_region_without_exit (edge e
)
2059 sd_region
*r
= (sd_region
*) e
->aux
;
2062 return r
->exit
== NULL
;
2067 /* Create a single exit edge for REGION. */
2070 create_single_exit_edge (sd_region
*region
)
2074 edge forwarder
= NULL
;
2077 if (find_single_exit_edge (region
))
2080 /* We create a forwarder bb (5) for all edges leaving this region
2081 (3->5, 4->5). All other edges leading to the same bb, are moved
2082 to a new bb (6). If these edges where part of another region (2->5)
2083 we update the region->exit pointer, of this region.
2085 To identify which edge belongs to which region we depend on the e->aux
2086 pointer in every edge. It points to the region of the edge or to NULL,
2087 if the edge is not part of any region.
2089 1 2 3 4 1->5 no region, 2->5 region->exit = 5,
2090 \| |/ 3->5 region->exit = NULL, 4->5 region->exit = NULL
2095 1 2 3 4 1->6 no region, 2->6 region->exit = 6,
2096 | | \/ 3->5 no region, 4->5 no region,
2098 \| / 5->6 region->exit = 6
2101 Now there is only a single exit edge (5->6). */
2102 exit
= region
->exit
;
2103 region
->exit
= NULL
;
2104 forwarder
= make_forwarder_block (exit
, &sd_region_without_exit
, NULL
);
2106 /* Unmark the edges, that are no longer exit edges. */
2107 FOR_EACH_EDGE (e
, ei
, forwarder
->src
->preds
)
2111 /* Mark the new exit edge. */
2112 single_succ_edge (forwarder
->src
)->aux
= region
;
2114 /* Update the exit bb of all regions, where exit edges lead to
2116 FOR_EACH_EDGE (e
, ei
, forwarder
->dest
->preds
)
2118 ((sd_region
*) e
->aux
)->exit
= forwarder
->dest
;
2120 #ifdef ENABLE_CHECKING
2121 gcc_assert (find_single_exit_edge (region
));
2125 /* Unmark the exit edges of all REGIONS.
2126 See comment in "create_single_exit_edge". */
2129 unmark_exit_edges (VEC (sd_region
, heap
) *regions
)
2136 for (i
= 0; VEC_iterate (sd_region
, regions
, i
, s
); i
++)
2137 FOR_EACH_EDGE (e
, ei
, s
->exit
->preds
)
2142 /* Mark the exit edges of all REGIONS.
2143 See comment in "create_single_exit_edge". */
2146 mark_exit_edges (VEC (sd_region
, heap
) *regions
)
2153 for (i
= 0; VEC_iterate (sd_region
, regions
, i
, s
); i
++)
2154 FOR_EACH_EDGE (e
, ei
, s
->exit
->preds
)
2155 if (bb_in_sd_region (e
->src
, s
))
2159 /* Free and compute again all the dominators information. */
2162 recompute_all_dominators (void)
2164 mark_irreducible_loops ();
2165 free_dominance_info (CDI_DOMINATORS
);
2166 free_dominance_info (CDI_POST_DOMINATORS
);
2167 calculate_dominance_info (CDI_DOMINATORS
);
2168 calculate_dominance_info (CDI_POST_DOMINATORS
);
/* Verifies properties that GRAPHITE should maintain during translation.  */

static void
graphite_verify (void)
{
#ifdef ENABLE_CHECKING
  verify_loop_structure ();
  verify_dominators (CDI_DOMINATORS);
  verify_dominators (CDI_POST_DOMINATORS);
  verify_loop_closed_ssa ();
#endif
}
2185 /* Create for all scop regions a single entry and a single exit edge. */
2188 create_sese_edges (VEC (sd_region
, heap
) *regions
)
2193 for (i
= 0; VEC_iterate (sd_region
, regions
, i
, s
); i
++)
2194 create_single_entry_edge (s
);
2196 mark_exit_edges (regions
);
2198 for (i
= 0; VEC_iterate (sd_region
, regions
, i
, s
); i
++)
2199 create_single_exit_edge (s
);
2201 unmark_exit_edges (regions
);
2203 fix_loop_structure (NULL
);
2205 #ifdef ENABLE_CHECKING
2206 verify_loop_structure ();
2207 verify_dominators (CDI_DOMINATORS
);
2212 /* Create graphite SCoPs from an array of scop detection regions. */
2215 build_graphite_scops (VEC (sd_region
, heap
) *scop_regions
)
2220 for (i
= 0; VEC_iterate (sd_region
, scop_regions
, i
, s
); i
++)
2222 edge entry
= find_single_entry_edge (s
);
2223 edge exit
= find_single_exit_edge (s
);
2224 scop_p scop
= new_scop (entry
, exit
);
2225 VEC_safe_push (scop_p
, heap
, current_scops
, scop
);
2227 /* Are there overlapping SCoPs? */
2228 #ifdef ENABLE_CHECKING
2233 for (j
= 0; VEC_iterate (sd_region
, scop_regions
, j
, s2
); j
++)
2235 gcc_assert (!bb_in_sd_region (s
->entry
, s2
));
2241 /* Find static control parts. */
2246 struct loop
*loop
= current_loops
->tree_root
;
2247 VEC (sd_region
, heap
) *tmp_scops
= VEC_alloc (sd_region
, heap
, 3);
2249 build_scops_1 (single_succ (ENTRY_BLOCK_PTR
), &tmp_scops
, loop
);
2250 create_sese_edges (tmp_scops
);
2251 build_graphite_scops (tmp_scops
);
2252 VEC_free (sd_region
, heap
, tmp_scops
);
2255 /* Gather the basic blocks belonging to the SCOP. */
2258 build_scop_bbs (scop_p scop
)
2260 basic_block
*stack
= XNEWVEC (basic_block
, n_basic_blocks
+ 1);
2261 sbitmap visited
= sbitmap_alloc (last_basic_block
);
2264 sbitmap_zero (visited
);
2265 stack
[sp
++] = SCOP_ENTRY (scop
);
2269 basic_block bb
= stack
[--sp
];
2270 int depth
= loop_depth (bb
->loop_father
);
2271 int num
= bb
->loop_father
->num
;
2275 /* Scop's exit is not in the scop. Exclude also bbs, which are
2276 dominated by the SCoP exit. These are e.g. loop latches. */
2277 if (TEST_BIT (visited
, bb
->index
)
2278 || dominated_by_p (CDI_DOMINATORS
, bb
, SCOP_EXIT (scop
))
2279 /* Every block in the scop is dominated by scop's entry. */
2280 || !dominated_by_p (CDI_DOMINATORS
, bb
, SCOP_ENTRY (scop
)))
2283 new_graphite_bb (scop
, bb
);
2284 SET_BIT (visited
, bb
->index
);
2286 /* First push the blocks that have to be processed last. Note
2287 that this means that the order in which the code is organized
2288 below is important: do not reorder the following code. */
2289 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2290 if (! TEST_BIT (visited
, e
->dest
->index
)
2291 && (int) loop_depth (e
->dest
->loop_father
) < depth
)
2292 stack
[sp
++] = e
->dest
;
2294 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2295 if (! TEST_BIT (visited
, e
->dest
->index
)
2296 && (int) loop_depth (e
->dest
->loop_father
) == depth
2297 && e
->dest
->loop_father
->num
!= num
)
2298 stack
[sp
++] = e
->dest
;
2300 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2301 if (! TEST_BIT (visited
, e
->dest
->index
)
2302 && (int) loop_depth (e
->dest
->loop_father
) == depth
2303 && e
->dest
->loop_father
->num
== num
2304 && EDGE_COUNT (e
->dest
->preds
) > 1)
2305 stack
[sp
++] = e
->dest
;
2307 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2308 if (! TEST_BIT (visited
, e
->dest
->index
)
2309 && (int) loop_depth (e
->dest
->loop_father
) == depth
2310 && e
->dest
->loop_father
->num
== num
2311 && EDGE_COUNT (e
->dest
->preds
) == 1)
2312 stack
[sp
++] = e
->dest
;
2314 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
2315 if (! TEST_BIT (visited
, e
->dest
->index
)
2316 && (int) loop_depth (e
->dest
->loop_father
) > depth
)
2317 stack
[sp
++] = e
->dest
;
2321 sbitmap_free (visited
);
2324 /* Returns the number of reduction phi nodes in LOOP. */
2327 nb_reductions_in_loop (loop_p loop
)
2330 gimple_stmt_iterator gsi
;
2332 for (gsi
= gsi_start_phis (loop
->header
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2334 gimple phi
= gsi_stmt (gsi
);
2338 if (!is_gimple_reg (PHI_RESULT (phi
)))
2341 scev
= analyze_scalar_evolution (loop
, PHI_RESULT (phi
));
2342 scev
= instantiate_parameters (loop
, scev
);
2343 if (!simple_iv (loop
, phi
, PHI_RESULT (phi
), &iv
, true))
2350 /* A LOOP is in normal form when it contains only one scalar phi node
2351 that defines the main induction variable of the loop, only one
2352 increment of the IV, and only one exit condition. */
2355 graphite_loop_normal_form (loop_p loop
)
2357 struct tree_niter_desc niter
;
2360 edge exit
= single_dom_exit (loop
);
2362 gcc_assert (number_of_iterations_exit (loop
, exit
, &niter
, false));
2363 nit
= force_gimple_operand (unshare_expr (niter
.niter
), &stmts
, true,
2366 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
2368 /* One IV per loop. */
2369 if (nb_reductions_in_loop (loop
) > 0)
2372 return canonicalize_loop_ivs (loop
, NULL
, nit
);
2375 /* Record LOOP as occuring in SCOP. Returns true when the operation
2379 scop_record_loop (scop_p scop
, loop_p loop
)
2384 if (bitmap_bit_p (SCOP_LOOPS (scop
), loop
->num
))
2387 bitmap_set_bit (SCOP_LOOPS (scop
), loop
->num
);
2388 VEC_safe_push (loop_p
, heap
, SCOP_LOOP_NEST (scop
), loop
);
2390 induction_var
= graphite_loop_normal_form (loop
);
2394 oldiv
= XNEW (struct name_tree
);
2395 oldiv
->t
= induction_var
;
2396 oldiv
->name
= get_name (SSA_NAME_VAR (oldiv
->t
));
2398 VEC_safe_push (name_tree
, heap
, SCOP_OLDIVS (scop
), oldiv
);
2402 /* Build the loop nests contained in SCOP. Returns true when the
2403 operation was successful. */
2406 build_scop_loop_nests (scop_p scop
)
2410 struct loop
*loop0
, *loop1
;
2413 if (bb_in_scop_p (bb
, scop
))
2415 struct loop
*loop
= bb
->loop_father
;
2417 /* Only add loops if they are completely contained in the SCoP. */
2418 if (loop
->header
== bb
2419 && bb_in_scop_p (loop
->latch
, scop
))
2421 if (!scop_record_loop (scop
, loop
))
2426 /* Make sure that the loops in the SCOP_LOOP_NEST are ordered. It
2427 can be the case that an inner loop is inserted before an outer
2428 loop. To avoid this, semi-sort once. */
2429 for (i
= 0; VEC_iterate (loop_p
, SCOP_LOOP_NEST (scop
), i
, loop0
); i
++)
2431 if (VEC_length (loop_p
, SCOP_LOOP_NEST (scop
)) == i
+ 1)
2434 loop1
= VEC_index (loop_p
, SCOP_LOOP_NEST (scop
), i
+ 1);
2435 if (loop0
->num
> loop1
->num
)
2437 VEC_replace (loop_p
, SCOP_LOOP_NEST (scop
), i
, loop1
);
2438 VEC_replace (loop_p
, SCOP_LOOP_NEST (scop
), i
+ 1, loop0
);
2445 /* Build dynamic schedules for all the BBs. */
2448 build_scop_dynamic_schedules (scop_p scop
)
2450 int i
, dim
, loop_num
, row
, col
;
2453 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
2455 loop_num
= GBB_BB (gb
)->loop_father
->num
;
2459 dim
= nb_loops_around_gb (gb
);
2460 GBB_DYNAMIC_SCHEDULE (gb
) = cloog_matrix_alloc (dim
, dim
);
2462 for (row
= 0; row
< GBB_DYNAMIC_SCHEDULE (gb
)->NbRows
; row
++)
2463 for (col
= 0; col
< GBB_DYNAMIC_SCHEDULE (gb
)->NbColumns
; col
++)
2465 value_set_si (GBB_DYNAMIC_SCHEDULE (gb
)->p
[row
][col
], 1);
2467 value_set_si (GBB_DYNAMIC_SCHEDULE (gb
)->p
[row
][col
], 0);
2470 GBB_DYNAMIC_SCHEDULE (gb
) = NULL
;
2474 /* Returns the number of loops that are identical at the beginning of
2475 the vectors A and B. */
2478 compare_prefix_loops (VEC (loop_p
, heap
) *a
, VEC (loop_p
, heap
) *b
)
2487 lb
= VEC_length (loop_p
, b
);
2489 for (i
= 0; VEC_iterate (loop_p
, a
, i
, ea
); i
++)
2491 || ea
!= VEC_index (loop_p
, b
, i
))
2497 /* Build for BB the static schedule.
2499 The STATIC_SCHEDULE is defined like this:
2518 Static schedules for A to F:
2531 build_scop_canonical_schedules (scop_p scop
)
2535 int nb_loops
= scop_nb_loops (scop
);
2536 lambda_vector static_schedule
= lambda_vector_new (nb_loops
+ 1);
2537 VEC (loop_p
, heap
) *loops_previous
= NULL
;
2539 /* We have to start schedules at 0 on the first component and
2540 because we cannot compare_prefix_loops against a previous loop,
2541 prefix will be equal to zero, and that index will be
2542 incremented before copying. */
2543 static_schedule
[0] = -1;
2545 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
2547 int prefix
= compare_prefix_loops (loops_previous
, GBB_LOOPS (gb
));
2548 int nb
= gbb_nb_loops (gb
);
2550 loops_previous
= GBB_LOOPS (gb
);
2551 memset (&(static_schedule
[prefix
+ 1]), 0, sizeof (int) * (nb_loops
- prefix
));
2552 ++static_schedule
[prefix
];
2553 GBB_STATIC_SCHEDULE (gb
) = lambda_vector_new (nb
+ 1);
2554 lambda_vector_copy (static_schedule
,
2555 GBB_STATIC_SCHEDULE (gb
), nb
+ 1);
2559 /* Build the LOOPS vector for all bbs in SCOP. */
2562 build_bb_loops (scop_p scop
)
2567 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
2572 depth
= nb_loops_around_gb (gb
) - 1;
2574 GBB_LOOPS (gb
) = VEC_alloc (loop_p
, heap
, 3);
2575 VEC_safe_grow_cleared (loop_p
, heap
, GBB_LOOPS (gb
), depth
+ 1);
2577 loop
= GBB_BB (gb
)->loop_father
;
2579 while (scop_contains_loop (scop
, loop
))
2581 VEC_replace (loop_p
, GBB_LOOPS (gb
), depth
, loop
);
2582 loop
= loop_outer (loop
);
2588 /* Get the index for parameter VAR in SCOP. */
2591 param_index (tree var
, scop_p scop
)
2597 gcc_assert (TREE_CODE (var
) == SSA_NAME
);
2599 for (i
= 0; VEC_iterate (name_tree
, SCOP_PARAMS (scop
), i
, p
); i
++)
2603 gcc_assert (SCOP_ADD_PARAMS (scop
));
2605 nvar
= XNEW (struct name_tree
);
2608 VEC_safe_push (name_tree
, heap
, SCOP_PARAMS (scop
), nvar
);
2609 return VEC_length (name_tree
, SCOP_PARAMS (scop
)) - 1;
2612 /* Scan EXPR and translate it to an inequality vector INEQ that will
2613 be added, or subtracted, in the constraint domain matrix C at row
2614 R. K is the number of columns for loop iterators in C. */
2617 scan_tree_for_params (scop_p s
, tree e
, CloogMatrix
*c
, int r
, Value k
,
2620 int cst_col
, param_col
;
2622 if (e
== chrec_dont_know
)
2625 switch (TREE_CODE (e
))
2627 case POLYNOMIAL_CHREC
:
2629 tree left
= CHREC_LEFT (e
);
2630 tree right
= CHREC_RIGHT (e
);
2631 int var
= CHREC_VARIABLE (e
);
2633 if (TREE_CODE (right
) != INTEGER_CST
)
2638 int loop_col
= scop_gimple_loop_depth (s
, get_loop (var
)) + 1;
2641 value_sub_int (c
->p
[r
][loop_col
], c
->p
[r
][loop_col
],
2642 int_cst_value (right
));
2644 value_add_int (c
->p
[r
][loop_col
], c
->p
[r
][loop_col
],
2645 int_cst_value (right
));
2648 switch (TREE_CODE (left
))
2650 case POLYNOMIAL_CHREC
:
2651 scan_tree_for_params (s
, left
, c
, r
, k
, subtract
);
2655 /* Constant part. */
2658 int v
= int_cst_value (left
);
2659 cst_col
= c
->NbColumns
- 1;
2664 subtract
= subtract
? false : true;
2668 value_sub_int (c
->p
[r
][cst_col
], c
->p
[r
][cst_col
], v
);
2670 value_add_int (c
->p
[r
][cst_col
], c
->p
[r
][cst_col
], v
);
2675 scan_tree_for_params (s
, left
, c
, r
, k
, subtract
);
2682 if (chrec_contains_symbols (TREE_OPERAND (e
, 0)))
2687 gcc_assert (host_integerp (TREE_OPERAND (e
, 1), 0));
2689 value_set_si (val
, int_cst_value (TREE_OPERAND (e
, 1)));
2690 value_multiply (k
, k
, val
);
2693 scan_tree_for_params (s
, TREE_OPERAND (e
, 0), c
, r
, k
, subtract
);
2700 gcc_assert (host_integerp (TREE_OPERAND (e
, 0), 0));
2702 value_set_si (val
, int_cst_value (TREE_OPERAND (e
, 0)));
2703 value_multiply (k
, k
, val
);
2706 scan_tree_for_params (s
, TREE_OPERAND (e
, 1), c
, r
, k
, subtract
);
2711 case POINTER_PLUS_EXPR
:
2712 scan_tree_for_params (s
, TREE_OPERAND (e
, 0), c
, r
, k
, subtract
);
2713 scan_tree_for_params (s
, TREE_OPERAND (e
, 1), c
, r
, k
, subtract
);
2717 scan_tree_for_params (s
, TREE_OPERAND (e
, 0), c
, r
, k
, subtract
);
2718 scan_tree_for_params (s
, TREE_OPERAND (e
, 1), c
, r
, k
, !subtract
);
2722 scan_tree_for_params (s
, TREE_OPERAND (e
, 0), c
, r
, k
, !subtract
);
2726 param_col
= param_index (e
, s
);
2730 param_col
+= c
->NbColumns
- scop_nb_params (s
) - 1;
2733 value_subtract (c
->p
[r
][param_col
], c
->p
[r
][param_col
], k
);
2735 value_addto (c
->p
[r
][param_col
], c
->p
[r
][param_col
], k
);
2742 int v
= int_cst_value (e
);
2743 cst_col
= c
->NbColumns
- 1;
2748 subtract
= subtract
? false : true;
2752 value_sub_int (c
->p
[r
][cst_col
], c
->p
[r
][cst_col
], v
);
2754 value_add_int (c
->p
[r
][cst_col
], c
->p
[r
][cst_col
], v
);
2759 case NON_LVALUE_EXPR
:
2760 scan_tree_for_params (s
, TREE_OPERAND (e
, 0), c
, r
, k
, subtract
);
2769 /* Data structure for idx_record_params. */
2777 /* For a data reference with an ARRAY_REF as its BASE, record the
2778 parameters occurring in IDX. DTA is passed in as complementary
2779 information, and is used by the automatic walker function. This
2780 function is a callback for for_each_index. */
2783 idx_record_params (tree base
, tree
*idx
, void *dta
)
2785 struct irp_data
*data
= (struct irp_data
*) dta
;
2787 if (TREE_CODE (base
) != ARRAY_REF
)
2790 if (TREE_CODE (*idx
) == SSA_NAME
)
2793 scop_p scop
= data
->scop
;
2794 struct loop
*loop
= data
->loop
;
2797 scev
= analyze_scalar_evolution (loop
, *idx
);
2798 scev
= instantiate_scev (block_before_scop (scop
), loop
, scev
);
2801 value_set_si (one
, 1);
2802 scan_tree_for_params (scop
, scev
, NULL
, 0, one
, false);
2809 /* Find parameters with respect to SCOP in BB. We are looking in memory
2810 access functions, conditions and loop bounds. */
2813 find_params_in_bb (scop_p scop
, graphite_bb_p gb
)
2816 data_reference_p dr
;
2818 loop_p father
= GBB_BB (gb
)->loop_father
;
2820 for (i
= 0; VEC_iterate (data_reference_p
, GBB_DATA_REFS (gb
), i
, dr
); i
++)
2822 struct irp_data irp
;
2826 for_each_index (&dr
->ref
, idx_record_params
, &irp
);
2829 /* Find parameters in conditional statements. */
2830 for (i
= 0; VEC_iterate (gimple
, GBB_CONDITIONS (gb
), i
, stmt
); i
++)
2833 loop_p loop
= father
;
2837 lhs
= gimple_cond_lhs (stmt
);
2838 lhs
= analyze_scalar_evolution (loop
, lhs
);
2839 lhs
= instantiate_scev (block_before_scop (scop
), loop
, lhs
);
2841 rhs
= gimple_cond_rhs (stmt
);
2842 rhs
= analyze_scalar_evolution (loop
, rhs
);
2843 rhs
= instantiate_scev (block_before_scop (scop
), loop
, rhs
);
2846 scan_tree_for_params (scop
, lhs
, NULL
, 0, one
, false);
2847 value_set_si (one
, 1);
2848 scan_tree_for_params (scop
, rhs
, NULL
, 0, one
, false);
2853 /* Saves in NV the name of variable P->T. */
2856 save_var_name (char **nv
, int i
, name_tree p
)
2858 const char *name
= get_name (SSA_NAME_VAR (p
->t
));
2862 int len
= strlen (name
) + 16;
2863 nv
[i
] = XNEWVEC (char, len
);
2864 snprintf (nv
[i
], len
, "%s_%d", name
, SSA_NAME_VERSION (p
->t
));
2868 nv
[i
] = XNEWVEC (char, 16);
2869 snprintf (nv
[i
], 2 + 16, "T_%d", SSA_NAME_VERSION (p
->t
));
2875 /* Return the maximal loop depth in SCOP. */
2878 scop_max_loop_depth (scop_p scop
)
2882 int max_nb_loops
= 0;
2884 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gbb
); i
++)
2886 int nb_loops
= gbb_nb_loops (gbb
);
2887 if (max_nb_loops
< nb_loops
)
2888 max_nb_loops
= nb_loops
;
2891 return max_nb_loops
;
2894 /* Initialize Cloog's parameter names from the names used in GIMPLE.
2895 Initialize Cloog's iterator names, using 'graphite_iterator_%d'
2896 from 0 to scop_nb_loops (scop). */
2899 initialize_cloog_names (scop_p scop
)
2901 int i
, nb_params
= VEC_length (name_tree
, SCOP_PARAMS (scop
));
2902 char **params
= XNEWVEC (char *, nb_params
);
2903 int nb_iterators
= scop_max_loop_depth (scop
);
2904 int nb_scattering
= cloog_program_nb_scattdims (SCOP_PROG (scop
));
2905 char **iterators
= XNEWVEC (char *, nb_iterators
* 2);
2906 char **scattering
= XNEWVEC (char *, nb_scattering
);
2909 for (i
= 0; VEC_iterate (name_tree
, SCOP_PARAMS (scop
), i
, p
); i
++)
2910 save_var_name (params
, i
, p
);
2912 cloog_names_set_nb_parameters (cloog_program_names (SCOP_PROG (scop
)),
2914 cloog_names_set_parameters (cloog_program_names (SCOP_PROG (scop
)),
2917 for (i
= 0; i
< nb_iterators
; i
++)
2920 iterators
[i
] = XNEWVEC (char, len
);
2921 snprintf (iterators
[i
], len
, "graphite_iterator_%d", i
);
2924 cloog_names_set_nb_iterators (cloog_program_names (SCOP_PROG (scop
)),
2926 cloog_names_set_iterators (cloog_program_names (SCOP_PROG (scop
)),
2929 for (i
= 0; i
< nb_scattering
; i
++)
2932 scattering
[i
] = XNEWVEC (char, len
);
2933 snprintf (scattering
[i
], len
, "s_%d", i
);
2936 cloog_names_set_nb_scattering (cloog_program_names (SCOP_PROG (scop
)),
2938 cloog_names_set_scattering (cloog_program_names (SCOP_PROG (scop
)),
2942 /* Record the parameters used in the SCOP. A variable is a parameter
2943 in a scop if it does not vary during the execution of that scop. */
2946 find_scop_parameters (scop_p scop
)
2954 value_set_si (one
, 1);
2956 /* Find the parameters used in the loop bounds. */
2957 for (i
= 0; VEC_iterate (loop_p
, SCOP_LOOP_NEST (scop
), i
, loop
); i
++)
2959 tree nb_iters
= number_of_latch_executions (loop
);
2961 if (!chrec_contains_symbols (nb_iters
))
2964 nb_iters
= analyze_scalar_evolution (loop
, nb_iters
);
2965 nb_iters
= instantiate_scev (block_before_scop (scop
), loop
, nb_iters
);
2966 scan_tree_for_params (scop
, nb_iters
, NULL
, 0, one
, false);
2971 /* Find the parameters used in data accesses. */
2972 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
2973 find_params_in_bb (scop
, gb
);
2975 SCOP_ADD_PARAMS (scop
) = false;
2978 /* Build the context constraints for SCOP: constraints and relations
2982 build_scop_context (scop_p scop
)
2984 int nb_params
= scop_nb_params (scop
);
2985 CloogMatrix
*matrix
= cloog_matrix_alloc (1, nb_params
+ 2);
2987 /* Insert '0 >= 0' in the context matrix, as it is not allowed to be
2990 value_set_si (matrix
->p
[0][0], 1);
2992 value_set_si (matrix
->p
[0][nb_params
+ 1], 0);
2994 cloog_program_set_context (SCOP_PROG (scop
),
2995 cloog_domain_matrix2domain (matrix
));
2996 cloog_matrix_free (matrix
);
2999 /* Returns a graphite_bb from BB. */
3001 static inline graphite_bb_p
3002 gbb_from_bb (basic_block bb
)
3004 return (graphite_bb_p
) bb
->aux
;
/* NOTE(review): extraction damage -- statements are split across lines with
   fused line numbers, and interior lines (local declarations of i, j, row,
   cstr, one; braces; some gcc_asserts) were dropped.  Visible logic: widen
   OUTER_CSTR by one row and one column, copy the outer constraints leaving
   an empty column for the current loop, add 'loop_i >= 0', then add an
   upper-bound row either from an INTEGER_CST iteration count or by scanning
   a parametric nb_iters with scan_tree_for_params; recurse on inner and
   sibling loops; copy the matrix into GBB_DOMAIN of each bb of LOOP.
   Restore from pristine graphite.c before compiling.  */
3007 /* Builds the constraint matrix for LOOP in SCOP. NB_OUTER_LOOPS is the
3008 number of loops surrounding LOOP in SCOP. OUTER_CSTR gives the
3009 constraints matrix for the surrounding loops. */
3012 build_loop_iteration_domains (scop_p scop
, struct loop
*loop
,
3013 CloogMatrix
*outer_cstr
, int nb_outer_loops
)
3019 int nb_rows
= outer_cstr
->NbRows
+ 1;
3020 int nb_cols
= outer_cstr
->NbColumns
+ 1;
3022 /* Last column of CSTR is the column of constants. */
3023 int cst_col
= nb_cols
- 1;
3025 /* The column for the current loop is just after the columns of
3026 other outer loops. */
3027 int loop_col
= nb_outer_loops
+ 1;
3029 tree nb_iters
= number_of_latch_executions (loop
);
3031 /* When the number of iterations is a constant or a parameter, we
3032 add a constraint for the upper bound of the loop. So add a row
3033 to the constraint matrix before allocating it. */
3034 if (TREE_CODE (nb_iters
) == INTEGER_CST
3035 || !chrec_contains_undetermined (nb_iters
))
3038 cstr
= cloog_matrix_alloc (nb_rows
, nb_cols
);
3040 /* Copy the outer constraints. */
3041 for (i
= 0; i
< outer_cstr
->NbRows
; i
++)
3043 /* Copy the eq/ineq and loops columns. */
3044 for (j
= 0; j
< loop_col
; j
++)
3045 value_assign (cstr
->p
[i
][j
], outer_cstr
->p
[i
][j
]);
3047 /* Leave an empty column in CSTR for the current loop, and then
3048 copy the parameter columns. */
3049 for (j
= loop_col
; j
< outer_cstr
->NbColumns
; j
++)
3050 value_assign (cstr
->p
[i
][j
+ 1], outer_cstr
->p
[i
][j
]);
3054 row
= outer_cstr
->NbRows
;
3055 value_set_si (cstr
->p
[row
][0], 1);
3056 value_set_si (cstr
->p
[row
][loop_col
], 1);
3058 /* loop_i <= nb_iters */
3059 if (TREE_CODE (nb_iters
) == INTEGER_CST
)
3062 value_set_si (cstr
->p
[row
][0], 1);
3063 value_set_si (cstr
->p
[row
][loop_col
], -1);
3065 value_set_si (cstr
->p
[row
][cst_col
],
3066 int_cst_value (nb_iters
));
3068 else if (!chrec_contains_undetermined (nb_iters
))
3070 /* Otherwise nb_iters contains parameters: scan the nb_iters
3071 expression and build its matrix representation. */
3075 value_set_si (cstr
->p
[row
][0], 1);
3076 value_set_si (cstr
->p
[row
][loop_col
], -1);
3078 nb_iters
= analyze_scalar_evolution (loop
, nb_iters
);
3079 nb_iters
= instantiate_scev (block_before_scop (scop
), loop
, nb_iters
);
3082 value_set_si (one
, 1);
3083 scan_tree_for_params (scop
, nb_iters
, cstr
, row
, one
, false);
3089 if (loop
->inner
&& loop_in_scop_p (loop
->inner
, scop
))
3090 build_loop_iteration_domains (scop
, loop
->inner
, cstr
, nb_outer_loops
+ 1);
3092 /* Only go to the next loops, if we are not at the outermost layer. These
3093 have to be handled seperately, as we can be sure, that the chain at this
3094 layer will be connected. */
3095 if (nb_outer_loops
!= 0 && loop
->next
&& loop_in_scop_p (loop
->next
, scop
))
3096 build_loop_iteration_domains (scop
, loop
->next
, outer_cstr
, nb_outer_loops
);
3098 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
3099 if (gbb_loop (gb
) == loop
)
3100 GBB_DOMAIN (gb
) = cloog_matrix_copy (cstr
);
3102 cloog_matrix_free (cstr
);
/* NOTE(review): extraction damage -- statements are split across lines with
   fused line numbers; the switch case labels (GIMPLE_COND / GIMPLE_SWITCH,
   and the tree_code cases LT/LE/GT/GE/EQ/NE), several declarations, braces
   and row-increment statements were dropped.  Visible logic: count the rows
   needed for GB's conditions, enlarge GBB_DOMAIN into a new CloogMatrix,
   then for each recorded condition scan left/right operands (after scev
   analysis and instantiation) into the matrix with scan_tree_for_params;
   ELSE-branch conditions (NULL in condition_cases) are inverted with
   invert_tree_comparison; strict comparisons subtract 1 from the constant
   column.  Restore from pristine graphite.c before compiling.  */
3105 /* Add conditions to the domain of GB. */
3108 add_conditions_to_domain (graphite_bb_p gb
)
3112 VEC (gimple
, heap
) *conditions
= GBB_CONDITIONS (gb
);
3113 CloogMatrix
*domain
= GBB_DOMAIN (gb
);
3114 scop_p scop
= GBB_SCOP (gb
);
3118 unsigned nb_new_rows
= 0;
3121 if (VEC_empty (gimple
, conditions
))
3126 nb_rows
= domain
->NbRows
;
3127 nb_cols
= domain
->NbColumns
;
3132 nb_cols
= nb_loops_around_gb (gb
) + scop_nb_params (scop
) + 2;
3135 /* Count number of necessary new rows to add the conditions to the
3137 for (i
= 0; VEC_iterate (gimple
, conditions
, i
, stmt
); i
++)
3139 switch (gimple_code (stmt
))
3143 enum tree_code code
= gimple_cond_code (stmt
);
3149 /* NE and EQ statements are not supported right know. */
3165 /* Switch statements are not supported right know. */
3176 /* Enlarge the matrix. */
3178 CloogMatrix
*new_domain
;
3179 new_domain
= cloog_matrix_alloc (nb_rows
+ nb_new_rows
, nb_cols
);
3183 for (i
= 0; i
< nb_rows
; i
++)
3184 for (j
= 0; j
< nb_cols
; j
++)
3185 value_assign (new_domain
->p
[i
][j
], domain
->p
[i
][j
]);
3187 cloog_matrix_free (domain
);
3190 domain
= new_domain
;
3191 GBB_DOMAIN (gb
) = new_domain
;
3194 /* Add the conditions to the new enlarged domain matrix. */
3196 for (i
= 0; VEC_iterate (gimple
, conditions
, i
, stmt
); i
++)
3198 switch (gimple_code (stmt
))
3203 enum tree_code code
;
3206 loop_p loop
= GBB_BB (gb
)->loop_father
;
3208 left
= gimple_cond_lhs (stmt
);
3209 right
= gimple_cond_rhs (stmt
);
3211 left
= analyze_scalar_evolution (loop
, left
);
3212 right
= analyze_scalar_evolution (loop
, right
);
3214 left
= instantiate_scev (block_before_scop (scop
), loop
, left
);
3215 right
= instantiate_scev (block_before_scop (scop
), loop
, right
);
3217 code
= gimple_cond_code (stmt
);
3219 /* The conditions for ELSE-branches are inverted. */
3220 if (VEC_index (gimple
, gb
->condition_cases
, i
) == NULL
)
3221 code
= invert_tree_comparison (code
, false);
3226 /* NE statements are not supported right know. */
3230 value_set_si (domain
->p
[row
][0], 1);
3232 value_set_si (one
, 1);
3233 scan_tree_for_params (scop
, left
, domain
, row
, one
, true);
3234 value_set_si (one
, 1);
3235 scan_tree_for_params (scop
, right
, domain
, row
, one
, false);
3237 value_set_si (domain
->p
[row
][0], 1);
3238 value_set_si (one
, 1);
3239 scan_tree_for_params (scop
, left
, domain
, row
, one
, false);
3240 value_set_si (one
, 1);
3241 scan_tree_for_params (scop
, right
, domain
, row
, one
, true);
3246 value_set_si (domain
->p
[row
][0], 1);
3248 value_set_si (one
, 1);
3249 scan_tree_for_params (scop
, left
, domain
, row
, one
, true);
3250 value_set_si (one
, 1);
3251 scan_tree_for_params (scop
, right
, domain
, row
, one
, false);
3252 value_sub_int (domain
->p
[row
][nb_cols
- 1],
3253 domain
->p
[row
][nb_cols
- 1], 1);
3258 value_set_si (domain
->p
[row
][0], 1);
3260 value_set_si (one
, 1);
3261 scan_tree_for_params (scop
, left
, domain
, row
, one
, false);
3262 value_set_si (one
, 1);
3263 scan_tree_for_params (scop
, right
, domain
, row
, one
, true);
3264 value_sub_int (domain
->p
[row
][nb_cols
- 1],
3265 domain
->p
[row
][nb_cols
- 1], 1);
3270 value_set_si (domain
->p
[row
][0], 1);
3272 value_set_si (one
, 1);
3273 scan_tree_for_params (scop
, left
, domain
, row
, one
, true);
3274 value_set_si (one
, 1);
3275 scan_tree_for_params (scop
, right
, domain
, row
, one
, false);
3280 value_set_si (domain
->p
[row
][0], 1);
3282 value_set_si (one
, 1);
3283 scan_tree_for_params (scop
, left
, domain
, row
, one
, false);
3284 value_set_si (one
, 1);
3285 scan_tree_for_params (scop
, right
, domain
, row
, one
, true);
3296 /* Switch statements are not supported right know. */
3307 /* Returns true when PHI defines an induction variable in the loop
3308 containing the PHI node. */
3311 phi_node_is_iv (gimple phi
)
3313 loop_p loop
= gimple_bb (phi
)->loop_father
;
3314 tree scev
= analyze_scalar_evolution (loop
, gimple_phi_result (phi
));
3316 return tree_contains_chrecs (scev
, NULL
);
/* NOTE(review): extraction damage -- the function's return type, the
   declaration of the recorded phi, the early-return branches and braces
   were dropped.  Visible logic: walk the phi nodes of BB; for each
   GIMPLE register phi, expect at most one scalar phi and only in the
   loop header, and check it with phi_node_is_iv.  Restore from pristine
   graphite.c before compiling.  */
3319 /* Returns true when BB contains scalar phi nodes that are not an
3320 induction variable of a loop. */
3323 bb_contains_non_iv_scalar_phi_nodes (basic_block bb
)
3326 gimple_stmt_iterator si
;
3328 for (si
= gsi_start_phis (bb
); !gsi_end_p (si
); gsi_next (&si
))
3329 if (is_gimple_reg (gimple_phi_result (gsi_stmt (si
))))
3331 /* Store the unique scalar PHI node: at this point, loops
3332 should be in cannonical form, so we expect to see at most
3333 one scalar phi node in the loop header. */
3335 || bb
!= bb
->loop_father
->header
)
3338 phi
= gsi_stmt (si
);
3342 || phi_node_is_iv (phi
))
/* NOTE(review): extraction damage -- statements are split across lines with
   fused line numbers; case labels (GIMPLE_COND / GIMPLE_SWITCH), several
   declarations, returns and braces were dropped.  Visible logic: walk BB's
   statements; for a condition, push it (and the taken case, or NULL for the
   else branch) on CONDITIONS/CASES, recurse into the dominated destination,
   then pop; for a switch, recurse into each single-predecessor case block;
   finally recurse into the remaining immediate dominatees and free DOM.
   Restore from pristine graphite.c before compiling.  */
3348 /* Helper recursive function. Record in CONDITIONS and CASES all
3349 conditions from 'if's and 'switch'es occurring in BB from SCOP.
3351 Returns false when the conditions contain scalar computations that
3352 depend on the condition, i.e. when there are scalar phi nodes on
3353 the junction after the condition. Only the computations occurring
3354 on memory can be handled in the polyhedral model: operations that
3355 define scalar evolutions in conditions, that can potentially be
3356 used to index memory, can't be handled by the polyhedral model. */
3359 build_scop_conditions_1 (VEC (gimple
, heap
) **conditions
,
3360 VEC (gimple
, heap
) **cases
, basic_block bb
,
3366 gimple_stmt_iterator gsi
;
3367 basic_block bb_child
, bb_iter
;
3368 VEC (basic_block
, heap
) *dom
;
3370 /* Make sure we are in the SCoP. */
3371 if (!bb_in_scop_p (bb
, scop
))
3374 if (bb_contains_non_iv_scalar_phi_nodes (bb
))
3377 gbb
= gbb_from_bb (bb
);
3380 GBB_CONDITIONS (gbb
) = VEC_copy (gimple
, heap
, *conditions
);
3381 GBB_CONDITION_CASES (gbb
) = VEC_copy (gimple
, heap
, *cases
);
3384 dom
= get_dominated_by (CDI_DOMINATORS
, bb
);
3386 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3388 gimple stmt
= gsi_stmt (gsi
);
3389 VEC (edge
, gc
) *edges
;
3392 switch (gimple_code (stmt
))
3396 for (i
= 0; VEC_iterate (edge
, edges
, i
, e
); i
++)
3397 if ((dominated_by_p (CDI_DOMINATORS
, e
->dest
, bb
))
3398 && VEC_length (edge
, e
->dest
->preds
) == 1)
3400 /* Remove the scanned block from the dominator successors. */
3401 for (j
= 0; VEC_iterate (basic_block
, dom
, j
, bb_iter
); j
++)
3402 if (bb_iter
== e
->dest
)
3404 VEC_unordered_remove (basic_block
, dom
, j
);
3408 /* Recursively scan the then or else part. */
3409 if (e
->flags
& EDGE_TRUE_VALUE
)
3410 VEC_safe_push (gimple
, heap
, *cases
, stmt
);
3413 gcc_assert (e
->flags
& EDGE_FALSE_VALUE
);
3414 VEC_safe_push (gimple
, heap
, *cases
, NULL
);
3417 VEC_safe_push (gimple
, heap
, *conditions
, stmt
);
3418 if (!build_scop_conditions_1 (conditions
, cases
, e
->dest
, scop
))
3423 VEC_pop (gimple
, *conditions
);
3424 VEC_pop (gimple
, *cases
);
3431 gimple_stmt_iterator gsi_search_gimple_label
;
3433 for (i
= 0; i
< gimple_switch_num_labels (stmt
); ++i
)
3435 basic_block bb_iter
;
3437 size_t n_cases
= VEC_length (gimple
, *conditions
);
3438 unsigned n
= gimple_switch_num_labels (stmt
);
3440 bb_child
= label_to_block
3441 (CASE_LABEL (gimple_switch_label (stmt
, i
)));
3443 for (k
= 0; k
< n
; k
++)
3446 (CASE_LABEL (gimple_switch_label (stmt
, k
))) == bb_child
)
3449 /* Switches with multiple case values for the same
3450 block are not handled. */
3452 /* Switch cases with more than one predecessor are
3454 || VEC_length (edge
, bb_child
->preds
) != 1)
3460 /* Recursively scan the corresponding 'case' block. */
3461 for (gsi_search_gimple_label
= gsi_start_bb (bb_child
);
3462 !gsi_end_p (gsi_search_gimple_label
);
3463 gsi_next (&gsi_search_gimple_label
))
3465 gimple label
= gsi_stmt (gsi_search_gimple_label
);
3467 if (gimple_code (label
) == GIMPLE_LABEL
)
3469 tree t
= gimple_label_label (label
);
3471 gcc_assert (t
== gimple_switch_label (stmt
, i
));
3472 VEC_replace (gimple
, *cases
, n_cases
, label
);
3477 if (!build_scop_conditions_1 (conditions
, cases
, bb_child
, scop
))
3483 /* Remove the scanned block from the dominator successors. */
3484 for (j
= 0; VEC_iterate (basic_block
, dom
, j
, bb_iter
); j
++)
3485 if (bb_iter
== bb_child
)
3487 VEC_unordered_remove (basic_block
, dom
, j
);
3492 VEC_pop (gimple
, *conditions
);
3493 VEC_pop (gimple
, *cases
);
3502 /* Scan all immediate dominated successors. */
3503 for (i
= 0; VEC_iterate (basic_block
, dom
, i
, bb_child
); i
++)
3504 if (!build_scop_conditions_1 (conditions
, cases
, bb_child
, scop
))
3511 VEC_free (basic_block
, heap
, dom
);
3515 /* Record all conditions from SCOP.
3517 Returns false when the conditions contain scalar computations that
3518 depend on the condition, i.e. when there are scalar phi nodes on
3519 the junction after the condition. Only the computations occurring
3520 on memory can be handled in the polyhedral model: operations that
3521 define scalar evolutions in conditions, that can potentially be
3522 used to index memory, can't be handled by the polyhedral model. */
3525 build_scop_conditions (scop_p scop
)
3528 VEC (gimple
, heap
) *conditions
= NULL
;
3529 VEC (gimple
, heap
) *cases
= NULL
;
3531 res
= build_scop_conditions_1 (&conditions
, &cases
, SCOP_ENTRY (scop
), scop
);
3533 VEC_free (gimple
, heap
, conditions
);
3534 VEC_free (gimple
, heap
, cases
);
3538 /* Traverses all the GBBs of the SCOP and add their constraints to the
3539 iteration domains. */
3542 add_conditions_to_constraints (scop_p scop
)
3547 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gbb
); i
++)
3548 add_conditions_to_domain (gbb
);
/* NOTE(review): extraction damage -- the return type, local declarations,
   braces and the final return statement were dropped.  Visible logic: for
   each loop of the SCoP nest whose outer loop is outside the SCoP,
   allocate an empty outermost constraint matrix and call
   build_loop_iteration_domains on it, then free it.  Restore from
   pristine graphite.c before compiling.  */
3551 /* Build the current domain matrix: the loops belonging to the current
3552 SCOP, and that vary for the execution of the current basic block.
3553 Returns false if there is no loop in SCOP. */
3556 build_scop_iteration_domain (scop_p scop
)
3559 CloogMatrix
*outer_cstr
;
3562 /* Build cloog loop for all loops, that are in the uppermost loop layer of
3564 for (i
= 0; VEC_iterate (loop_p
, SCOP_LOOP_NEST (scop
), i
, loop
); i
++)
3565 if (!loop_in_scop_p (loop_outer (loop
), scop
))
3567 /* The outermost constraints is a matrix that has:
3568 -first column: eq/ineq boolean
3569 -last column: a constant
3570 -scop_nb_params columns for the parameters used in the scop. */
3571 outer_cstr
= cloog_matrix_alloc (0, scop_nb_params (scop
) + 2);
3572 build_loop_iteration_domains (scop
, loop
, outer_cstr
, 0);
3573 cloog_matrix_free (outer_cstr
);
/* NOTE(review): extraction damage -- several case labels (PLUS_EXPR /
   MINUS_EXPR / INTEGER_CST and the default), returns and braces were
   dropped.  Visible logic: for a POLYNOMIAL_CHREC, store the INTEGER_CST
   step in the column of the loop (found via nb_loops_around_loop_in_scop)
   and recurse/record the left part; PLUS/MINUS recurse on both operands;
   an INTEGER_CST goes to the constant column; a parameter sets its
   parameter column to 1.  Restore from pristine graphite.c before
   compiling.  */
3579 /* Initializes an equation CY of the access matrix using the
3580 information for a subscript from AF, relatively to the loop
3581 indexes from LOOP_NEST and parameter indexes from PARAMS. NDIM is
3582 the dimension of the array access, i.e. the number of
3583 subscripts. Returns true when the operation succeeds. */
3586 build_access_matrix_with_af (tree af
, lambda_vector cy
,
3587 scop_p scop
, int ndim
)
3591 switch (TREE_CODE (af
))
3593 case POLYNOMIAL_CHREC
:
3595 struct loop
*outer_loop
;
3596 tree left
= CHREC_LEFT (af
);
3597 tree right
= CHREC_RIGHT (af
);
3600 if (TREE_CODE (right
) != INTEGER_CST
)
3603 outer_loop
= get_loop (CHREC_VARIABLE (af
));
3604 var
= nb_loops_around_loop_in_scop (outer_loop
, scop
);
3605 cy
[var
] = int_cst_value (right
);
3607 switch (TREE_CODE (left
))
3609 case POLYNOMIAL_CHREC
:
3610 return build_access_matrix_with_af (left
, cy
, scop
, ndim
);
3613 cy
[ndim
- 1] = int_cst_value (left
);
3617 return build_access_matrix_with_af (left
, cy
, scop
, ndim
);
3622 build_access_matrix_with_af (TREE_OPERAND (af
, 0), cy
, scop
, ndim
);
3623 build_access_matrix_with_af (TREE_OPERAND (af
, 1), cy
, scop
, ndim
);
3627 build_access_matrix_with_af (TREE_OPERAND (af
, 0), cy
, scop
, ndim
);
3628 build_access_matrix_with_af (TREE_OPERAND (af
, 1), cy
, scop
, ndim
);
3632 cy
[ndim
- 1] = int_cst_value (af
);
3636 param_col
= param_index (af
, scop
);
3637 cy
[ndim
- scop_nb_params (scop
) + param_col
- 1] = 1;
3641 /* FIXME: access_fn can have parameters. */
/* NOTE(review): extraction damage -- the return type, the early return on
   build_access_matrix_with_af failure, the final return and braces were
   dropped.  Visible logic: allocate an access_matrix with one lambda
   vector per dimension of REF, fill each from the corresponding access
   function via build_access_matrix_with_af, and store it in
   DR_ACCESS_MATRIX.  Restore from pristine graphite.c before compiling.  */
3646 /* Initialize the access matrix in the data reference REF with respect
3647 to the loop nesting LOOP_NEST. Return true when the operation
3651 build_access_matrix (data_reference_p ref
, graphite_bb_p gb
)
3653 int i
, ndim
= DR_NUM_DIMENSIONS (ref
);
3654 struct access_matrix
*am
= GGC_NEW (struct access_matrix
);
3656 AM_MATRIX (am
) = VEC_alloc (lambda_vector
, gc
, ndim
);
3657 DR_SCOP (ref
) = GBB_SCOP (gb
);
3659 for (i
= 0; i
< ndim
; i
++)
3661 lambda_vector v
= lambda_vector_new (ref_nb_loops (ref
));
3662 scop_p scop
= GBB_SCOP (gb
);
3663 tree af
= DR_ACCESS_FN (ref
, i
);
3665 if (!build_access_matrix_with_af (af
, v
, scop
, ref_nb_loops (ref
)))
3668 VEC_quick_push (lambda_vector
, AM_MATRIX (am
), v
);
3671 DR_ACCESS_MATRIX (ref
) = am
;
/* NOTE(review): extraction damage -- the return type, loop declarations,
   braces and the code guarded by the FIXME were dropped.  Visible logic:
   for each graphite_bb of the SCoP, build the access matrix of every data
   reference with build_access_matrix (construction currently disabled per
   the FIXME).  Restore from pristine graphite.c before compiling.  */
3675 /* Build the access matrices for the data references in the SCOP. */
3678 build_scop_data_accesses (scop_p scop
)
3683 /* FIXME: Construction of access matrix is disabled until some
3684 pass, like the data dependence analysis, is using it. */
3687 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
3690 data_reference_p dr
;
3692 /* Construct the access matrix for each data ref, with respect to
3693 the loop nest of the current BB in the considered SCOP. */
3695 VEC_iterate (data_reference_p
, GBB_DATA_REFS (gb
), j
, dr
);
3698 bool res
= build_access_matrix (dr
, gb
);
3700 /* FIXME: At this point the DRs should always have an affine
3701 form. For the moment this fails as build_access_matrix
3702 does not build matrices with parameters. */
/* NOTE(review): extraction damage -- the return type, local declarations,
   the return statements and braces were dropped.  Visible logic: look NAME
   up among the SCoP parameters by string comparison, otherwise fetch the
   induction variable from IVSTACK via loop_iv_stack_get_iv_from_name.
   Restore from pristine graphite.c before compiling.  */
3708 /* Returns the tree variable from the name NAME that was given in
3709 Cloog representation. All the parameters are stored in PARAMS, and
3710 all the loop induction variables are stored in IVSTACK.
3712 FIXME: This is a hack, and Cloog should be fixed to not work with
3713 variable names represented as "char *string", but with void
3714 pointers that could be casted back to a tree. The only problem in
3715 doing that is that Cloog's pretty printer still assumes that
3716 variable names are char *strings. The solution would be to have a
3717 function pointer for pretty-printing that can be redirected to be
3718 print_generic_stmt in our case, or fprintf by default.
3719 ??? Too ugly to live. */
3722 clast_name_to_gcc (const char *name
, VEC (name_tree
, heap
) *params
,
3723 loop_iv_stack ivstack
)
3730 for (i
= 0; VEC_iterate (name_tree
, params
, i
, t
); i
++)
3731 if (!strcmp (name
, t
->name
))
3734 iv
= loop_iv_stack_get_iv_from_name (ivstack
, name
);
3741 /* Returns the maximal precision type for expressions E1 and E2. */
3744 max_precision_type (tree e1
, tree e2
)
3746 tree type1
= TREE_TYPE (e1
);
3747 tree type2
= TREE_TYPE (e2
);
3748 return TYPE_PRECISION (type1
) > TYPE_PRECISION (type2
) ? type1
: type2
;
3752 clast_to_gcc_expression (tree
, struct clast_expr
*, VEC (name_tree
, heap
) *,
3755 /* Converts a Cloog reduction expression R with reduction operation OP
3756 to a GCC expression tree of type TYPE. PARAMS is a vector of
3757 parameters of the scop, and IVSTACK contains the stack of induction
3761 clast_to_gcc_expression_red (tree type
, enum tree_code op
,
3762 struct clast_reduction
*r
,
3763 VEC (name_tree
, heap
) *params
,
3764 loop_iv_stack ivstack
)
3767 tree res
= clast_to_gcc_expression (type
, r
->elts
[0], params
, ivstack
);
3769 for (i
= 1; i
< r
->n
; i
++)
3771 tree t
= clast_to_gcc_expression (type
, r
->elts
[i
], params
, ivstack
);
3772 res
= fold_build2 (op
, type
, res
, t
);
/* NOTE(review): extraction damage -- the outer switch on E->type, the
   clast_expr/clast_red/clast_bin case labels, gcc_unreachable and braces
   were dropped.  Visible logic: a term with a var becomes the looked-up
   name (folded, possibly negated or multiplied by the GMP constant), a
   constant term becomes gmp_cst_to_tree; reductions dispatch to
   clast_to_gcc_expression_red with PLUS/MIN/MAX; binaries build
   FLOOR_DIV / CEIL_DIV / EXACT_DIV / TRUNC_MOD from LHS and the GMP RHS.
   Restore from pristine graphite.c before compiling.  */
3777 /* Converts a Cloog AST expression E back to a GCC expression tree of
3778 type TYPE. PARAMS is a vector of parameters of the scop, and
3779 IVSTACK contains the stack of induction variables. */
3782 clast_to_gcc_expression (tree type
, struct clast_expr
*e
,
3783 VEC (name_tree
, heap
) *params
,
3784 loop_iv_stack ivstack
)
3790 struct clast_term
*t
= (struct clast_term
*) e
;
3794 if (value_one_p (t
->val
))
3796 tree name
= clast_name_to_gcc (t
->var
, params
, ivstack
);
3797 return fold_convert (type
, name
);
3800 else if (value_mone_p (t
->val
))
3802 tree name
= clast_name_to_gcc (t
->var
, params
, ivstack
);
3803 name
= fold_convert (type
, name
);
3804 return fold_build1 (NEGATE_EXPR
, type
, name
);
3808 tree name
= clast_name_to_gcc (t
->var
, params
, ivstack
);
3809 tree cst
= gmp_cst_to_tree (type
, t
->val
);
3810 name
= fold_convert (type
, name
);
3811 return fold_build2 (MULT_EXPR
, type
, cst
, name
);
3815 return gmp_cst_to_tree (type
, t
->val
);
3820 struct clast_reduction
*r
= (struct clast_reduction
*) e
;
3825 return clast_to_gcc_expression_red (type
, PLUS_EXPR
, r
, params
, ivstack
);
3828 return clast_to_gcc_expression_red (type
, MIN_EXPR
, r
, params
, ivstack
);
3831 return clast_to_gcc_expression_red (type
, MAX_EXPR
, r
, params
, ivstack
);
3841 struct clast_binary
*b
= (struct clast_binary
*) e
;
3842 struct clast_expr
*lhs
= (struct clast_expr
*) b
->LHS
;
3843 tree tl
= clast_to_gcc_expression (type
, lhs
, params
, ivstack
);
3844 tree tr
= gmp_cst_to_tree (type
, b
->RHS
);
3848 case clast_bin_fdiv
:
3849 return fold_build2 (FLOOR_DIV_EXPR
, type
, tl
, tr
);
3851 case clast_bin_cdiv
:
3852 return fold_build2 (CEIL_DIV_EXPR
, type
, tl
, tr
);
3855 return fold_build2 (EXACT_DIV_EXPR
, type
, tl
, tr
);
3858 return fold_build2 (TRUNC_MOD_EXPR
, type
, tl
, tr
);
/* NOTE(review): extraction damage -- the outer switch, case labels,
   several returns and braces were dropped.  Visible logic: a term's type
   comes from the looked-up variable; a reduction takes the type of its
   first element (with a loop over the remaining elements whose body is
   partly missing here); a binary takes the type of its LHS.  Restore from
   pristine graphite.c before compiling.  */
3872 /* Returns the type for the expression E. */
3875 gcc_type_for_clast_expr (struct clast_expr
*e
,
3876 VEC (name_tree
, heap
) *params
,
3877 loop_iv_stack ivstack
)
3883 struct clast_term
*t
= (struct clast_term
*) e
;
3886 return TREE_TYPE (clast_name_to_gcc (t
->var
, params
, ivstack
));
3893 struct clast_reduction
*r
= (struct clast_reduction
*) e
;
3896 return gcc_type_for_clast_expr (r
->elts
[0], params
, ivstack
);
3900 for (i
= 0; i
< r
->n
; i
++)
3902 tree type
= gcc_type_for_clast_expr (r
->elts
[i
], params
, ivstack
);
3912 struct clast_binary
*b
= (struct clast_binary
*) e
;
3913 struct clast_expr
*lhs
= (struct clast_expr
*) b
->LHS
;
3914 return gcc_type_for_clast_expr (lhs
, params
, ivstack
);
/* NOTE(review): extraction damage -- the return type and the branch
   between the two returns (presumably an early return when the LHS type
   is non-NULL -- TODO confirm against pristine graphite.c) were dropped.
   Visible logic: derive the equation's type from its LHS, falling back to
   its RHS.  */
3924 /* Returns the type for the equation CLEQ. */
3927 gcc_type_for_clast_eq (struct clast_equation
*cleq
,
3928 VEC (name_tree
, heap
) *params
,
3929 loop_iv_stack ivstack
)
3931 tree type
= gcc_type_for_clast_expr (cleq
->LHS
, params
, ivstack
);
3935 return gcc_type_for_clast_expr (cleq
->RHS
, params
, ivstack
);
/* NOTE(review): extraction damage -- the return type, the assignments to
   COMP in the sign branches (presumably EQ/GE/LE comparison codes -- TODO
   confirm against pristine graphite.c) and braces were dropped.  Visible
   logic: convert both sides of CLEQ with clast_to_gcc_expression in their
   common type, select a comparison code from cleq->sign, and fold the
   comparison.  */
3938 /* Translates a clast equation CLEQ to a tree. */
3941 graphite_translate_clast_equation (scop_p scop
,
3942 struct clast_equation
*cleq
,
3943 loop_iv_stack ivstack
)
3945 enum tree_code comp
;
3946 tree type
= gcc_type_for_clast_eq (cleq
, SCOP_PARAMS (scop
), ivstack
);
3947 tree lhs
= clast_to_gcc_expression (type
, cleq
->LHS
, SCOP_PARAMS (scop
), ivstack
);
3948 tree rhs
= clast_to_gcc_expression (type
, cleq
->RHS
, SCOP_PARAMS (scop
), ivstack
);
3950 if (cleq
->sign
== 0)
3953 else if (cleq
->sign
> 0)
3959 return fold_build2 (comp
, type
, lhs
, rhs
);
/* NOTE(review): extraction damage -- the return type, the accumulator
   initialization, the else branch of the conjunction and the final return
   were dropped.  Visible logic: translate each equation of the guard with
   graphite_translate_clast_equation and AND them together with
   TRUTH_AND_EXPR.  Restore from pristine graphite.c before compiling.  */
3962 /* Creates the test for the condition in STMT. */
3965 graphite_create_guard_cond_expr (scop_p scop
, struct clast_guard
*stmt
,
3966 loop_iv_stack ivstack
)
3971 for (i
= 0; i
< stmt
->n
; i
++)
3973 tree eq
= graphite_translate_clast_equation (scop
, &stmt
->eq
[i
], ivstack
);
3976 cond
= fold_build2 (TRUTH_AND_EXPR
, TREE_TYPE (eq
), cond
, eq
);
3984 /* Creates a new if region corresponding to Cloog's guard. */
3987 graphite_create_new_guard (scop_p scop
, edge entry_edge
,
3988 struct clast_guard
*stmt
,
3989 loop_iv_stack ivstack
)
3991 tree cond_expr
= graphite_create_guard_cond_expr (scop
, stmt
, ivstack
);
3992 edge exit_edge
= create_empty_if_region_on_edge (entry_edge
, cond_expr
);
3996 /* Walks a CLAST and returns the first statement in the body of a
3999 static struct clast_user_stmt
*
4000 clast_get_body_of_loop (struct clast_stmt
*stmt
)
4003 || CLAST_STMT_IS_A (stmt
, stmt_user
))
4004 return (struct clast_user_stmt
*) stmt
;
4006 if (CLAST_STMT_IS_A (stmt
, stmt_for
))
4007 return clast_get_body_of_loop (((struct clast_for
*) stmt
)->body
);
4009 if (CLAST_STMT_IS_A (stmt
, stmt_guard
))
4010 return clast_get_body_of_loop (((struct clast_guard
*) stmt
)->then
);
4012 if (CLAST_STMT_IS_A (stmt
, stmt_block
))
4013 return clast_get_body_of_loop (((struct clast_block
*) stmt
)->body
);
4018 /* Returns the induction variable for the loop that gets translated to
4022 gcc_type_for_iv_of_clast_loop (struct clast_for
*stmt_for
)
4024 struct clast_user_stmt
*stmt
= clast_get_body_of_loop ((struct clast_stmt
*) stmt_for
);
4025 const char *cloog_iv
= stmt_for
->iterator
;
4026 CloogStatement
*cs
= stmt
->statement
;
4027 graphite_bb_p gbb
= (graphite_bb_p
) cloog_statement_usr (cs
);
4029 return gcc_type_for_cloog_iv (cloog_iv
, gbb
);
/* NOTE(review): extraction damage -- local declarations (iv_before and the
   outer-loop parameter line), the final return and braces were dropped.
   Visible logic: translate the clast bounds and stride into trees of the
   IV's type, create an empty loop on ENTRY_EDGE with a fresh "graphiteIV"
   variable, register it, and push the IV on IVSTACK under the clast
   iterator name.  Restore from pristine graphite.c before compiling.  */
4032 /* Creates a new LOOP corresponding to Cloog's STMT. Inserts an induction
4033 variable for the new LOOP. New LOOP is attached to CFG starting at
4034 ENTRY_EDGE. LOOP is inserted into the loop tree and becomes the child
4035 loop of the OUTER_LOOP. */
4037 static struct loop
*
4038 graphite_create_new_loop (scop_p scop
, edge entry_edge
,
4039 struct clast_for
*stmt
, loop_iv_stack ivstack
,
4042 tree type
= gcc_type_for_iv_of_clast_loop (stmt
);
4043 VEC (name_tree
, heap
) *params
= SCOP_PARAMS (scop
);
4044 tree lb
= clast_to_gcc_expression (type
, stmt
->LB
, params
, ivstack
);
4045 tree ub
= clast_to_gcc_expression (type
, stmt
->UB
, params
, ivstack
);
4046 tree stride
= gmp_cst_to_tree (type
, stmt
->stride
);
4047 tree ivvar
= create_tmp_var (type
, "graphiteIV");
4049 loop_p loop
= create_empty_loop_on_edge
4050 (entry_edge
, lb
, stride
, ub
, ivvar
, &iv_before
,
4051 outer
? outer
: entry_edge
->src
->loop_father
);
4053 add_referenced_var (ivvar
);
4054 loop_iv_stack_push_iv (ivstack
, iv_before
, stmt
->iterator
);
/* NOTE(review): extraction damage -- the return type, the ssa_op_iter
   declaration, braces and any trailing statement update were dropped.
   Visible logic: for each SSA use operand of STMT, look up the new name
   in MAP and substitute it with replace_exp.  Restore from pristine
   graphite.c before compiling.  */
4058 /* Rename the SSA_NAMEs used in STMT and that appear in IVSTACK. */
4061 rename_variables_in_stmt (gimple stmt
, htab_t map
)
4064 use_operand_p use_p
;
4066 FOR_EACH_SSA_USE_OPERAND (use_p
, stmt
, iter
, SSA_OP_USE
)
4068 tree use
= USE_FROM_PTR (use_p
);
4069 tree new_name
= get_new_name_from_old_name (map
, use
);
4071 replace_exp (use_p
, new_name
);
/* NOTE(review): extraction damage -- the return type, the param iterator
   declaration, the return statements and braces were dropped.  Visible
   logic: linear search of SCOP_PARAMS for a name_tree whose tree equals
   SSA_NAME.  Restore from pristine graphite.c before compiling.  */
4077 /* Returns true if SSA_NAME is a parameter of SCOP. */
4080 is_parameter (scop_p scop
, tree ssa_name
)
4083 VEC (name_tree
, heap
) *params
= SCOP_PARAMS (scop
);
4086 for (i
= 0; VEC_iterate (name_tree
, params
, i
, param
); i
++)
4087 if (param
->t
== ssa_name
)
/* NOTE(review): extraction damage -- the function signature (presumably
   a static bool predicate taking a tree NAME -- TODO confirm against
   pristine graphite.c) was dropped; only the comment and the return
   expression remain: NAME is an IV iff its defining statement is a PHI.  */
4093 /* Returns true if NAME is an induction variable. */
4098 return gimple_code (SSA_NAME_DEF_STMT (name
)) == GIMPLE_PHI
;
4101 static void expand_scalar_variables_stmt (gimple
, basic_block
, scop_p
,
4104 expand_scalar_variables_expr (tree
, tree
, enum tree_code
, tree
, basic_block
,
4105 scop_p
, htab_t
, gimple_stmt_iterator
*);
/* NOTE(review): extraction damage -- the return type, part of the
   is_parameter condition, braces and the tail of the final call were
   dropped.  Visible logic: parameters map directly through MAP; a
   definition in BB only has its operands expanded; a non-assignment or
   out-of-SCoP definition maps through MAP; otherwise the defining
   assignment's rhs is re-expanded via expand_scalar_variables_expr.
   Restore from pristine graphite.c before compiling.  */
4107 /* Copies at GSI all the scalar computations on which the ssa_name OP0
4108 depends on in the SCOP: these are all the scalar variables used in
4109 the definition of OP0, that are defined outside BB and still in the
4110 SCOP, i.e. not a parameter of the SCOP. The expression that is
4111 returned contains only induction variables from the generated code:
4112 MAP contains the induction variables renaming mapping, and is used
4113 to translate the names of induction variables. */
4116 expand_scalar_variables_ssa_name (tree op0
, basic_block bb
,
4117 scop_p scop
, htab_t map
,
4118 gimple_stmt_iterator
*gsi
)
4120 tree var0
, var1
, type
;
4122 enum tree_code subcode
;
4124 if (is_parameter (scop
, op0
)
4126 return get_new_name_from_old_name (map
, op0
);
4128 def_stmt
= SSA_NAME_DEF_STMT (op0
);
4130 if (gimple_bb (def_stmt
) == bb
)
4132 /* If the defining statement is in the basic block already
4133 we do not need to create a new expression for it, we
4134 only need to ensure its operands are expanded. */
4135 expand_scalar_variables_stmt (def_stmt
, bb
, scop
, map
);
4136 return get_new_name_from_old_name (map
, op0
);
4140 if (gimple_code (def_stmt
) != GIMPLE_ASSIGN
4141 || !bb_in_scop_p (gimple_bb (def_stmt
), scop
))
4142 return get_new_name_from_old_name (map
, op0
);
4144 var0
= gimple_assign_rhs1 (def_stmt
);
4145 subcode
= gimple_assign_rhs_code (def_stmt
);
4146 var1
= gimple_assign_rhs2 (def_stmt
);
4147 type
= gimple_expr_type (def_stmt
);
4149 return expand_scalar_variables_expr (type
, var0
, subcode
, var1
, bb
, scop
,
/* NOTE(review): extraction damage -- several returns, the tcc_reference
   sub-switch labels (INDIRECT_REF / ARRAY_REF), gcc_unreachable, braces
   and trailing argument lists were dropped.  Visible logic: constants and
   declarations pass through; references rebuild an indirect ref through a
   re-expanded SSA name (copying the symbol mem tag) or rebuild an
   ARRAY_REF from re-expanded base and subscript; unary/binary expressions
   recurse on their operands and re-fold; an SSA_NAME defers to
   expand_scalar_variables_ssa_name.  Restore from pristine graphite.c
   before compiling.  */
4154 /* Copies at GSI all the scalar computations on which the expression
4155 OP0 CODE OP1 depends on in the SCOP: these are all the scalar
4156 variables used in OP0 and OP1, defined outside BB and still defined
4157 in the SCOP, i.e. not a parameter of the SCOP. The expression that
4158 is returned contains only induction variables from the generated
4159 code: MAP contains the induction variables renaming mapping, and is
4160 used to translate the names of induction variables. */
4163 expand_scalar_variables_expr (tree type
, tree op0
, enum tree_code code
,
4164 tree op1
, basic_block bb
, scop_p scop
,
4165 htab_t map
, gimple_stmt_iterator
*gsi
)
4167 if (TREE_CODE_CLASS (code
) == tcc_constant
4168 || TREE_CODE_CLASS (code
) == tcc_declaration
)
4171 /* For data references we have to duplicate also its memory
4173 if (TREE_CODE_CLASS (code
) == tcc_reference
)
4179 tree old_name
= TREE_OPERAND (op0
, 0);
4180 tree expr
= expand_scalar_variables_ssa_name
4181 (old_name
, bb
, scop
, map
, gsi
);
4182 tree new_name
= force_gimple_operand_gsi (gsi
, expr
, true, NULL
,
4183 true, GSI_SAME_STMT
);
4185 set_symbol_mem_tag (SSA_NAME_VAR (new_name
),
4186 symbol_mem_tag (SSA_NAME_VAR (old_name
)));
4187 return fold_build1 (code
, type
, new_name
);
4192 tree op00
= TREE_OPERAND (op0
, 0);
4193 tree op01
= TREE_OPERAND (op0
, 1);
4194 tree op02
= TREE_OPERAND (op0
, 2);
4195 tree op03
= TREE_OPERAND (op0
, 3);
4196 tree base
= expand_scalar_variables_expr
4197 (TREE_TYPE (op00
), op00
, TREE_CODE (op00
), NULL
, bb
, scop
,
4199 tree subscript
= expand_scalar_variables_expr
4200 (TREE_TYPE (op01
), op01
, TREE_CODE (op01
), NULL
, bb
, scop
,
4203 return build4 (ARRAY_REF
, type
, base
, subscript
, op02
, op03
);
4207 /* The above cases should catch everything. */
4212 if (TREE_CODE_CLASS (code
) == tcc_unary
)
4214 tree op0_type
= TREE_TYPE (op0
);
4215 enum tree_code op0_code
= TREE_CODE (op0
);
4216 tree op0_expr
= expand_scalar_variables_expr (op0_type
, op0
, op0_code
,
4217 NULL
, bb
, scop
, map
, gsi
);
4219 return fold_build1 (code
, type
, op0_expr
);
4222 if (TREE_CODE_CLASS (code
) == tcc_binary
)
4224 tree op0_type
= TREE_TYPE (op0
);
4225 enum tree_code op0_code
= TREE_CODE (op0
);
4226 tree op0_expr
= expand_scalar_variables_expr (op0_type
, op0
, op0_code
,
4227 NULL
, bb
, scop
, map
, gsi
);
4228 tree op1_type
= TREE_TYPE (op1
);
4229 enum tree_code op1_code
= TREE_CODE (op1
);
4230 tree op1_expr
= expand_scalar_variables_expr (op1_type
, op1
, op1_code
,
4231 NULL
, bb
, scop
, map
, gsi
);
4233 return fold_build2 (code
, type
, op0_expr
, op1_expr
);
4236 if (code
== SSA_NAME
)
4237 return expand_scalar_variables_ssa_name (op0
, bb
, scop
, map
, gsi
);
/* NOTE(review): extraction damage -- the return type, the ssa_op_iter and
   new_use declarations, braces and trailing argument lists were dropped.
   Visible logic: for each SSA use of STMT, expand it with
   expand_scalar_variables_expr; when that produced a different expression,
   gimplify it before the labels of BB and substitute it for the use.
   Restore from pristine graphite.c before compiling.  */
4243 /* Copies at the beginning of BB all the scalar computations on which
4244 STMT depends on in the SCOP: these are all the scalar variables used
4245 in STMT, defined outside BB and still defined in the SCOP, i.e. not a
4246 parameter of the SCOP. The expression that is returned contains
4247 only induction variables from the generated code: MAP contains the
4248 induction variables renaming mapping, and is used to translate the
4249 names of induction variables. */
4252 expand_scalar_variables_stmt (gimple stmt
, basic_block bb
, scop_p scop
,
4256 use_operand_p use_p
;
4257 gimple_stmt_iterator gsi
= gsi_after_labels (bb
);
4259 FOR_EACH_SSA_USE_OPERAND (use_p
, stmt
, iter
, SSA_OP_USE
)
4261 tree use
= USE_FROM_PTR (use_p
);
4262 tree type
= TREE_TYPE (use
);
4263 enum tree_code code
= TREE_CODE (use
);
4264 tree use_expr
= expand_scalar_variables_expr (type
, use
, code
, NULL
, bb
,
4266 if (use_expr
!= use
)
4269 force_gimple_operand_gsi (&gsi
, use_expr
, true, NULL
,
4270 true, GSI_NEW_STMT
);
4271 replace_exp (use_p
, new_use
);
/* NOTE(review): extraction damage -- the return type, braces and the loop
   advance inside the body (the for header has no increment clause; the
   iterator is presumably advanced in the dropped body line -- TODO confirm
   against pristine graphite.c) were dropped.  Visible logic: walk the
   statements of BB after its labels and expand each with
   expand_scalar_variables_stmt.  */
4278 /* Copies at the beginning of BB all the scalar computations on which
4279 BB depends on in the SCOP: these are all the scalar variables used
4280 in BB, defined outside BB and still defined in the SCOP, i.e. not a
4281 parameter of the SCOP. The expression that is returned contains
4282 only induction variables from the generated code: MAP contains the
4283 induction variables renaming mapping, and is used to translate the
4284 names of induction variables. */
4287 expand_scalar_variables (basic_block bb
, scop_p scop
, htab_t map
)
4289 gimple_stmt_iterator gsi
;
4291 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
);)
4293 gimple stmt
= gsi_stmt (gsi
);
4294 expand_scalar_variables_stmt (stmt
, bb
, scop
, map
);
4299 /* Rename all the SSA_NAMEs from block BB according to the MAP. */
4302 rename_variables (basic_block bb
, htab_t map
)
4304 gimple_stmt_iterator gsi
;
4306 for (gsi
= gsi_after_labels (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4307 rename_variables_in_stmt (gsi_stmt (gsi
), map
);
4310 /* Remove condition from BB. */
4313 remove_condition (basic_block bb
)
4315 gimple last
= last_stmt (bb
);
4317 if (last
&& gimple_code (last
) == GIMPLE_COND
)
4319 gimple_stmt_iterator gsi
= gsi_last_bb (bb
);
4320 gsi_remove (&gsi
, true);
4324 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag set. */
4327 get_true_edge_from_guard_bb (basic_block bb
)
4332 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
4333 if (e
->flags
& EDGE_TRUE_VALUE
)
4340 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag cleared. */
4343 get_false_edge_from_guard_bb (basic_block bb
)
4348 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
4349 if (!(e
->flags
& EDGE_TRUE_VALUE
))
4356 /* Inserts in MAP a tuple (OLD_NAME, NEW_NAME) for the induction
4357 variables of the loops around GBB in SCOP, i.e. GBB_LOOPS.
4358 NEW_NAME is obtained from IVSTACK. IVSTACK has the same stack
4359 ordering as GBB_LOOPS. */
4362 build_iv_mapping (loop_iv_stack ivstack
, htab_t map
, gbb_p gbb
, scop_p scop
)
4368 for (i
= 0; VEC_iterate (name_tree
, SCOP_OLDIVS (scop
), i
, iv
); i
++)
4370 struct rename_map_elt tmp
;
4372 if (!flow_bb_inside_loop_p (iv
->loop
, GBB_BB (gbb
)))
4375 tmp
.old_name
= iv
->t
;
4376 slot
= htab_find_slot (map
, &tmp
, INSERT
);
4380 tree new_name
= loop_iv_stack_get_iv (ivstack
,
4381 gbb_loop_index (gbb
, iv
->loop
));
4382 *slot
= new_rename_map_elt (iv
->t
, new_name
);
4387 /* Register in MAP the tuple (old_name, new_name). */
4390 register_old_and_new_names (htab_t map
, tree old_name
, tree new_name
)
4392 struct rename_map_elt tmp
;
4395 tmp
.old_name
= old_name
;
4396 slot
= htab_find_slot (map
, &tmp
, INSERT
);
4399 *slot
= new_rename_map_elt (old_name
, new_name
);
4402 /* Create a duplicate of the basic block BB. NOTE: This does not
4403 preserve SSA form. */
4406 graphite_copy_stmts_from_block (basic_block bb
, basic_block new_bb
, htab_t map
)
4408 gimple_stmt_iterator gsi
, gsi_tgt
;
4410 gsi_tgt
= gsi_start_bb (new_bb
);
4411 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4413 def_operand_p def_p
;
4414 ssa_op_iter op_iter
;
4416 gimple stmt
= gsi_stmt (gsi
);
4419 if (gimple_code (stmt
) == GIMPLE_LABEL
)
4422 /* Create a new copy of STMT and duplicate STMT's virtual
4424 copy
= gimple_copy (stmt
);
4425 gsi_insert_after (&gsi_tgt
, copy
, GSI_NEW_STMT
);
4426 mark_symbols_for_renaming (copy
);
4428 region
= lookup_stmt_eh_region (stmt
);
4430 add_stmt_to_eh_region (copy
, region
);
4431 gimple_duplicate_stmt_histograms (cfun
, copy
, cfun
, stmt
);
4433 /* Create new names for all the definitions created by COPY and
4434 add replacement mappings for each new name. */
4435 FOR_EACH_SSA_DEF_OPERAND (def_p
, copy
, op_iter
, SSA_OP_DEF
)
4437 tree old_name
= DEF_FROM_PTR (def_p
);
4438 tree new_name
= create_new_def_for (old_name
, copy
, def_p
);
4439 register_old_and_new_names (map
, old_name
, new_name
);
4444 /* Records in SCOP_LIVEOUT_RENAMES the names that are live out of
4445 the SCOP and that appear in the RENAME_MAP. */
4448 register_scop_liveout_renames (scop_p scop
, htab_t rename_map
)
4451 sese region
= SCOP_REGION (scop
);
4453 for (i
= 0; i
< SESE_NUM_VER (region
); i
++)
4454 if (bitmap_bit_p (SESE_LIVEOUT (region
), i
)
4455 && is_gimple_reg (ssa_name (i
)))
4457 tree old_name
= ssa_name (i
);
4458 tree new_name
= get_new_name_from_old_name (rename_map
, old_name
);
4460 register_old_and_new_names (SCOP_LIVEOUT_RENAMES (scop
),
4461 old_name
, new_name
);
4465 /* Copies BB and includes in the copied BB all the statements that can
4466 be reached following the use-def chains from the memory accesses,
4467 and returns the next edge following this new block. */
4470 copy_bb_and_scalar_dependences (basic_block bb
, scop_p scop
,
4471 edge next_e
, htab_t map
)
4473 basic_block new_bb
= split_edge (next_e
);
4475 next_e
= single_succ_edge (new_bb
);
4476 graphite_copy_stmts_from_block (bb
, new_bb
, map
);
4477 remove_condition (new_bb
);
4478 rename_variables (new_bb
, map
);
4479 remove_phi_nodes (new_bb
);
4480 expand_scalar_variables (new_bb
, scop
, map
);
4481 register_scop_liveout_renames (scop
, map
);
4486 /* Helper function for htab_traverse in insert_loop_close_phis. */
4489 add_loop_exit_phis (void **slot
, void *s
)
4491 struct rename_map_elt
*entry
= (struct rename_map_elt
*) *slot
;
4492 tree new_name
= entry
->new_name
;
4493 basic_block bb
= (basic_block
) s
;
4494 gimple phi
= create_phi_node (new_name
, bb
);
4495 tree res
= create_new_def_for (gimple_phi_result (phi
), phi
,
4496 gimple_phi_result_ptr (phi
));
4498 add_phi_arg (phi
, new_name
, single_pred_edge (bb
));
4500 entry
->new_name
= res
;
4505 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4506 form (OLD_NAME, NEW_NAME). Insert in BB "RES = phi (NEW_NAME)",
4507 and finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4511 insert_loop_close_phis (scop_p scop
, basic_block bb
)
4513 update_ssa (TODO_update_ssa
);
4514 htab_traverse (SCOP_LIVEOUT_RENAMES (scop
), add_loop_exit_phis
, bb
);
4515 update_ssa (TODO_update_ssa
);
4518 /* Helper structure for htab_traverse in insert_guard_phis. */
4522 edge true_edge
, false_edge
;
4523 htab_t liveout_before_guard
;
4526 /* Return the default name that is before the guard. */
4529 default_liveout_before_guard (htab_t liveout_before_guard
, tree old_name
)
4531 tree res
= get_new_name_from_old_name (liveout_before_guard
, old_name
);
4533 if (res
== old_name
)
4535 if (is_gimple_reg (res
))
4536 return fold_convert (TREE_TYPE (res
), integer_zero_node
);
4537 return gimple_default_def (cfun
, res
);
4543 /* Helper function for htab_traverse in insert_guard_phis. */
4546 add_guard_exit_phis (void **slot
, void *s
)
4548 struct rename_map_elt
*entry
= (struct rename_map_elt
*) *slot
;
4549 struct igp
*i
= (struct igp
*) s
;
4550 basic_block bb
= i
->bb
;
4551 edge true_edge
= i
->true_edge
;
4552 edge false_edge
= i
->false_edge
;
4553 tree name1
= entry
->new_name
;
4554 tree name2
= default_liveout_before_guard (i
->liveout_before_guard
,
4556 gimple phi
= create_phi_node (name1
, bb
);
4557 tree res
= create_new_def_for (gimple_phi_result (phi
), phi
,
4558 gimple_phi_result_ptr (phi
));
4560 add_phi_arg (phi
, name1
, true_edge
);
4561 add_phi_arg (phi
, name2
, false_edge
);
4563 entry
->new_name
= res
;
4568 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4569 form (OLD_NAME, NAME1). If there is a correspondent tuple of
4570 OLD_NAME in LIVEOUT_BEFORE_GUARD, i.e. (OLD_NAME, NAME2) then
4573 | RES = phi (NAME1 (on TRUE_EDGE), NAME2 (on FALSE_EDGE))"
4575 if there is no tuple for OLD_NAME in LIVEOUT_BEFORE_GUARD, insert
4577 | RES = phi (NAME1 (on TRUE_EDGE),
4578 | DEFAULT_DEFINITION of NAME1 (on FALSE_EDGE))".
4580 Finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4584 insert_guard_phis (scop_p scop
, basic_block bb
, edge true_edge
,
4585 edge false_edge
, htab_t liveout_before_guard
)
4589 i
.true_edge
= true_edge
;
4590 i
.false_edge
= false_edge
;
4591 i
.liveout_before_guard
= liveout_before_guard
;
4593 update_ssa (TODO_update_ssa
);
4594 htab_traverse (SCOP_LIVEOUT_RENAMES (scop
), add_guard_exit_phis
, &i
);
4595 update_ssa (TODO_update_ssa
);
4598 /* Helper function for htab_traverse. */
4601 copy_renames (void **slot
, void *s
)
4603 struct rename_map_elt
*entry
= (struct rename_map_elt
*) *slot
;
4604 htab_t res
= (htab_t
) s
;
4605 tree old_name
= entry
->old_name
;
4606 tree new_name
= entry
->new_name
;
4607 struct rename_map_elt tmp
;
4610 tmp
.old_name
= old_name
;
4611 x
= htab_find_slot (res
, &tmp
, INSERT
);
4614 *x
= new_rename_map_elt (old_name
, new_name
);
4619 /* Translates a CLAST statement STMT to GCC representation in the
4622 - NEXT_E is the edge where new generated code should be attached.
4623 - CONTEXT_LOOP is the loop in which the generated code will be placed
4625 - IVSTACK contains the surrounding loops around the statement to be
4630 translate_clast (scop_p scop
, struct loop
*context_loop
,
4631 struct clast_stmt
*stmt
, edge next_e
, loop_iv_stack ivstack
)
4636 if (CLAST_STMT_IS_A (stmt
, stmt_root
))
4637 return translate_clast (scop
, context_loop
, stmt
->next
, next_e
, ivstack
);
4639 if (CLAST_STMT_IS_A (stmt
, stmt_user
))
4642 CloogStatement
*cs
= ((struct clast_user_stmt
*) stmt
)->statement
;
4643 graphite_bb_p gbb
= (graphite_bb_p
) cloog_statement_usr (cs
);
4645 if (GBB_BB (gbb
) == ENTRY_BLOCK_PTR
)
4648 map
= htab_create (10, rename_map_elt_info
, eq_rename_map_elts
, free
);
4649 loop_iv_stack_patch_for_consts (ivstack
, (struct clast_user_stmt
*) stmt
);
4650 build_iv_mapping (ivstack
, map
, gbb
, scop
);
4651 next_e
= copy_bb_and_scalar_dependences (GBB_BB (gbb
), scop
,
4654 loop_iv_stack_remove_constants (ivstack
);
4655 update_ssa (TODO_update_ssa
);
4656 recompute_all_dominators ();
4658 return translate_clast (scop
, context_loop
, stmt
->next
, next_e
, ivstack
);
4661 if (CLAST_STMT_IS_A (stmt
, stmt_for
))
4664 = graphite_create_new_loop (scop
, next_e
, (struct clast_for
*) stmt
,
4665 ivstack
, context_loop
? context_loop
4667 edge last_e
= single_exit (loop
);
4669 next_e
= translate_clast (scop
, loop
, ((struct clast_for
*) stmt
)->body
,
4670 single_pred_edge (loop
->latch
), ivstack
);
4671 redirect_edge_succ_nodup (next_e
, loop
->latch
);
4673 set_immediate_dominator (CDI_DOMINATORS
, next_e
->dest
, next_e
->src
);
4674 loop_iv_stack_pop (ivstack
);
4675 last_e
= single_succ_edge (split_edge (last_e
));
4676 insert_loop_close_phis (scop
, last_e
->src
);
4678 recompute_all_dominators ();
4680 return translate_clast (scop
, context_loop
, stmt
->next
, last_e
, ivstack
);
4683 if (CLAST_STMT_IS_A (stmt
, stmt_guard
))
4685 htab_t liveout_before_guard
= htab_create (10, rename_map_elt_info
,
4686 eq_rename_map_elts
, free
);
4687 edge last_e
= graphite_create_new_guard (scop
, next_e
,
4688 ((struct clast_guard
*) stmt
),
4690 edge true_e
= get_true_edge_from_guard_bb (next_e
->dest
);
4691 edge false_e
= get_false_edge_from_guard_bb (next_e
->dest
);
4692 edge exit_true_e
= single_succ_edge (true_e
->dest
);
4693 edge exit_false_e
= single_succ_edge (false_e
->dest
);
4695 htab_traverse (SCOP_LIVEOUT_RENAMES (scop
), copy_renames
,
4696 liveout_before_guard
);
4698 next_e
= translate_clast (scop
, context_loop
,
4699 ((struct clast_guard
*) stmt
)->then
,
4701 insert_guard_phis (scop
, last_e
->src
, exit_true_e
, exit_false_e
,
4702 liveout_before_guard
);
4703 htab_delete (liveout_before_guard
);
4704 recompute_all_dominators ();
4707 return translate_clast (scop
, context_loop
, stmt
->next
, last_e
, ivstack
);
4710 if (CLAST_STMT_IS_A (stmt
, stmt_block
))
4712 next_e
= translate_clast (scop
, context_loop
,
4713 ((struct clast_block
*) stmt
)->body
,
4715 recompute_all_dominators ();
4717 return translate_clast (scop
, context_loop
, stmt
->next
, next_e
, ivstack
);
4723 /* Free the SCATTERING domain list. */
4726 free_scattering (CloogDomainList
*scattering
)
4730 CloogDomain
*dom
= cloog_domain (scattering
);
4731 CloogDomainList
*next
= cloog_next_domain (scattering
);
4733 cloog_domain_free (dom
);
4739 /* Build cloog program for SCoP. */
4742 build_cloog_prog (scop_p scop
)
4745 int max_nb_loops
= scop_max_loop_depth (scop
);
4747 CloogLoop
*loop_list
= NULL
;
4748 CloogBlockList
*block_list
= NULL
;
4749 CloogDomainList
*scattering
= NULL
;
4750 CloogProgram
*prog
= SCOP_PROG (scop
);
4751 int nbs
= 2 * max_nb_loops
+ 1;
4752 int *scaldims
= (int *) xmalloc (nbs
* (sizeof (int)));
4754 cloog_program_set_nb_scattdims (prog
, nbs
);
4755 initialize_cloog_names (scop
);
4757 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gbb
); i
++)
4759 /* Build new block. */
4760 CloogMatrix
*domain
= GBB_DOMAIN (gbb
);
4761 CloogStatement
*stmt
= cloog_statement_alloc (GBB_BB (gbb
)->index
);
4762 CloogBlock
*block
= cloog_block_alloc (stmt
, 0, NULL
,
4763 nb_loops_around_gb (gbb
));
4764 cloog_statement_set_usr (stmt
, gbb
);
4766 /* Add empty domain to all bbs, which do not yet have a domain, as they
4767 are not part of any loop. */
4770 domain
= cloog_matrix_alloc (0, scop_nb_params (scop
) + 2);
4771 GBB_DOMAIN (gbb
) = domain
;
4774 /* Build loop list. */
4776 CloogLoop
*new_loop_list
= cloog_loop_malloc ();
4777 cloog_loop_set_next (new_loop_list
, loop_list
);
4778 cloog_loop_set_domain (new_loop_list
,
4779 cloog_domain_matrix2domain (domain
));
4780 cloog_loop_set_block (new_loop_list
, block
);
4781 loop_list
= new_loop_list
;
4784 /* Build block list. */
4786 CloogBlockList
*new_block_list
= cloog_block_list_malloc ();
4788 cloog_block_list_set_next (new_block_list
, block_list
);
4789 cloog_block_list_set_block (new_block_list
, block
);
4790 block_list
= new_block_list
;
4793 /* Build scattering list. */
4795 /* XXX: Replace with cloog_domain_list_alloc(), when available. */
4796 CloogDomainList
*new_scattering
4797 = (CloogDomainList
*) xmalloc (sizeof (CloogDomainList
));
4798 CloogMatrix
*scat_mat
= schedule_to_scattering (gbb
, nbs
);
4800 cloog_set_next_domain (new_scattering
, scattering
);
4801 cloog_set_domain (new_scattering
,
4802 cloog_domain_matrix2domain (scat_mat
));
4803 scattering
= new_scattering
;
4804 cloog_matrix_free (scat_mat
);
4808 cloog_program_set_loop (prog
, loop_list
);
4809 cloog_program_set_blocklist (prog
, block_list
);
4811 for (i
= 0; i
< nbs
; i
++)
4814 cloog_program_set_scaldims (prog
, scaldims
);
4816 /* Extract scalar dimensions to simplify the code generation problem. */
4817 cloog_program_extract_scalars (prog
, scattering
);
4819 /* Apply scattering. */
4820 cloog_program_scatter (prog
, scattering
);
4821 free_scattering (scattering
);
4823 /* Iterators corresponding to scalar dimensions have to be extracted. */
4824 cloog_names_scalarize (cloog_program_names (prog
), nbs
,
4825 cloog_program_scaldims (prog
));
4827 /* Free blocklist. */
4829 CloogBlockList
*next
= cloog_program_blocklist (prog
);
4833 CloogBlockList
*toDelete
= next
;
4834 next
= cloog_block_list_next (next
);
4835 cloog_block_list_set_next (toDelete
, NULL
);
4836 cloog_block_list_set_block (toDelete
, NULL
);
4837 cloog_block_list_free (toDelete
);
4839 cloog_program_set_blocklist (prog
, NULL
);
4843 /* Return the options that will be used in GLOOG. */
4845 static CloogOptions
*
4846 set_cloog_options (void)
4848 CloogOptions
*options
= cloog_options_malloc ();
4850 /* Change cloog output language to C. If we do use FORTRAN instead, cloog
4851 will stop e.g. with "ERROR: unbounded loops not allowed in FORTRAN.", if
4852 we pass an incomplete program to cloog. */
4853 options
->language
= LANGUAGE_C
;
4855 /* Enable complex equality spreading: removes dummy statements
4856 (assignments) in the generated code which repeats the
4857 substitution equations for statements. This is useless for
4861 /* Enable C pretty-printing mode: normalizes the substitution
4862 equations for statements. */
4865 /* Allow cloog to build strides with a stride width different to one.
4866 This example has stride = 4:
4868 for (i = 0; i < 20; i += 4)
4870 options
->strides
= 1;
4872 /* Disable optimizations and make cloog generate source code closer to the
4873 input. This is useful for debugging, but later we want the optimized
4876 XXX: We can not disable optimizations, as loop blocking is not working
4881 options
->l
= INT_MAX
;
4887 /* Prints STMT to STDERR. */
4890 debug_clast_stmt (struct clast_stmt
*stmt
)
4892 CloogOptions
*options
= set_cloog_options ();
4894 pprint (stderr
, stmt
, 0, options
);
4897 /* Find the right transform for the SCOP, and return a Cloog AST
4898 representing the new form of the program. */
4900 static struct clast_stmt
*
4901 find_transform (scop_p scop
)
4903 struct clast_stmt
*stmt
;
4904 CloogOptions
*options
= set_cloog_options ();
4906 /* Connect new cloog prog generation to graphite. */
4907 build_cloog_prog (scop
);
4909 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4911 fprintf (dump_file
, "Cloog Input [\n");
4912 cloog_program_print (dump_file
, SCOP_PROG(scop
));
4913 fprintf (dump_file
, "]\n");
4916 SCOP_PROG (scop
) = cloog_program_generate (SCOP_PROG (scop
), options
);
4917 stmt
= cloog_clast_create (SCOP_PROG (scop
), options
);
4919 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
4921 fprintf (dump_file
, "Cloog Output[\n");
4922 pprint (dump_file
, stmt
, 0, options
);
4923 cloog_program_dump_cloog (dump_file
, SCOP_PROG (scop
));
4924 fprintf (dump_file
, "]\n");
4927 cloog_options_free (options
);
4931 /* Remove from the CFG the REGION. */
4934 remove_sese_region (sese region
)
4936 VEC (basic_block
, heap
) *bbs
= NULL
;
4937 basic_block entry_bb
= SESE_ENTRY (region
)->dest
;
4938 basic_block exit_bb
= SESE_EXIT (region
)->dest
;
4942 VEC_safe_push (basic_block
, heap
, bbs
, entry_bb
);
4943 gather_blocks_in_sese_region (entry_bb
, exit_bb
, &bbs
);
4945 for (i
= 0; VEC_iterate (basic_block
, bbs
, i
, bb
); i
++)
4946 delete_basic_block (bb
);
4948 VEC_free (basic_block
, heap
, bbs
);
4951 typedef struct ifsese
{
4958 if_region_entry (ifsese if_region
)
4960 return SESE_ENTRY (if_region
->region
);
4964 if_region_exit (ifsese if_region
)
4966 return SESE_EXIT (if_region
->region
);
4969 static inline basic_block
4970 if_region_get_condition_block (ifsese if_region
)
4972 return if_region_entry (if_region
)->dest
;
4976 if_region_set_false_region (ifsese if_region
, sese region
)
4978 basic_block condition
= if_region_get_condition_block (if_region
);
4979 edge false_edge
= get_false_edge_from_guard_bb (condition
);
4980 edge entry_region
= SESE_ENTRY (region
);
4981 edge exit_region
= SESE_EXIT (region
);
4982 basic_block before_region
= entry_region
->src
;
4983 basic_block last_in_region
= exit_region
->src
;
4984 void **slot
= htab_find_slot_with_hash (current_loops
->exits
, exit_region
,
4985 htab_hash_pointer (exit_region
),
4988 entry_region
->flags
= false_edge
->flags
;
4989 false_edge
->flags
= exit_region
->flags
;
4991 redirect_edge_pred (entry_region
, condition
);
4992 redirect_edge_pred (exit_region
, before_region
);
4993 redirect_edge_pred (false_edge
, last_in_region
);
4995 exit_region
->flags
= EDGE_FALLTHRU
;
4996 recompute_all_dominators ();
4998 SESE_EXIT (region
) = single_succ_edge (false_edge
->dest
);
4999 if_region
->false_region
= region
;
5003 struct loop_exit
*loop_exit
= GGC_CNEW (struct loop_exit
);
5005 memcpy (loop_exit
, *((struct loop_exit
**) slot
), sizeof (struct loop_exit
));
5006 htab_clear_slot (current_loops
->exits
, slot
);
5008 slot
= htab_find_slot_with_hash (current_loops
->exits
, false_edge
,
5009 htab_hash_pointer (false_edge
),
5011 loop_exit
->e
= false_edge
;
5013 false_edge
->src
->loop_father
->exits
->next
= loop_exit
;
5018 create_if_region_on_edge (edge entry
, tree condition
)
5022 sese sese_region
= GGC_NEW (struct sese
);
5023 sese true_region
= GGC_NEW (struct sese
);
5024 sese false_region
= GGC_NEW (struct sese
);
5025 ifsese if_region
= GGC_NEW (struct ifsese
);
5026 edge exit
= create_empty_if_region_on_edge (entry
, condition
);
5028 if_region
->region
= sese_region
;
5029 if_region
->region
->entry
= entry
;
5030 if_region
->region
->exit
= exit
;
5032 FOR_EACH_EDGE (e
, ei
, entry
->dest
->succs
)
5034 if (e
->flags
& EDGE_TRUE_VALUE
)
5036 true_region
->entry
= e
;
5037 true_region
->exit
= single_succ_edge (e
->dest
);
5038 if_region
->true_region
= true_region
;
5040 else if (e
->flags
& EDGE_FALSE_VALUE
)
5042 false_region
->entry
= e
;
5043 false_region
->exit
= single_succ_edge (e
->dest
);
5044 if_region
->false_region
= false_region
;
5051 /* Moves REGION in a condition expression:
5059 move_sese_in_condition (sese region
)
5061 basic_block pred_block
= split_edge (SESE_ENTRY (region
));
5062 ifsese if_region
= NULL
;
5064 SESE_ENTRY (region
) = single_succ_edge (pred_block
);
5065 if_region
= create_if_region_on_edge (single_pred_edge (pred_block
), integer_one_node
);
5066 if_region_set_false_region (if_region
, region
);
5071 /* Add exit phis for USE on EXIT. */
5074 scop_add_exit_phis_edge (basic_block exit
, tree use
, edge false_e
, edge true_e
)
5076 gimple phi
= create_phi_node (use
, exit
);
5078 create_new_def_for (gimple_phi_result (phi
), phi
,
5079 gimple_phi_result_ptr (phi
));
5080 add_phi_arg (phi
, use
, false_e
);
5081 add_phi_arg (phi
, use
, true_e
);
5084 /* Add phi nodes for VAR that is used in LIVEIN. Phi nodes are
5085 inserted in block BB. */
5088 scop_add_exit_phis_var (basic_block bb
, tree var
, bitmap livein
,
5089 edge false_e
, edge true_e
)
5092 basic_block def_bb
= gimple_bb (SSA_NAME_DEF_STMT (var
));
5094 if (is_gimple_reg (var
))
5095 bitmap_clear_bit (livein
, def_bb
->index
);
5097 bitmap_set_bit (livein
, def_bb
->index
);
5099 def
= BITMAP_ALLOC (NULL
);
5100 bitmap_set_bit (def
, def_bb
->index
);
5101 compute_global_livein (livein
, def
);
5104 scop_add_exit_phis_edge (bb
, var
, false_e
, true_e
);
5107 /* Insert in the block BB phi nodes for variables defined in REGION
5108 and used outside the REGION. The code generation moves REGION in
5109 the else clause of an "if (1)" and generates code in the then
5110 clause that is at this point empty:
5119 scop_insert_phis_for_liveouts (sese region
, basic_block bb
,
5120 edge false_e
, edge true_e
)
5125 update_ssa (TODO_update_ssa
);
5127 EXECUTE_IF_SET_IN_BITMAP (SESE_LIVEOUT (region
), 0, i
, bi
)
5128 scop_add_exit_phis_var (bb
, ssa_name (i
), SESE_LIVEIN_VER (region
, i
),
5131 update_ssa (TODO_update_ssa
);
5134 /* Get the definition of NAME before the SCOP. Keep track of the
5135 basic blocks that have been VISITED in a bitmap. */
5138 get_vdef_before_scop (scop_p scop
, tree name
, sbitmap visited
)
5141 gimple def_stmt
= SSA_NAME_DEF_STMT (name
);
5142 basic_block def_bb
= gimple_bb (def_stmt
);
5145 || !bb_in_scop_p (def_bb
, scop
))
5148 if (TEST_BIT (visited
, def_bb
->index
))
5151 SET_BIT (visited
, def_bb
->index
);
5153 switch (gimple_code (def_stmt
))
5156 for (i
= 0; i
< gimple_phi_num_args (def_stmt
); i
++)
5158 tree arg
= gimple_phi_arg_def (def_stmt
, i
);
5159 tree res
= get_vdef_before_scop (scop
, arg
, visited
);
5170 /* Adjust a virtual phi node PHI that is placed at the end of the
5171 generated code for SCOP:
5174 | generated code from REGION;
5178 The FALSE_E edge comes from the original code, TRUE_E edge comes
5179 from the code generated for the SCOP. */
5182 scop_adjust_vphi (scop_p scop
, gimple phi
, edge true_e
)
5186 gcc_assert (gimple_phi_num_args (phi
) == 2);
5188 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
5189 if (gimple_phi_arg_edge (phi
, i
) == true_e
)
5191 tree true_arg
, false_arg
, before_scop_arg
;
5194 true_arg
= gimple_phi_arg_def (phi
, i
);
5195 if (!SSA_NAME_IS_DEFAULT_DEF (true_arg
))
5198 false_arg
= gimple_phi_arg_def (phi
, i
== 0 ? 1 : 0);
5199 if (SSA_NAME_IS_DEFAULT_DEF (false_arg
))
5202 visited
= sbitmap_alloc (last_basic_block
);
5203 sbitmap_zero (visited
);
5204 before_scop_arg
= get_vdef_before_scop (scop
, false_arg
, visited
);
5205 gcc_assert (before_scop_arg
!= NULL_TREE
);
5206 SET_PHI_ARG_DEF (phi
, i
, before_scop_arg
);
5207 sbitmap_free (visited
);
5211 /* Adjusts the phi nodes in the block BB for variables defined in
5212 SCOP_REGION and used outside the SCOP_REGION. The code generation
5213 moves SCOP_REGION in the else clause of an "if (1)" and generates
5214 code in the then clause:
5217 | generated code from REGION;
5221 To adjust the phi nodes after the condition, SCOP_LIVEOUT_RENAMES
5222 hash table is used: this stores for a name that is part of the
5223 LIVEOUT of SCOP_REGION its new name in the generated code. */
5226 scop_adjust_phis_for_liveouts (scop_p scop
, basic_block bb
, edge false_e
,
5229 gimple_stmt_iterator si
;
5231 for (si
= gsi_start_phis (bb
); !gsi_end_p (si
); gsi_next (&si
))
5234 unsigned false_i
= 0;
5235 gimple phi
= gsi_stmt (si
);
5237 if (!is_gimple_reg (PHI_RESULT (phi
)))
5239 scop_adjust_vphi (scop
, phi
, true_e
);
5243 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
5244 if (gimple_phi_arg_edge (phi
, i
) == false_e
)
5250 for (i
= 0; i
< gimple_phi_num_args (phi
); i
++)
5251 if (gimple_phi_arg_edge (phi
, i
) == true_e
)
5253 tree old_name
= gimple_phi_arg_def (phi
, false_i
);
5254 tree new_name
= get_new_name_from_old_name
5255 (SCOP_LIVEOUT_RENAMES (scop
), old_name
);
5257 gcc_assert (old_name
!= new_name
);
5258 SET_PHI_ARG_DEF (phi
, i
, new_name
);
5263 /* Returns the first cloog name used in EXPR. */
5266 find_cloog_iv_in_expr (struct clast_expr
*expr
)
5268 struct clast_term
*term
= (struct clast_term
*) expr
;
5270 if (expr
->type
== expr_term
5274 if (expr
->type
== expr_term
)
5277 if (expr
->type
== expr_red
)
5280 struct clast_reduction
*red
= (struct clast_reduction
*) expr
;
5282 for (i
= 0; i
< red
->n
; i
++)
5284 const char *res
= find_cloog_iv_in_expr ((red
)->elts
[i
]);
5294 /* Build for a clast_user_stmt USER_STMT a map between the CLAST
5295 induction variables and the corresponding GCC old induction
5296 variables. This information is stored on each GRAPHITE_BB. */
5299 compute_cloog_iv_types_1 (graphite_bb_p gbb
,
5300 struct clast_user_stmt
*user_stmt
)
5302 struct clast_stmt
*t
;
5305 for (t
= user_stmt
->substitutions
; t
; t
= t
->next
, index
++)
5308 struct ivtype_map_elt tmp
;
5309 struct clast_expr
*expr
= (struct clast_expr
*)
5310 ((struct clast_assignment
*)t
)->RHS
;
5312 /* Create an entry (clast_var, type). */
5313 tmp
.cloog_iv
= find_cloog_iv_in_expr (expr
);
5317 slot
= htab_find_slot (GBB_CLOOG_IV_TYPES (gbb
), &tmp
, INSERT
);
5321 loop_p loop
= gbb_loop_at_index (gbb
, index
);
5322 tree oldiv
= oldiv_for_loop (GBB_SCOP (gbb
), loop
);
5323 tree type
= oldiv
? TREE_TYPE (oldiv
) : integer_type_node
;
5324 *slot
= new_ivtype_map_elt (tmp
.cloog_iv
, type
);
5329 /* Walk the CLAST tree starting from STMT and build for each
5330 clast_user_stmt a map between the CLAST induction variables and the
5331 corresponding GCC old induction variables. This information is
5332 stored on each GRAPHITE_BB. */
5335 compute_cloog_iv_types (struct clast_stmt
*stmt
)
5340 if (CLAST_STMT_IS_A (stmt
, stmt_root
))
5343 if (CLAST_STMT_IS_A (stmt
, stmt_user
))
5345 CloogStatement
*cs
= ((struct clast_user_stmt
*) stmt
)->statement
;
5346 graphite_bb_p gbb
= (graphite_bb_p
) cloog_statement_usr (cs
);
5347 GBB_CLOOG_IV_TYPES (gbb
) = htab_create (10, ivtype_map_elt_info
,
5348 eq_ivtype_map_elts
, free
);
5349 compute_cloog_iv_types_1 (gbb
, (struct clast_user_stmt
*) stmt
);
5353 if (CLAST_STMT_IS_A (stmt
, stmt_for
))
5355 struct clast_stmt
*s
= ((struct clast_for
*) stmt
)->body
;
5356 compute_cloog_iv_types (s
);
5360 if (CLAST_STMT_IS_A (stmt
, stmt_guard
))
5362 struct clast_stmt
*s
= ((struct clast_guard
*) stmt
)->then
;
5363 compute_cloog_iv_types (s
);
5367 if (CLAST_STMT_IS_A (stmt
, stmt_block
))
5369 struct clast_stmt
*s
= ((struct clast_block
*) stmt
)->body
;
5370 compute_cloog_iv_types (s
);
5377 compute_cloog_iv_types (stmt
->next
);
5380 /* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
5381 the given SCOP. Return true if code generation succeeded. */
5384 gloog (scop_p scop
, struct clast_stmt
*stmt
)
5386 edge new_scop_exit_edge
= NULL
;
5387 VEC (iv_stack_entry_p
, heap
) *ivstack
= VEC_alloc (iv_stack_entry_p
, heap
,
5389 loop_p context_loop
;
5390 ifsese if_region
= NULL
;
5392 /* To maintain the loop closed SSA form, we have to keep the phi
5393 nodes after the last loop in the scop. */
5394 if (loop_depth (SESE_EXIT (SCOP_REGION (scop
))->dest
->loop_father
)
5395 != loop_depth (SESE_EXIT (SCOP_REGION (scop
))->src
->loop_father
))
5397 basic_block bb
= SESE_EXIT (SCOP_REGION (scop
))->dest
;
5398 SESE_EXIT (SCOP_REGION (scop
)) = split_block_after_labels (bb
);
5399 bitmap_set_bit (SCOP_BBS_B (scop
), bb
->index
);
5400 pointer_set_insert (SESE_REGION_BBS (SCOP_REGION (scop
)), bb
);
5403 recompute_all_dominators ();
5405 if_region
= move_sese_in_condition (SCOP_REGION (scop
));
5406 sese_build_livein_liveouts (SCOP_REGION (scop
));
5407 scop_insert_phis_for_liveouts (SCOP_REGION (scop
),
5408 if_region
->region
->exit
->src
,
5409 if_region
->false_region
->exit
,
5410 if_region
->true_region
->exit
);
5411 recompute_all_dominators ();
5413 context_loop
= SESE_ENTRY (SCOP_REGION (scop
))->src
->loop_father
;
5414 compute_cloog_iv_types (stmt
);
5416 new_scop_exit_edge
= translate_clast (scop
, context_loop
, stmt
,
5417 if_region
->true_region
->entry
,
5419 free_loop_iv_stack (&ivstack
);
5420 cloog_clast_free (stmt
);
5423 scop_adjust_phis_for_liveouts (scop
,
5424 if_region
->region
->exit
->src
,
5425 if_region
->false_region
->exit
,
5426 if_region
->true_region
->exit
);
5428 recompute_all_dominators ();
5433 /* Returns the number of data references in SCOP. */
5436 nb_data_refs_in_scop (scop_p scop
)
5442 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gbb
); i
++)
5443 res
+= VEC_length (data_reference_p
, GBB_DATA_REFS (gbb
));
5448 /* Move the loop at index LOOP and insert it before index NEW_LOOP_POS.
5449 This transformartion is only valid, if the loop nest between i and k is
5450 perfectly nested. Therefore we do not need to change the static schedule.
5454 for (i = 0; i < 50; i++)
5456 for (k = 5; k < 100; k++)
5459 To move k before i use:
5461 graphite_trans_bb_move_loop (A, 2, 0)
5463 for (k = 5; k < 100; k++)
5464 for (i = 0; i < 50; i++)
5470 graphite_trans_bb_move_loop (A, 0, 2)
5472 This function does not check the validity of interchanging loops.
5473 This should be checked before calling this function. */
5476 graphite_trans_bb_move_loop (graphite_bb_p gb
, int loop
,
5479 CloogMatrix
*domain
= GBB_DOMAIN (gb
);
5483 gcc_assert (loop
< gbb_nb_loops (gb
)
5484 && new_loop_pos
< gbb_nb_loops (gb
));
5486 /* Update LOOPS vector. */
5487 tmp_loop_p
= VEC_index (loop_p
, GBB_LOOPS (gb
), loop
);
5488 VEC_ordered_remove (loop_p
, GBB_LOOPS (gb
), loop
);
5489 VEC_safe_insert (loop_p
, heap
, GBB_LOOPS (gb
), new_loop_pos
, tmp_loop_p
);
5491 /* Move the domain columns. */
5492 if (loop
< new_loop_pos
)
5493 for (row
= 0; row
< domain
->NbRows
; row
++)
5497 value_assign (tmp
, domain
->p
[row
][loop
+ 1]);
5499 for (j
= loop
; j
< new_loop_pos
- 1; j
++)
5500 value_assign (domain
->p
[row
][j
+ 1], domain
->p
[row
][j
+ 2]);
5502 value_assign (domain
->p
[row
][new_loop_pos
], tmp
);
5506 for (row
= 0; row
< domain
->NbRows
; row
++)
5510 value_assign (tmp
, domain
->p
[row
][loop
+ 1]);
5512 for (j
= loop
; j
> new_loop_pos
; j
--)
5513 value_assign (domain
->p
[row
][j
+ 1], domain
->p
[row
][j
]);
5515 value_assign (domain
->p
[row
][new_loop_pos
+ 1], tmp
);
5520 /* Get the index of the column representing constants in the DOMAIN
5524 const_column_index (CloogMatrix
*domain
)
5526 return domain
->NbColumns
- 1;
5530 /* Get the first index that is positive or negative, determined
5531 following the value of POSITIVE, in matrix DOMAIN in COLUMN. */
5534 get_first_matching_sign_row_index (CloogMatrix
*domain
, int column
,
5539 for (row
= 0; row
< domain
->NbRows
; row
++)
5541 int val
= value_get_si (domain
->p
[row
][column
]);
5543 if (val
> 0 && positive
)
5546 else if (val
< 0 && !positive
)
5553 /* Get the lower bound of COLUMN in matrix DOMAIN. */
5556 get_lower_bound_row (CloogMatrix
*domain
, int column
)
5558 return get_first_matching_sign_row_index (domain
, column
, true);
5561 /* Get the upper bound of COLUMN in matrix DOMAIN. */
5564 get_upper_bound_row (CloogMatrix
*domain
, int column
)
5566 return get_first_matching_sign_row_index (domain
, column
, false);
5569 /* Copies the OLD_ROW constraint from OLD_DOMAIN to the NEW_DOMAIN at
5573 copy_constraint (CloogMatrix
*old_domain
, CloogMatrix
*new_domain
,
5574 int old_row
, int new_row
)
5578 gcc_assert (old_domain
->NbColumns
== new_domain
->NbColumns
5579 && old_row
< old_domain
->NbRows
5580 && new_row
< new_domain
->NbRows
);
5582 for (i
= 0; i
< old_domain
->NbColumns
; i
++)
5583 value_assign (new_domain
->p
[new_row
][i
], old_domain
->p
[old_row
][i
]);
5586 /* Swap coefficients of variables X and Y on row R. */
5589 swap_constraint_variables (CloogMatrix
*domain
,
5590 int r
, int x
, int y
)
5592 value_swap (domain
->p
[r
][x
], domain
->p
[r
][y
]);
5595 /* Scale by X the coefficient C of constraint at row R in DOMAIN. */
5598 scale_constraint_variable (CloogMatrix
*domain
,
5599 int r
, int c
, int x
)
5601 Value strip_size_value
;
5602 value_init (strip_size_value
);
5603 value_set_si (strip_size_value
, x
);
5604 value_multiply (domain
->p
[r
][c
], domain
->p
[r
][c
], strip_size_value
);
5605 value_clear (strip_size_value
);
5608 /* Strip mines the loop of BB at the position LOOP_DEPTH with STRIDE.
5609 Always valid, but not always a performance improvement. */
5612 graphite_trans_bb_strip_mine (graphite_bb_p gb
, int loop_depth
, int stride
)
5616 CloogMatrix
*domain
= GBB_DOMAIN (gb
);
5617 CloogMatrix
*new_domain
= cloog_matrix_alloc (domain
->NbRows
+ 3,
5618 domain
->NbColumns
+ 1);
5620 int col_loop_old
= loop_depth
+ 2;
5621 int col_loop_strip
= col_loop_old
- 1;
5623 gcc_assert (loop_depth
<= gbb_nb_loops (gb
) - 1);
5625 VEC_safe_insert (loop_p
, heap
, GBB_LOOPS (gb
), loop_depth
, NULL
);
5627 GBB_DOMAIN (gb
) = new_domain
;
5629 for (row
= 0; row
< domain
->NbRows
; row
++)
5630 for (col
= 0; col
< domain
->NbColumns
; col
++)
5631 if (col
<= loop_depth
)
5632 value_assign (new_domain
->p
[row
][col
], domain
->p
[row
][col
]);
5634 value_assign (new_domain
->p
[row
][col
+ 1], domain
->p
[row
][col
]);
5636 row
= domain
->NbRows
;
5638 /* Lower bound of the outer stripped loop. */
5639 copy_constraint (new_domain
, new_domain
,
5640 get_lower_bound_row (new_domain
, col_loop_old
), row
);
5641 swap_constraint_variables (new_domain
, row
, col_loop_old
, col_loop_strip
);
5644 /* Upper bound of the outer stripped loop. */
5645 copy_constraint (new_domain
, new_domain
,
5646 get_upper_bound_row (new_domain
, col_loop_old
), row
);
5647 swap_constraint_variables (new_domain
, row
, col_loop_old
, col_loop_strip
);
5648 scale_constraint_variable (new_domain
, row
, col_loop_strip
, stride
);
5651 /* Lower bound of a tile starts at "stride * outer_iv". */
5652 row
= get_lower_bound_row (new_domain
, col_loop_old
);
5653 value_set_si (new_domain
->p
[row
][0], 1);
5654 value_set_si (new_domain
->p
[row
][const_column_index (new_domain
)], 0);
5655 value_set_si (new_domain
->p
[row
][col_loop_old
], 1);
5656 value_set_si (new_domain
->p
[row
][col_loop_strip
], -1 * stride
);
5658 /* Upper bound of a tile stops at "stride * outer_iv + stride - 1",
5659 or at the old upper bound that is not modified. */
5660 row
= new_domain
->NbRows
- 1;
5661 value_set_si (new_domain
->p
[row
][0], 1);
5662 value_set_si (new_domain
->p
[row
][col_loop_old
], -1);
5663 value_set_si (new_domain
->p
[row
][col_loop_strip
], stride
);
5664 value_set_si (new_domain
->p
[row
][const_column_index (new_domain
)],
5667 cloog_matrix_free (domain
);
5669 /* Update static schedule. */
5672 int nb_loops
= gbb_nb_loops (gb
);
5673 lambda_vector new_schedule
= lambda_vector_new (nb_loops
+ 1);
5675 for (i
= 0; i
<= loop_depth
; i
++)
5676 new_schedule
[i
] = GBB_STATIC_SCHEDULE (gb
)[i
];
5678 for (i
= loop_depth
+ 1; i
<= nb_loops
- 2; i
++)
5679 new_schedule
[i
+ 2] = GBB_STATIC_SCHEDULE (gb
)[i
];
5681 GBB_STATIC_SCHEDULE (gb
) = new_schedule
;
5685 /* Returns true when the strip mining of LOOP_INDEX by STRIDE is
5686 profitable or undecidable. GB is the statement around which the
5687 loops will be strip mined. */
5690 strip_mine_profitable_p (graphite_bb_p gb
, int stride
,
5699 loop
= VEC_index (loop_p
, GBB_LOOPS (gb
), loop_index
);
5700 exit
= single_exit (loop
);
5702 niter
= find_loop_niter (loop
, &exit
);
5703 if (niter
== chrec_dont_know
5704 || TREE_CODE (niter
) != INTEGER_CST
)
5707 niter_val
= int_cst_value (niter
);
5709 if (niter_val
< stride
)
5712 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5714 fprintf (dump_file
, "\nStrip Mining is not profitable for loop %d:",
5716 fprintf (dump_file
, "number of iterations is too low.\n");
5723 /* Determines when the interchange of LOOP_A and LOOP_B belonging to
5724 SCOP is legal. DEPTH is the number of loops around. */
5727 is_interchange_valid (scop_p scop
, int loop_a
, int loop_b
, int depth
)
5730 VEC (ddr_p
, heap
) *dependence_relations
;
5731 VEC (data_reference_p
, heap
) *datarefs
;
5733 struct loop
*nest
= VEC_index (loop_p
, SCOP_LOOP_NEST (scop
), loop_a
);
5734 lambda_trans_matrix trans
;
5736 gcc_assert (loop_a
< loop_b
);
5738 dependence_relations
= VEC_alloc (ddr_p
, heap
, 10 * 10);
5739 datarefs
= VEC_alloc (data_reference_p
, heap
, 10);
5741 if (!compute_data_dependences_for_loop (nest
, true, &datarefs
,
5742 &dependence_relations
))
5745 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5746 dump_ddrs (dump_file
, dependence_relations
);
5748 trans
= lambda_trans_matrix_new (depth
, depth
);
5749 lambda_matrix_id (LTM_MATRIX (trans
), depth
);
5751 lambda_matrix_row_exchange (LTM_MATRIX (trans
), 0, loop_b
- loop_a
);
5753 if (!lambda_transform_legal_p (trans
, depth
, dependence_relations
))
5755 lambda_matrix_row_exchange (LTM_MATRIX (trans
), 0, loop_b
- loop_a
);
5761 free_dependence_relations (dependence_relations
);
5762 free_data_refs (datarefs
);
5766 /* Loop block the LOOPS innermost loops of GB with stride size STRIDE.
5770 for (i = 0; i <= 50; i++=4)
5771 for (k = 0; k <= 100; k++=4)
5772 for (l = 0; l <= 200; l++=4)
5775 To strip mine the two inner most loops with stride = 4 call:
5777 graphite_trans_bb_block (A, 4, 2)
5779 for (i = 0; i <= 50; i++)
5780 for (kk = 0; kk <= 100; kk+=4)
5781 for (ll = 0; ll <= 200; ll+=4)
5782 for (k = kk; k <= min (100, kk + 3); k++)
5783 for (l = ll; l <= min (200, ll + 3); l++)
5788 graphite_trans_bb_block (graphite_bb_p gb
, int stride
, int loops
)
5791 int nb_loops
= gbb_nb_loops (gb
);
5792 int start
= nb_loops
- loops
;
5793 scop_p scop
= GBB_SCOP (gb
);
5795 gcc_assert (scop_contains_loop (scop
, gbb_loop (gb
)));
5797 for (i
= start
; i
< nb_loops
; i
++)
5798 for (j
= i
+ 1; j
< nb_loops
; j
++)
5799 if (!is_interchange_valid (scop
, i
, j
, nb_loops
))
5801 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5803 "\nInterchange not valid for loops %d and %d:\n", i
, j
);
5806 else if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5808 "\nInterchange valid for loops %d and %d:\n", i
, j
);
5810 /* Check if strip mining is profitable for every loop. */
5811 for (i
= 0; i
< nb_loops
- start
; i
++)
5812 if (!strip_mine_profitable_p (gb
, stride
, start
+ i
))
5815 /* Strip mine loops. */
5816 for (i
= 0; i
< nb_loops
- start
; i
++)
5817 graphite_trans_bb_strip_mine (gb
, start
+ 2 * i
, stride
);
5819 /* Interchange loops. */
5820 for (i
= 1; i
< nb_loops
- start
; i
++)
5821 graphite_trans_bb_move_loop (gb
, start
+ 2 * i
, start
+ i
);
5823 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
5824 fprintf (dump_file
, "\nLoops containing BB %d will be loop blocked.\n",
5825 GBB_BB (gb
)->index
);
5830 /* Loop block LOOPS innermost loops of a loop nest. BBS represent the
5831 basic blocks that belong to the loop nest to be blocked. */
5834 graphite_trans_loop_block (VEC (graphite_bb_p
, heap
) *bbs
, int loops
)
5838 bool transform_done
= false;
5840 /* TODO: - Calculate the stride size automatically. */
5841 int stride_size
= 64;
5843 for (i
= 0; VEC_iterate (graphite_bb_p
, bbs
, i
, gb
); i
++)
5844 transform_done
|= graphite_trans_bb_block (gb
, stride_size
, loops
);
5846 return transform_done
;
5849 /* Loop block all basic blocks of SCOP. Return false when the
5850 transform is not performed. */
5853 graphite_trans_scop_block (scop_p scop
)
5859 bool perfect
= true;
5860 bool transform_done
= false;
5862 VEC (graphite_bb_p
, heap
) *bbs
= VEC_alloc (graphite_bb_p
, heap
, 3);
5863 int max_schedule
= scop_max_loop_depth (scop
) + 1;
5864 lambda_vector last_schedule
= lambda_vector_new (max_schedule
);
5866 if (VEC_length (graphite_bb_p
, SCOP_BBS (scop
)) == 0)
5869 /* Get the data of the first bb. */
5870 gb
= VEC_index (graphite_bb_p
, SCOP_BBS (scop
), 0);
5871 last_nb_loops
= gbb_nb_loops (gb
);
5872 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb
), last_schedule
,
5874 VEC_safe_push (graphite_bb_p
, heap
, bbs
, gb
);
5876 for (i
= 0; VEC_iterate (graphite_bb_p
, SCOP_BBS (scop
), i
, gb
); i
++)
5878 /* We did the first bb before. */
5882 nb_loops
= gbb_nb_loops (gb
);
5884 /* If the number of loops is unchanged and only the last element of the
5885 schedule changes, we stay in the loop nest. */
5886 if (nb_loops
== last_nb_loops
5887 && (last_schedule
[nb_loops
+ 1]
5888 != GBB_STATIC_SCHEDULE (gb
)[nb_loops
+ 1]))
5890 VEC_safe_push (graphite_bb_p
, heap
, bbs
, gb
);
5894 /* Otherwise, we left the innermost loop. So check, if the last bb was in
5895 a perfect loop nest and how many loops are contained in this perfect
5898 Count the number of zeros from the end of the schedule. They are the
5899 number of surrounding loops.
5902 last_bb 2 3 2 0 0 0 0 3
5906 last_bb 2 3 2 0 0 0 0 3
5910 If there is no zero, there were other bbs in outer loops and the loop
5911 nest is not perfect. */
5912 for (j
= last_nb_loops
- 1; j
>= 0; j
--)
5914 if (last_schedule
[j
] != 0
5915 || (j
<= nb_loops
&& GBB_STATIC_SCHEDULE (gb
)[j
] == 1))
5924 /* Found perfect loop nest. */
5925 if (perfect
&& last_nb_loops
- j
>= 2)
5926 transform_done
|= graphite_trans_loop_block (bbs
, last_nb_loops
- j
);
5928 /* Check if we start with a new loop.
5932 last_bb 2 3 2 0 0 0 0 3
5935 Here we start with the loop "2 3 2 0 0 1"
5937 last_bb 2 3 2 0 0 0 0 3
5940 But here not, so the loop nest can never be perfect. */
5942 perfect
= (GBB_STATIC_SCHEDULE (gb
)[nb_loops
] == 0);
5944 /* Update the last_bb infos. We do not do that for the bbs in the same
5945 loop, as the data we use is not changed. */
5946 last_nb_loops
= nb_loops
;
5947 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb
), last_schedule
,
5949 VEC_truncate (graphite_bb_p
, bbs
, 0);
5950 VEC_safe_push (graphite_bb_p
, heap
, bbs
, gb
);
5953 /* Check if the last loop nest was perfect. It is the same check as above,
5954 but the comparison with the next bb is missing. */
5955 for (j
= last_nb_loops
- 1; j
>= 0; j
--)
5956 if (last_schedule
[j
] != 0)
5964 /* Found perfect loop nest. */
5965 if (last_nb_loops
- j
>= 2)
5966 transform_done
|= graphite_trans_loop_block (bbs
, last_nb_loops
- j
);
5967 VEC_free (graphite_bb_p
, heap
, bbs
);
5969 return transform_done
;
5972 /* Apply graphite transformations to all the basic blocks of SCOP. */
5975 graphite_apply_transformations (scop_p scop
)
5977 bool transform_done
= false;
5979 /* Sort the list of bbs. Keep them always sorted. */
5980 graphite_sort_gbbs (scop
);
5982 if (flag_loop_block
)
5983 transform_done
= graphite_trans_scop_block (scop
);
5985 /* Generate code even if we did not apply any real transformation.
5986 This also allows to check the performance for the identity
5987 transformation: GIMPLE -> GRAPHITE -> GIMPLE
5988 Keep in mind that CLooG optimizes in control, so the loop structure
5989 may change, even if we only use -fgraphite-identity. */
5990 if (flag_graphite_identity
)
5991 transform_done
= true;
5993 return transform_done
;
5996 /* We limit all SCoPs to SCoPs, that are completely surrounded by a loop.
6006 * SCoP frontier, as this line is not surrounded by any loop. *
6010 This is necessary as scalar evolution and parameter detection need a
6011 outermost loop to initialize parameters correctly.
6013 TODO: FIX scalar evolution and parameter detection to allow more flexible
6019 VEC (sd_region
, heap
) *tmp_scops
= VEC_alloc (sd_region
, heap
, 3);
6024 for (i
= 0; VEC_iterate (scop_p
, current_scops
, i
, scop
); i
++)
6028 build_scop_bbs (scop
);
6030 if (!build_scop_loop_nests (scop
))
6033 for (j
= 0; VEC_iterate (loop_p
, SCOP_LOOP_NEST (scop
), j
, loop
); j
++)
6034 if (!loop_in_scop_p (loop_outer (loop
), scop
))
6036 sd_region open_scop
;
6037 open_scop
.entry
= loop
->header
;
6038 open_scop
.exit
= single_exit (loop
)->dest
;
6039 VEC_safe_push (sd_region
, heap
, tmp_scops
, &open_scop
);
6043 free_scops (current_scops
);
6044 current_scops
= VEC_alloc (scop_p
, heap
, 3);
6046 create_sese_edges (tmp_scops
);
6047 build_graphite_scops (tmp_scops
);
6048 VEC_free (sd_region
, heap
, tmp_scops
);
6051 /* Perform a set of linear transforms on the loops of the current
6055 graphite_transform_loops (void)
6059 bool transform_done
= false;
6061 if (number_of_loops () <= 1)
6064 current_scops
= VEC_alloc (scop_p
, heap
, 3);
6065 recompute_all_dominators ();
6067 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6068 fprintf (dump_file
, "Graphite loop transformations \n");
6070 initialize_original_copy_tables ();
6071 cloog_initialize ();
6075 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6076 fprintf (dump_file
, "\nnumber of SCoPs: %d\n",
6077 VEC_length (scop_p
, current_scops
));
6079 for (i
= 0; VEC_iterate (scop_p
, current_scops
, i
, scop
); i
++)
6081 build_scop_bbs (scop
);
6082 if (!build_scop_loop_nests (scop
))
6085 build_bb_loops (scop
);
6087 if (!build_scop_conditions (scop
))
6090 find_scop_parameters (scop
);
6091 build_scop_context (scop
);
6093 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6095 fprintf (dump_file
, "\n(In SCoP %d:\n", i
);
6096 fprintf (dump_file
, "\nnumber of bbs: %d\n",
6097 VEC_length (graphite_bb_p
, SCOP_BBS (scop
)));
6098 fprintf (dump_file
, "\nnumber of loops: %d)\n",
6099 VEC_length (loop_p
, SCOP_LOOP_NEST (scop
)));
6102 if (!build_scop_iteration_domain (scop
))
6105 add_conditions_to_constraints (scop
);
6106 build_scop_canonical_schedules (scop
);
6108 build_scop_data_accesses (scop
);
6109 build_scop_dynamic_schedules (scop
);
6111 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
6113 int nbrefs
= nb_data_refs_in_scop (scop
);
6114 fprintf (dump_file
, "\nnumber of data refs: %d\n", nbrefs
);
6117 if (graphite_apply_transformations (scop
))
6118 transform_done
= gloog (scop
, find_transform (scop
));
6119 #ifdef ENABLE_CHECKING
6122 struct clast_stmt
*stmt
= find_transform (scop
);
6123 cloog_clast_free (stmt
);
6130 cleanup_tree_cfg ();
6132 free_scops (current_scops
);
6134 free_original_copy_tables ();
6137 #else /* If Cloog is not available: #ifndef HAVE_cloog. */
6140 graphite_transform_loops (void)
6142 sorry ("Graphite loop optimizations cannot be used");