/*
 * Copyright 2011 INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015-2016 Sven Verdoolaege
 * Copyright 2016 INRIA Paris
 * Copyright 2017 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Centre de Recherche Inria de Paris, 2 rue Simone Iff - Voie DQ12,
 * CS 42112, 75589 Paris Cedex 12, France
 */
#include <isl_ctx_private.h>
#include <isl_map_private.h>
#include <isl_space_private.h>
#include <isl_aff_private.h>
#include <isl/constraint.h>
#include <isl/schedule.h>
#include <isl_schedule_constraints.h>
#include <isl/schedule_node.h>
#include <isl_mat_private.h>
#include <isl_vec_private.h>
#include <isl_union_set_private.h>
#include <isl_dim_map.h>
#include <isl/map_to_basic_set.h>
#include <isl_options_private.h>
#include <isl_tarjan.h>
#include <isl_morph.h>
#include <isl_val_private.h>
/* The scheduling algorithm implemented in this file was inspired by
 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
 * Parallelization and Locality Optimization in the Polyhedral Model".
 *
 * For a detailed description of the variant implemented in isl,
 * see Verdoolaege and Janssens, "Scheduling for PPCG" (2017).
 */
/* Internal information about a node that is used during the construction
 * of a schedule.
 * space represents the original space in which the domain lives;
 *	that is, the space is not affected by compression
 * sched is a matrix representation of the schedule being constructed
 *	for this node; if compressed is set, then this schedule is
 *	defined over the compressed domain space
 * sched_map is an isl_map representation of the same (partial) schedule
 *	sched_map may be NULL; if compressed is set, then this map
 *	is defined over the uncompressed domain space
 * rank is the number of linearly independent rows in the linear part
 *	of the schedule
 * the rows of "vmap" represent a change of basis for the node
 *	variables; the first rank rows span the linear part of
 *	the schedule rows; the remaining rows are linearly independent
 * the rows of "indep" represent linear combinations of the schedule
 *	coefficients that are non-zero when the schedule coefficients are
 *	linearly independent of previously computed schedule rows.
 * start is the first variable in the LP problem in the sequence that
 *	represents the schedule coefficients of this node
 * nvar is the dimension of the (compressed) domain
 * nparam is the number of parameters or 0 if we are not constructing
 *	a parametric schedule
 *
 * If compressed is set, then hull represents the constraints
 * that were used to derive the compression, while compress and
 * decompress map the original space to the compressed space and
 * vice versa.
 *
 * scc is the index of the SCC (or WCC) this node belongs to
 *
 * "cluster" is only used inside extract_clusters and identifies
 * the cluster of SCCs that the node belongs to.
 *
 * coincident contains a boolean for each of the rows of the schedule,
 * indicating whether the corresponding scheduling dimension satisfies
 * the coincidence constraints in the sense that the corresponding
 * dependence distances are zero.
 *
 * If the schedule_treat_coalescing option is set, then
 * "sizes" contains the sizes of the (compressed) instance set
 * in each direction. If there is no fixed size in a given direction,
 * then the corresponding size value is set to infinity.
 * If the schedule_treat_coalescing option or the schedule_max_coefficient
 * option is set, then "max" contains the maximal values for
 * schedule coefficients of the (compressed) variables. If no bound
 * needs to be imposed on a particular variable, then the corresponding
 * entry is set to a negative value.
 *
 * If not NULL, then "bounds" contains a non-parametric set
 * in the compressed space that is bounded by the size in each direction.
 */
struct isl_sched_node {
	isl_multi_aff *compress;
	isl_multi_aff *decompress;

	isl_multi_val *sizes;
	isl_basic_set *bounds;
};
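/* Example of compression (illustration only, not part of the implementation):
 * for a domain
 *
 *	{ S[i, j] : j = 2 i }
 *
 * the affine hull provides the defining equality j = 2 i, so the node
 * can be compressed to a one-dimensional space, with, e.g.,
 *
 *	compress:   { S[i, j] -> compressed_S[i] }
 *	decompress: { compressed_S[i] -> S[i, 2 i] }
 *
 * and "hull" set to { S[i, j] : j = 2 i }.  Scheduling is then performed
 * on the compressed space, while sched_map is expressed in the original
 * space.  (The exact identifier names are produced by
 * construct_compressed_id below; the multi_affs shown are only meant
 * to illustrate the direction of the two mappings.)
 */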
static int node_has_tuples(const void *entry, const void *val)
{
	struct isl_sched_node *node = (struct isl_sched_node *)entry;
	isl_space *space = (isl_space *) val;

	return isl_space_has_equal_tuples(node->space, space);
}
static int node_scc_exactly(struct isl_sched_node *node, int scc)
{
	return node->scc == scc;
}

static int node_scc_at_most(struct isl_sched_node *node, int scc)
{
	return node->scc <= scc;
}

static int node_scc_at_least(struct isl_sched_node *node, int scc)
{
	return node->scc >= scc;
}
/* An edge in the dependence graph. An edge may be used to
 * ensure validity of the generated schedule, to minimize the dependence
 * distance or to enforce zero dependence distances.
 *
 * map is the dependence relation, with i -> j in the map if j depends on i
 * tagged_condition and tagged_validity contain the union of all tagged
 *	condition or conditional validity dependence relations that
 *	specialize the dependence relation "map"; that is,
 *	if (i -> a) -> (j -> b) is an element of "tagged_condition"
 *	or "tagged_validity", then i -> j is an element of "map".
 *	If these fields are NULL, then they represent the empty relation.
 * src is the source node
 * dst is the sink node
 *
 * types is a bit vector containing the types of this edge.
 * validity is set if the edge is used to ensure correctness
 * coincidence is used to enforce zero dependence distances
 * proximity is set if the edge is used to minimize dependence distances
 * condition is set if the edge represents a condition
 *	for a conditional validity schedule constraint
 * local can only be set for condition edges and indicates that
 *	the dependence distance over the edge should be zero
 * conditional_validity is set if the edge is used to conditionally
 *	ensure correctness
 *
 * For validity edges, start and end mark the sequence of inequality
 * constraints in the LP problem that encode the validity constraint
 * corresponding to this edge.
 *
 * During clustering, an edge may be marked "no_merge" if it should
 * not be used to merge clusters.
 * The weight is also only used during clustering and it is
 * an indication of how many schedule dimensions on either side
 * of the schedule constraints can be aligned.
 * If the weight is negative, then this means that this edge was postponed
 * by has_bounded_distances or any_no_merge. The original weight can
 * be retrieved by adding 1 + graph->max_weight, with "graph"
 * the graph containing this edge.
 */
struct isl_sched_edge {
	isl_union_map *tagged_condition;
	isl_union_map *tagged_validity;

	struct isl_sched_node *src;
	struct isl_sched_node *dst;
};
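/* For instance (illustration only), an edge that carries both a validity
 * and a proximity constraint, a common situation since the same dependence
 * relation may appear in both sets of schedule constraints, has both bits
 * set in its type bit vector:
 *
 *	types == ((1 << isl_edge_validity) | (1 << isl_edge_proximity))
 *
 * is_validity(edge) and is_proximity(edge) below then both return 1,
 * following the is_type/set_type helpers that operate on these bits.
 */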
/* Is "edge" marked as being of type "type"?
 */
static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	return ISL_FL_ISSET(edge->types, 1 << type);
}

/* Mark "edge" as being of type "type".
 */
static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	ISL_FL_SET(edge->types, 1 << type);
}

/* No longer mark "edge" as being of type "type".
 */
static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	ISL_FL_CLR(edge->types, 1 << type);
}

/* Is "edge" marked as a validity edge?
 */
static int is_validity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_validity);
}

/* Mark "edge" as a validity edge.
 */
static void set_validity(struct isl_sched_edge *edge)
{
	set_type(edge, isl_edge_validity);
}

/* Is "edge" marked as a proximity edge?
 */
static int is_proximity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_proximity);
}

/* Is "edge" marked as a local edge?
 */
static int is_local(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_local);
}

/* Mark "edge" as a local edge.
 */
static void set_local(struct isl_sched_edge *edge)
{
	set_type(edge, isl_edge_local);
}

/* No longer mark "edge" as a local edge.
 */
static void clear_local(struct isl_sched_edge *edge)
{
	clear_type(edge, isl_edge_local);
}

/* Is "edge" marked as a coincidence edge?
 */
static int is_coincidence(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_coincidence);
}

/* Is "edge" marked as a condition edge?
 */
static int is_condition(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_condition);
}

/* Is "edge" marked as a conditional validity edge?
 */
static int is_conditional_validity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_conditional_validity);
}
/* Is "edge" of a type that can appear multiple times between
 * the same pair of nodes?
 *
 * Condition edges and conditional validity edges may have tagged
 * dependence relations, in which case an edge is added for each
 * of the tagged dependence relations.
 */
static int is_multi_edge_type(struct isl_sched_edge *edge)
{
	return is_condition(edge) || is_conditional_validity(edge);
}
/* Internal information about the dependence graph used during
 * the construction of the schedule.
 *
 * intra_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself, possibly without
 *	coefficients for the parameters
 * intra_hmap_param is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself, including coefficients
 *	for the parameters
 * inter_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences between distinct nodes
 * if compression is involved then the key for these maps
 * is the original, uncompressed dependence relation, while
 * the value is the dual of the compressed dependence relation.
 *
 * n is the number of nodes
 * node is the list of nodes
 * maxvar is the maximal number of variables over all nodes
 * max_row is the allocated number of rows in the schedule
 * n_row is the current (maximal) number of linearly independent
 *	rows in the node schedules
 * n_total_row is the current number of rows in the node schedules
 * band_start is the starting row in the node schedules of the current band
 * root is set to the original dependence graph from which this graph
 *	is derived through splitting. If this graph is not the result of
 *	splitting, then the root field points to the graph itself.
 *
 * sorted contains a list of node indices sorted according to the
 *	SCC to which a node belongs
 *
 * n_edge is the number of edges
 * edge is the list of edges
 * max_edge contains the maximal number of edges of each type;
 *	in particular, it contains the number of edges in the initial graph.
 * edge_table contains pointers into the edge array, hashed on the source
 *	and sink spaces; there is one such table for each type;
 *	a given edge may be referenced from more than one table
 *	if the corresponding relation appears in more than one of the
 *	sets of dependences; however, for each type there is only
 *	a single edge between a given pair of source and sink space
 *	in the entire graph
 *
 * node_table contains pointers into the node array, hashed on the space tuples
 *
 * region contains a list of variable sequences that should be non-trivial
 *
 * lp contains the (I)LP problem used to obtain new schedule rows
 *
 * src_scc and dst_scc are the source and sink SCCs of an edge with
 *	conflicting constraints
 *
 * scc represents the number of components
 * weak is set if the components are weakly connected
 *
 * max_weight is used during clustering and represents the maximal
 * weight of the relevant proximity edges.
 */
struct isl_sched_graph {
	isl_map_to_basic_set *intra_hmap;
	isl_map_to_basic_set *intra_hmap_param;
	isl_map_to_basic_set *inter_hmap;

	struct isl_sched_node *node;

	struct isl_sched_graph *root;

	struct isl_sched_edge *edge;
	int max_edge[isl_edge_last + 1];
	struct isl_hash_table *edge_table[isl_edge_last + 1];

	struct isl_hash_table *node_table;
	struct isl_trivial_region *region;
};
/* Initialize node_table based on the list of nodes.
 */
static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	graph->node_table = isl_hash_table_alloc(ctx, graph->n);
	if (!graph->node_table)
		return -1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_hash_table_entry *entry;
		uint32_t hash;

		hash = isl_space_get_tuple_hash(graph->node[i].space);
		entry = isl_hash_table_find(ctx, graph->node_table, hash,
					    &node_has_tuples,
					    graph->node[i].space, 1);
		if (!entry)
			return -1;
		entry->data = &graph->node[i];
	}

	return 0;
}
/* Return a pointer to the node that lives within the given space,
 * an invalid node if there is no such node, or NULL in case of error.
 */
static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_space *space)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_space_get_tuple_hash(space);
	entry = isl_hash_table_find(ctx, graph->node_table, hash,
				    &node_has_tuples, space, 0);

	return entry ? entry->data : graph->node + graph->n;
}
/* Is "node" a node in "graph"?
 */
static int is_node(struct isl_sched_graph *graph,
	struct isl_sched_node *node)
{
	return node && node >= &graph->node[0] && node < &graph->node[graph->n];
}
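/* Typical use of graph_find_node (illustration only, mirroring what
 * find_domain_node and extract_edge do further below): look up a node by
 * space and distinguish the three possible outcomes.
 *
 *	node = graph_find_node(ctx, graph, space);
 *	if (!node)
 *		return isl_stat_error;	// error
 *	if (!is_node(graph, node))
 *		...			// no node lives in this space
 *	else
 *		...			// "node" is a valid node of "graph"
 */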
451 static int edge_has_src_and_dst(const void *entry
, const void *val
)
453 const struct isl_sched_edge
*edge
= entry
;
454 const struct isl_sched_edge
*temp
= val
;
456 return edge
->src
== temp
->src
&& edge
->dst
== temp
->dst
;
459 /* Add the given edge to graph->edge_table[type].
461 static isl_stat
graph_edge_table_add(isl_ctx
*ctx
,
462 struct isl_sched_graph
*graph
, enum isl_edge_type type
,
463 struct isl_sched_edge
*edge
)
465 struct isl_hash_table_entry
*entry
;
468 hash
= isl_hash_init();
469 hash
= isl_hash_builtin(hash
, edge
->src
);
470 hash
= isl_hash_builtin(hash
, edge
->dst
);
471 entry
= isl_hash_table_find(ctx
, graph
->edge_table
[type
], hash
,
472 &edge_has_src_and_dst
, edge
, 1);
474 return isl_stat_error
;
480 /* Add "edge" to all relevant edge tables.
481 * That is, for every type of the edge, add it to the corresponding table.
483 static isl_stat
graph_edge_tables_add(isl_ctx
*ctx
,
484 struct isl_sched_graph
*graph
, struct isl_sched_edge
*edge
)
486 enum isl_edge_type t
;
488 for (t
= isl_edge_first
; t
<= isl_edge_last
; ++t
) {
489 if (!is_type(edge
, t
))
491 if (graph_edge_table_add(ctx
, graph
, t
, edge
) < 0)
492 return isl_stat_error
;
498 /* Allocate the edge_tables based on the maximal number of edges of
501 static int graph_init_edge_tables(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
505 for (i
= 0; i
<= isl_edge_last
; ++i
) {
506 graph
->edge_table
[i
] = isl_hash_table_alloc(ctx
,
508 if (!graph
->edge_table
[i
])
515 /* If graph->edge_table[type] contains an edge from the given source
516 * to the given destination, then return the hash table entry of this edge.
517 * Otherwise, return NULL.
519 static struct isl_hash_table_entry
*graph_find_edge_entry(
520 struct isl_sched_graph
*graph
,
521 enum isl_edge_type type
,
522 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
524 isl_ctx
*ctx
= isl_space_get_ctx(src
->space
);
526 struct isl_sched_edge temp
= { .src
= src
, .dst
= dst
};
528 hash
= isl_hash_init();
529 hash
= isl_hash_builtin(hash
, temp
.src
);
530 hash
= isl_hash_builtin(hash
, temp
.dst
);
531 return isl_hash_table_find(ctx
, graph
->edge_table
[type
], hash
,
532 &edge_has_src_and_dst
, &temp
, 0);
536 /* If graph->edge_table[type] contains an edge from the given source
537 * to the given destination, then return this edge.
538 * Otherwise, return NULL.
540 static struct isl_sched_edge
*graph_find_edge(struct isl_sched_graph
*graph
,
541 enum isl_edge_type type
,
542 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
544 struct isl_hash_table_entry
*entry
;
546 entry
= graph_find_edge_entry(graph
, type
, src
, dst
);
553 /* Check whether the dependence graph has an edge of the given type
554 * between the given two nodes.
556 static isl_bool
graph_has_edge(struct isl_sched_graph
*graph
,
557 enum isl_edge_type type
,
558 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
560 struct isl_sched_edge
*edge
;
563 edge
= graph_find_edge(graph
, type
, src
, dst
);
565 return isl_bool_false
;
567 empty
= isl_map_plain_is_empty(edge
->map
);
569 return isl_bool_not(empty
);
572 /* Look for any edge with the same src, dst and map fields as "model".
574 * Return the matching edge if one can be found.
575 * Return "model" if no matching edge is found.
576 * Return NULL on error.
578 static struct isl_sched_edge
*graph_find_matching_edge(
579 struct isl_sched_graph
*graph
, struct isl_sched_edge
*model
)
581 enum isl_edge_type i
;
582 struct isl_sched_edge
*edge
;
584 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
587 edge
= graph_find_edge(graph
, i
, model
->src
, model
->dst
);
590 is_equal
= isl_map_plain_is_equal(model
->map
, edge
->map
);
600 /* Remove the given edge from all the edge_tables that refer to it.
602 static void graph_remove_edge(struct isl_sched_graph
*graph
,
603 struct isl_sched_edge
*edge
)
605 isl_ctx
*ctx
= isl_map_get_ctx(edge
->map
);
606 enum isl_edge_type i
;
608 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
609 struct isl_hash_table_entry
*entry
;
611 entry
= graph_find_edge_entry(graph
, i
, edge
->src
, edge
->dst
);
614 if (entry
->data
!= edge
)
616 isl_hash_table_remove(ctx
, graph
->edge_table
[i
], entry
);
620 /* Check whether the dependence graph has any edge
621 * between the given two nodes.
623 static isl_bool
graph_has_any_edge(struct isl_sched_graph
*graph
,
624 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
626 enum isl_edge_type i
;
629 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
630 r
= graph_has_edge(graph
, i
, src
, dst
);
638 /* Check whether the dependence graph has a validity edge
639 * between the given two nodes.
641 * Conditional validity edges are essentially validity edges that
642 * can be ignored if the corresponding condition edges are iteration private.
643 * Here, we are only checking for the presence of validity
644 * edges, so we need to consider the conditional validity edges too.
645 * In particular, this function is used during the detection
646 * of strongly connected components and we cannot ignore
647 * conditional validity edges during this detection.
649 static isl_bool
graph_has_validity_edge(struct isl_sched_graph
*graph
,
650 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
654 r
= graph_has_edge(graph
, isl_edge_validity
, src
, dst
);
658 return graph_has_edge(graph
, isl_edge_conditional_validity
, src
, dst
);
/* Perform all the required memory allocations for a schedule graph "graph"
 * with "n_node" nodes and "n_edge" edges and initialize the corresponding
 * fields.
 */
665 static isl_stat
graph_alloc(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
666 int n_node
, int n_edge
)
671 graph
->n_edge
= n_edge
;
672 graph
->node
= isl_calloc_array(ctx
, struct isl_sched_node
, graph
->n
);
673 graph
->sorted
= isl_calloc_array(ctx
, int, graph
->n
);
674 graph
->region
= isl_alloc_array(ctx
,
675 struct isl_trivial_region
, graph
->n
);
676 graph
->edge
= isl_calloc_array(ctx
,
677 struct isl_sched_edge
, graph
->n_edge
);
679 graph
->intra_hmap
= isl_map_to_basic_set_alloc(ctx
, 2 * n_edge
);
680 graph
->intra_hmap_param
= isl_map_to_basic_set_alloc(ctx
, 2 * n_edge
);
681 graph
->inter_hmap
= isl_map_to_basic_set_alloc(ctx
, 2 * n_edge
);
683 if (!graph
->node
|| !graph
->region
|| (graph
->n_edge
&& !graph
->edge
) ||
685 return isl_stat_error
;
687 for(i
= 0; i
< graph
->n
; ++i
)
688 graph
->sorted
[i
] = i
;
693 /* Free the memory associated to node "node" in "graph".
694 * The "coincident" field is shared by nodes in a graph and its subgraph.
695 * It therefore only needs to be freed for the original dependence graph,
696 * i.e., one that is not the result of splitting.
698 static void clear_node(struct isl_sched_graph
*graph
,
699 struct isl_sched_node
*node
)
701 isl_space_free(node
->space
);
702 isl_set_free(node
->hull
);
703 isl_multi_aff_free(node
->compress
);
704 isl_multi_aff_free(node
->decompress
);
705 isl_mat_free(node
->sched
);
706 isl_map_free(node
->sched_map
);
707 isl_mat_free(node
->indep
);
708 isl_mat_free(node
->vmap
);
709 if (graph
->root
== graph
)
710 free(node
->coincident
);
711 isl_multi_val_free(node
->sizes
);
712 isl_basic_set_free(node
->bounds
);
713 isl_vec_free(node
->max
);
716 static void graph_free(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
720 isl_map_to_basic_set_free(graph
->intra_hmap
);
721 isl_map_to_basic_set_free(graph
->intra_hmap_param
);
722 isl_map_to_basic_set_free(graph
->inter_hmap
);
725 for (i
= 0; i
< graph
->n
; ++i
)
726 clear_node(graph
, &graph
->node
[i
]);
730 for (i
= 0; i
< graph
->n_edge
; ++i
) {
731 isl_map_free(graph
->edge
[i
].map
);
732 isl_union_map_free(graph
->edge
[i
].tagged_condition
);
733 isl_union_map_free(graph
->edge
[i
].tagged_validity
);
737 for (i
= 0; i
<= isl_edge_last
; ++i
)
738 isl_hash_table_free(ctx
, graph
->edge_table
[i
]);
739 isl_hash_table_free(ctx
, graph
->node_table
);
740 isl_basic_set_free(graph
->lp
);
743 /* For each "set" on which this function is called, increment
744 * graph->n by one and update graph->maxvar.
746 static isl_stat
init_n_maxvar(__isl_take isl_set
*set
, void *user
)
748 struct isl_sched_graph
*graph
= user
;
749 isl_size nvar
= isl_set_dim(set
, isl_dim_set
);
752 if (nvar
> graph
->maxvar
)
753 graph
->maxvar
= nvar
;
758 return isl_stat_error
;
762 /* Compute the number of rows that should be allocated for the schedule.
763 * In particular, we need one row for each variable or one row
764 * for each basic map in the dependences.
765 * Note that it is practically impossible to exhaust both
766 * the number of dependences and the number of variables.
768 static isl_stat
compute_max_row(struct isl_sched_graph
*graph
,
769 __isl_keep isl_schedule_constraints
*sc
)
773 isl_union_set
*domain
;
777 domain
= isl_schedule_constraints_get_domain(sc
);
778 r
= isl_union_set_foreach_set(domain
, &init_n_maxvar
, graph
);
779 isl_union_set_free(domain
);
781 return isl_stat_error
;
782 n_edge
= isl_schedule_constraints_n_basic_map(sc
);
784 return isl_stat_error
;
785 graph
->max_row
= n_edge
+ graph
->maxvar
;
790 /* Does "bset" have any defining equalities for its set variables?
792 static isl_bool
has_any_defining_equality(__isl_keep isl_basic_set
*bset
)
797 n
= isl_basic_set_dim(bset
, isl_dim_set
);
799 return isl_bool_error
;
801 for (i
= 0; i
< n
; ++i
) {
804 has
= isl_basic_set_has_defining_equality(bset
, isl_dim_set
, i
,
810 return isl_bool_false
;
813 /* Set the entries of node->max to the value of the schedule_max_coefficient
816 static isl_stat
set_max_coefficient(isl_ctx
*ctx
, struct isl_sched_node
*node
)
820 max
= isl_options_get_schedule_max_coefficient(ctx
);
824 node
->max
= isl_vec_alloc(ctx
, node
->nvar
);
825 node
->max
= isl_vec_set_si(node
->max
, max
);
827 return isl_stat_error
;
/* Set the entries of node->max to the minimum of the schedule_max_coefficient
 * option (if set) and half of the minimum of the sizes in the other
 * dimensions. Round up when computing the half such that
 * if the minimum of the sizes is one, half of the size is taken to be one
 * rather than zero.
 * If the global minimum is unbounded (i.e., if both
 * the schedule_max_coefficient is not set and the sizes in the other
 * dimensions are unbounded), then store a negative value.
 * If the schedule coefficient is close to the size of the instance set
 * in another dimension, then the schedule may represent a loop
 * coalescing transformation (especially if the coefficient
 * in that other dimension is one). Forcing the coefficient to be
 * smaller than or equal to half the minimal size should avoid this
 * situation.
 */
847 static isl_stat
compute_max_coefficient(isl_ctx
*ctx
,
848 struct isl_sched_node
*node
)
854 max
= isl_options_get_schedule_max_coefficient(ctx
);
855 v
= isl_vec_alloc(ctx
, node
->nvar
);
857 return isl_stat_error
;
859 for (i
= 0; i
< node
->nvar
; ++i
) {
860 isl_int_set_si(v
->el
[i
], max
);
861 isl_int_mul_si(v
->el
[i
], v
->el
[i
], 2);
864 for (i
= 0; i
< node
->nvar
; ++i
) {
867 size
= isl_multi_val_get_val(node
->sizes
, i
);
870 if (!isl_val_is_int(size
)) {
874 for (j
= 0; j
< node
->nvar
; ++j
) {
877 if (isl_int_is_neg(v
->el
[j
]) ||
878 isl_int_gt(v
->el
[j
], size
->n
))
879 isl_int_set(v
->el
[j
], size
->n
);
884 for (i
= 0; i
< node
->nvar
; ++i
)
885 isl_int_cdiv_q_ui(v
->el
[i
], v
->el
[i
], 2);
891 return isl_stat_error
;
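/* Worked example (illustration only): if the (compressed) instance set
 * has sizes (10, 4) and the schedule_max_coefficient option is not set,
 * then the bound for dimension 0 is ceil(4/2) = 2 (based on the other
 * dimension of size 4) and the bound for dimension 1 is ceil(10/2) = 5.
 * If schedule_max_coefficient were set to 3, the bounds would be 2 and 3.
 */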
/* Compute and return the size of "set" in dimension "dim".
 * The size is taken to be the difference in values for that variable
 * for fixed values of the other variables.
 * This assumes that "set" is convex.
 * In particular, the variable is first isolated from the other variables
 * in the range of a map
 *
 *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
 *
 * and then duplicated
 *
 *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
 *
 * The shared variables are then projected out and the maximal value
 * of i_dim' - i_dim is computed.
 */
static __isl_give isl_val *compute_size(__isl_take isl_set *set, int dim)
{
	isl_map *map;
	isl_local_space *ls;
	isl_aff *obj;
	isl_val *v;

	map = isl_set_project_onto_map(set, isl_dim_set, dim, 1);
	map = isl_map_project_out(map, isl_dim_in, dim, 1);
	map = isl_map_range_product(map, isl_map_copy(map));
	map = isl_set_unwrap(isl_map_range(map));
	set = isl_map_deltas(map);
	ls = isl_local_space_from_space(isl_set_get_space(set));
	obj = isl_aff_var_on_domain(ls, isl_dim_set, 0);
	v = isl_set_max_val(set, obj);
	isl_aff_free(obj);
	isl_set_free(set);

	return v;
}
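/* For example (illustration only, not part of the implementation):
 * for set = { [i, j] : 0 <= i < 10 and i <= j < i + 4 } and dim = 1,
 * the constructed map is { [i] -> [[j] -> [j']] : i <= j, j' < i + 4 },
 * its deltas form { [d] : -3 <= d <= 3 }, and the returned size is 3.
 */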
/* Compute the size of the instance set "set" of "node", after compression,
 * as well as bounds on the corresponding coefficients, if needed.
 *
 * The sizes are needed when the schedule_treat_coalescing option is set.
 * The bounds are needed when the schedule_treat_coalescing option or
 * the schedule_max_coefficient option is set.
 *
 * If the schedule_treat_coalescing option is not set, then at most
 * the bounds need to be set and this is done in set_max_coefficient.
 * Otherwise, compress the domain if needed, compute the size
 * in each direction and store the results in node->sizes.
 * If the domain is not convex, then the sizes are computed
 * on a convex superset in order to avoid picking up sizes
 * that are valid for the individual disjuncts, but not for
 * the domain as a whole.
 * Finally, set the bounds on the coefficients based on the sizes
 * and the schedule_max_coefficient option in compute_max_coefficient.
 */
949 static isl_stat
compute_sizes_and_max(isl_ctx
*ctx
, struct isl_sched_node
*node
,
950 __isl_take isl_set
*set
)
956 if (!isl_options_get_schedule_treat_coalescing(ctx
)) {
958 return set_max_coefficient(ctx
, node
);
961 if (node
->compressed
)
962 set
= isl_set_preimage_multi_aff(set
,
963 isl_multi_aff_copy(node
->decompress
));
964 set
= isl_set_from_basic_set(isl_set_simple_hull(set
));
965 mv
= isl_multi_val_zero(isl_set_get_space(set
));
966 n
= isl_set_dim(set
, isl_dim_set
);
968 mv
= isl_multi_val_free(mv
);
969 for (j
= 0; j
< n
; ++j
) {
972 v
= compute_size(isl_set_copy(set
), j
);
973 mv
= isl_multi_val_set_val(mv
, j
, v
);
978 return isl_stat_error
;
979 return compute_max_coefficient(ctx
, node
);
/* Add a new node to the graph representing the given instance set.
 * "nvar" is the (possibly compressed) number of variables and
 * may be smaller than the number of set variables in "set"
 * if "compressed" is set.
 * If "compressed" is set, then "hull" represents the constraints
 * that were used to derive the compression, while "compress" and
 * "decompress" map the original space to the compressed space and
 * vice versa.
 * If "compressed" is not set, then "hull", "compress" and "decompress"
 * should be NULL.
 *
 * Compute the size of the instance set and bounds on the coefficients,
 * if needed.
 */
996 static isl_stat
add_node(struct isl_sched_graph
*graph
,
997 __isl_take isl_set
*set
, int nvar
, int compressed
,
998 __isl_take isl_set
*hull
, __isl_take isl_multi_aff
*compress
,
999 __isl_take isl_multi_aff
*decompress
)
1006 struct isl_sched_node
*node
;
1008 nparam
= isl_set_dim(set
, isl_dim_param
);
1012 ctx
= isl_set_get_ctx(set
);
1013 if (!ctx
->opt
->schedule_parametric
)
1015 sched
= isl_mat_alloc(ctx
, 0, 1 + nparam
+ nvar
);
1016 node
= &graph
->node
[graph
->n
];
1018 space
= isl_set_get_space(set
);
1019 node
->space
= space
;
1021 node
->nparam
= nparam
;
1022 node
->sched
= sched
;
1023 node
->sched_map
= NULL
;
1024 coincident
= isl_calloc_array(ctx
, int, graph
->max_row
);
1025 node
->coincident
= coincident
;
1026 node
->compressed
= compressed
;
1028 node
->compress
= compress
;
1029 node
->decompress
= decompress
;
1030 if (compute_sizes_and_max(ctx
, node
, set
) < 0)
1031 return isl_stat_error
;
1033 if (!space
|| !sched
|| (graph
->max_row
&& !coincident
))
1034 return isl_stat_error
;
1035 if (compressed
&& (!hull
|| !compress
|| !decompress
))
1036 return isl_stat_error
;
1042 isl_multi_aff_free(compress
);
1043 isl_multi_aff_free(decompress
);
1044 return isl_stat_error
;
1047 /* Construct an identifier for node "node", which will represent "set".
1048 * The name of the identifier is either "compressed" or
1049 * "compressed_<name>", with <name> the name of the space of "set".
1050 * The user pointer of the identifier points to "node".
1052 static __isl_give isl_id
*construct_compressed_id(__isl_keep isl_set
*set
,
1053 struct isl_sched_node
*node
)
1062 has_name
= isl_set_has_tuple_name(set
);
1066 ctx
= isl_set_get_ctx(set
);
1068 return isl_id_alloc(ctx
, "compressed", node
);
1070 p
= isl_printer_to_str(ctx
);
1071 name
= isl_set_get_tuple_name(set
);
1072 p
= isl_printer_print_str(p
, "compressed_");
1073 p
= isl_printer_print_str(p
, name
);
1074 id_name
= isl_printer_get_str(p
);
1075 isl_printer_free(p
);
1077 id
= isl_id_alloc(ctx
, id_name
, node
);
1083 /* Add a new node to the graph representing the given set.
1085 * If any of the set variables is defined by an equality, then
1086 * we perform variable compression such that we can perform
1087 * the scheduling on the compressed domain.
1088 * In this case, an identifier is used that references the new node
1089 * such that each compressed space is unique and
1090 * such that the node can be recovered from the compressed space.
1092 static isl_stat
extract_node(__isl_take isl_set
*set
, void *user
)
1095 isl_bool has_equality
;
1097 isl_basic_set
*hull
;
1100 isl_multi_aff
*compress
, *decompress
;
1101 struct isl_sched_graph
*graph
= user
;
1103 hull
= isl_set_affine_hull(isl_set_copy(set
));
1104 hull
= isl_basic_set_remove_divs(hull
);
1105 nvar
= isl_set_dim(set
, isl_dim_set
);
1106 has_equality
= has_any_defining_equality(hull
);
1108 if (nvar
< 0 || has_equality
< 0)
1110 if (!has_equality
) {
1111 isl_basic_set_free(hull
);
1112 return add_node(graph
, set
, nvar
, 0, NULL
, NULL
, NULL
);
1115 id
= construct_compressed_id(set
, &graph
->node
[graph
->n
]);
1116 morph
= isl_basic_set_variable_compression_with_id(hull
,
1119 nvar
= isl_morph_ran_dim(morph
, isl_dim_set
);
1121 set
= isl_set_free(set
);
1122 compress
= isl_morph_get_var_multi_aff(morph
);
1123 morph
= isl_morph_inverse(morph
);
1124 decompress
= isl_morph_get_var_multi_aff(morph
);
1125 isl_morph_free(morph
);
1127 hull_set
= isl_set_from_basic_set(hull
);
1128 return add_node(graph
, set
, nvar
, 1, hull_set
, compress
, decompress
);
1130 isl_basic_set_free(hull
);
1132 return isl_stat_error
;
1135 struct isl_extract_edge_data
{
1136 enum isl_edge_type type
;
1137 struct isl_sched_graph
*graph
;
1140 /* Merge edge2 into edge1, freeing the contents of edge2.
1141 * Return 0 on success and -1 on failure.
1143 * edge1 and edge2 are assumed to have the same value for the map field.
1145 static int merge_edge(struct isl_sched_edge
*edge1
,
1146 struct isl_sched_edge
*edge2
)
1148 edge1
->types
|= edge2
->types
;
1149 isl_map_free(edge2
->map
);
1151 if (is_condition(edge2
)) {
1152 if (!edge1
->tagged_condition
)
1153 edge1
->tagged_condition
= edge2
->tagged_condition
;
1155 edge1
->tagged_condition
=
1156 isl_union_map_union(edge1
->tagged_condition
,
1157 edge2
->tagged_condition
);
1160 if (is_conditional_validity(edge2
)) {
1161 if (!edge1
->tagged_validity
)
1162 edge1
->tagged_validity
= edge2
->tagged_validity
;
1164 edge1
->tagged_validity
=
1165 isl_union_map_union(edge1
->tagged_validity
,
1166 edge2
->tagged_validity
);
1169 if (is_condition(edge2
) && !edge1
->tagged_condition
)
1171 if (is_conditional_validity(edge2
) && !edge1
->tagged_validity
)
/* Insert dummy tags in domain and range of "map".
 *
 * In particular, if "map" is of the form
 *
 *	A -> B
 *
 * then the result is of the form
 *
 *	[A -> dummy_tag] -> [B -> dummy_tag]
 *
 * where the dummy_tags are identical and equal to any dummy tags
 * introduced by any other call to this function.
 */
1190 static __isl_give isl_map
*insert_dummy_tags(__isl_take isl_map
*map
)
1196 isl_set
*domain
, *range
;
1198 ctx
= isl_map_get_ctx(map
);
1200 id
= isl_id_alloc(ctx
, NULL
, &dummy
);
1201 space
= isl_space_params(isl_map_get_space(map
));
1202 space
= isl_space_set_from_params(space
);
1203 space
= isl_space_set_tuple_id(space
, isl_dim_set
, id
);
1204 space
= isl_space_map_from_set(space
);
1206 domain
= isl_map_wrap(map
);
1207 range
= isl_map_wrap(isl_map_universe(space
));
1208 map
= isl_map_from_domain_and_range(domain
, range
);
1209 map
= isl_map_zip(map
);
1214 /* Given that at least one of "src" or "dst" is compressed, return
1215 * a map between the spaces of these nodes restricted to the affine
1216 * hull that was used in the compression.
1218 static __isl_give isl_map
*extract_hull(struct isl_sched_node
*src
,
1219 struct isl_sched_node
*dst
)
1223 if (src
->compressed
)
1224 dom
= isl_set_copy(src
->hull
);
1226 dom
= isl_set_universe(isl_space_copy(src
->space
));
1227 if (dst
->compressed
)
1228 ran
= isl_set_copy(dst
->hull
);
1230 ran
= isl_set_universe(isl_space_copy(dst
->space
));
1232 return isl_map_from_domain_and_range(dom
, ran
);
1235 /* Intersect the domains of the nested relations in domain and range
1236 * of "tagged" with "map".
1238 static __isl_give isl_map
*map_intersect_domains(__isl_take isl_map
*tagged
,
1239 __isl_keep isl_map
*map
)
1243 tagged
= isl_map_zip(tagged
);
1244 set
= isl_map_wrap(isl_map_copy(map
));
1245 tagged
= isl_map_intersect_domain(tagged
, set
);
1246 tagged
= isl_map_zip(tagged
);
1250 /* Return a pointer to the node that lives in the domain space of "map",
1251 * an invalid node if there is no such node, or NULL in case of error.
1253 static struct isl_sched_node
*find_domain_node(isl_ctx
*ctx
,
1254 struct isl_sched_graph
*graph
, __isl_keep isl_map
*map
)
1256 struct isl_sched_node
*node
;
1259 space
= isl_space_domain(isl_map_get_space(map
));
1260 node
= graph_find_node(ctx
, graph
, space
);
1261 isl_space_free(space
);
1266 /* Return a pointer to the node that lives in the range space of "map",
1267 * an invalid node if there is no such node, or NULL in case of error.
1269 static struct isl_sched_node
*find_range_node(isl_ctx
*ctx
,
1270 struct isl_sched_graph
*graph
, __isl_keep isl_map
*map
)
1272 struct isl_sched_node
*node
;
1275 space
= isl_space_range(isl_map_get_space(map
));
1276 node
= graph_find_node(ctx
, graph
, space
);
1277 isl_space_free(space
);
1282 /* Refrain from adding a new edge based on "map".
1283 * Instead, just free the map.
1284 * "tagged" is either a copy of "map" with additional tags or NULL.
1286 static isl_stat
skip_edge(__isl_take isl_map
*map
, __isl_take isl_map
*tagged
)
1289 isl_map_free(tagged
);
1294 /* Add a new edge to the graph based on the given map
1295 * and add it to data->graph->edge_table[data->type].
1296 * If a dependence relation of a given type happens to be identical
1297 * to one of the dependence relations of a type that was added before,
1298 * then we don't create a new edge, but instead mark the original edge
1299 * as also representing a dependence of the current type.
1301 * Edges of type isl_edge_condition or isl_edge_conditional_validity
1302 * may be specified as "tagged" dependence relations. That is, "map"
1303 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1304 * the dependence on iterations and a and b are tags.
1305 * edge->map is set to the relation containing the elements i -> j,
1306 * while edge->tagged_condition and edge->tagged_validity contain
1307 * the union of all the "map" relations
1308 * for which extract_edge is called that result in the same edge->map.
1310 * If the source or the destination node is compressed, then
1311 * intersect both "map" and "tagged" with the constraints that
1312 * were used to construct the compression.
1313 * This ensures that there are no schedule constraints defined
1314 * outside of these domains, while the scheduler no longer has
1315 * any control over those outside parts.
1317 static isl_stat
extract_edge(__isl_take isl_map
*map
, void *user
)
1320 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1321 struct isl_extract_edge_data
*data
= user
;
1322 struct isl_sched_graph
*graph
= data
->graph
;
1323 struct isl_sched_node
*src
, *dst
;
1324 struct isl_sched_edge
*edge
;
1325 isl_map
*tagged
= NULL
;
1327 if (data
->type
== isl_edge_condition
||
1328 data
->type
== isl_edge_conditional_validity
) {
1329 if (isl_map_can_zip(map
)) {
1330 tagged
= isl_map_copy(map
);
1331 map
= isl_set_unwrap(isl_map_domain(isl_map_zip(map
)));
1333 tagged
= insert_dummy_tags(isl_map_copy(map
));
1337 src
= find_domain_node(ctx
, graph
, map
);
1338 dst
= find_range_node(ctx
, graph
, map
);
1342 if (!is_node(graph
, src
) || !is_node(graph
, dst
))
1343 return skip_edge(map
, tagged
);
1345 if (src
->compressed
|| dst
->compressed
) {
1347 hull
= extract_hull(src
, dst
);
1349 tagged
= map_intersect_domains(tagged
, hull
);
1350 map
= isl_map_intersect(map
, hull
);
1353 empty
= isl_map_plain_is_empty(map
);
1357 return skip_edge(map
, tagged
);
1359 graph
->edge
[graph
->n_edge
].src
= src
;
1360 graph
->edge
[graph
->n_edge
].dst
= dst
;
1361 graph
->edge
[graph
->n_edge
].map
= map
;
1362 graph
->edge
[graph
->n_edge
].types
= 0;
1363 graph
->edge
[graph
->n_edge
].tagged_condition
= NULL
;
1364 graph
->edge
[graph
->n_edge
].tagged_validity
= NULL
;
1365 set_type(&graph
->edge
[graph
->n_edge
], data
->type
);
1366 if (data
->type
== isl_edge_condition
)
1367 graph
->edge
[graph
->n_edge
].tagged_condition
=
1368 isl_union_map_from_map(tagged
);
1369 if (data
->type
== isl_edge_conditional_validity
)
1370 graph
->edge
[graph
->n_edge
].tagged_validity
=
1371 isl_union_map_from_map(tagged
);
1373 edge
= graph_find_matching_edge(graph
, &graph
->edge
[graph
->n_edge
]);
1376 return isl_stat_error
;
1378 if (edge
== &graph
->edge
[graph
->n_edge
])
1379 return graph_edge_table_add(ctx
, graph
, data
->type
,
1380 &graph
->edge
[graph
->n_edge
++]);
1382 if (merge_edge(edge
, &graph
->edge
[graph
->n_edge
]) < 0)
1383 return isl_stat_error
;
1385 return graph_edge_table_add(ctx
, graph
, data
->type
, edge
);
1388 isl_map_free(tagged
);
1389 return isl_stat_error
;
/* Initialize the schedule graph "graph" from the schedule constraints "sc".
 *
 * The context is included in the domain before the nodes of
 * the graph are extracted in order to be able to exploit
 * any possible additional equalities.
 * Note that this intersection is only performed locally here.
 */
1399 static isl_stat
graph_init(struct isl_sched_graph
*graph
,
1400 __isl_keep isl_schedule_constraints
*sc
)
1403 isl_union_set
*domain
;
1405 struct isl_extract_edge_data data
;
1406 enum isl_edge_type i
;
1411 return isl_stat_error
;
1413 ctx
= isl_schedule_constraints_get_ctx(sc
);
1415 domain
= isl_schedule_constraints_get_domain(sc
);
1416 n
= isl_union_set_n_set(domain
);
1418 isl_union_set_free(domain
);
1420 return isl_stat_error
;
1422 n
= isl_schedule_constraints_n_map(sc
);
1423 if (n
< 0 || graph_alloc(ctx
, graph
, graph
->n
, n
) < 0)
1424 return isl_stat_error
;
1426 if (compute_max_row(graph
, sc
) < 0)
1427 return isl_stat_error
;
1428 graph
->root
= graph
;
1430 domain
= isl_schedule_constraints_get_domain(sc
);
1431 domain
= isl_union_set_intersect_params(domain
,
1432 isl_schedule_constraints_get_context(sc
));
1433 r
= isl_union_set_foreach_set(domain
, &extract_node
, graph
);
1434 isl_union_set_free(domain
);
1436 return isl_stat_error
;
1437 if (graph_init_table(ctx
, graph
) < 0)
1438 return isl_stat_error
;
1439 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
1442 c
= isl_schedule_constraints_get(sc
, i
);
1443 n
= isl_union_map_n_map(c
);
1444 graph
->max_edge
[i
] = n
;
1445 isl_union_map_free(c
);
1447 return isl_stat_error
;
1449 if (graph_init_edge_tables(ctx
, graph
) < 0)
1450 return isl_stat_error
;
1453 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
1457 c
= isl_schedule_constraints_get(sc
, i
);
1458 r
= isl_union_map_foreach_map(c
, &extract_edge
, &data
);
1459 isl_union_map_free(c
);
1461 return isl_stat_error
;
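/* For reference, a minimal sketch (not part of this file) of how the
 * schedule constraints processed by graph_init are typically built
 * through the public API, assuming an isl_union_set "domain" and
 * isl_union_map "validity"/"proximity" have already been constructed:
 *
 *	isl_schedule_constraints *sc;
 *	isl_schedule *schedule;
 *
 *	sc = isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc, validity);
 *	sc = isl_schedule_constraints_set_proximity(sc, proximity);
 *	schedule = isl_schedule_constraints_compute_schedule(sc);
 *
 * Each map in such a union map ends up as one or more edges of the
 * corresponding type in the dependence graph constructed here.
 */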
1467 /* Check whether there is any dependence from node[j] to node[i]
1468 * or from node[i] to node[j].
1470 static isl_bool
node_follows_weak(int i
, int j
, void *user
)
1473 struct isl_sched_graph
*graph
= user
;
1475 f
= graph_has_any_edge(graph
, &graph
->node
[j
], &graph
->node
[i
]);
1478 return graph_has_any_edge(graph
, &graph
->node
[i
], &graph
->node
[j
]);
1481 /* Check whether there is a (conditional) validity dependence from node[j]
1482 * to node[i], forcing node[i] to follow node[j].
1484 static isl_bool
node_follows_strong(int i
, int j
, void *user
)
1486 struct isl_sched_graph
*graph
= user
;
1488 return graph_has_validity_edge(graph
, &graph
->node
[j
], &graph
->node
[i
]);
1491 /* Use Tarjan's algorithm for computing the strongly connected components
1492 * in the dependence graph only considering those edges defined by "follows".
1494 static isl_stat
detect_ccs(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
1495 isl_bool (*follows
)(int i
, int j
, void *user
))
1498 struct isl_tarjan_graph
*g
= NULL
;
1500 g
= isl_tarjan_graph_init(ctx
, graph
->n
, follows
, graph
);
1502 return isl_stat_error
;
1508 while (g
->order
[i
] != -1) {
1509 graph
->node
[g
->order
[i
]].scc
= graph
->scc
;
1517 isl_tarjan_graph_free(g
);
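/* Illustration (not part of the implementation): with three nodes A, B and C
 * and validity edges A -> B, B -> A and B -> C, node_follows_strong reports
 * that A and B follow each other, so detect_sccs below assigns A and B to
 * the same SCC and C to an SCC of its own.  detect_wccs, which follows edges
 * of any type in either direction, would place all three nodes in a single
 * (weakly connected) component.
 */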
1522 /* Apply Tarjan's algorithm to detect the strongly connected components
1523 * in the dependence graph.
1524 * Only consider the (conditional) validity dependences and clear "weak".
1526 static isl_stat
detect_sccs(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
1529 return detect_ccs(ctx
, graph
, &node_follows_strong
);
1532 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1533 * in the dependence graph.
1534 * Consider all dependences and set "weak".
1536 static isl_stat
detect_wccs(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
1539 return detect_ccs(ctx
, graph
, &node_follows_weak
);
1542 static int cmp_scc(const void *a
, const void *b
, void *data
)
1544 struct isl_sched_graph
*graph
= data
;
1548 return graph
->node
[*i1
].scc
- graph
->node
[*i2
].scc
;
1551 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1553 static int sort_sccs(struct isl_sched_graph
*graph
)
1555 return isl_sort(graph
->sorted
, graph
->n
, sizeof(int), &cmp_scc
, graph
);
1558 /* Return a non-parametric set in the compressed space of "node" that is
1559 * bounded by the size in each direction
1561 * { [x] : -S_i <= x_i <= S_i }
1563 * If S_i is infinity in direction i, then there are no constraints
1564 * in that direction.
1566 * Cache the result in node->bounds.
1568 static __isl_give isl_basic_set
*get_size_bounds(struct isl_sched_node
*node
)
1571 isl_basic_set
*bounds
;
1575 return isl_basic_set_copy(node
->bounds
);
1577 if (node
->compressed
)
1578 space
= isl_multi_aff_get_domain_space(node
->decompress
);
1580 space
= isl_space_copy(node
->space
);
1581 space
= isl_space_drop_all_params(space
);
1582 bounds
= isl_basic_set_universe(space
);
1584 for (i
= 0; i
< node
->nvar
; ++i
) {
1587 size
= isl_multi_val_get_val(node
->sizes
, i
);
1589 return isl_basic_set_free(bounds
);
1590 if (!isl_val_is_int(size
)) {
1594 bounds
= isl_basic_set_upper_bound_val(bounds
, isl_dim_set
, i
,
1595 isl_val_copy(size
));
1596 bounds
= isl_basic_set_lower_bound_val(bounds
, isl_dim_set
, i
,
1600 node
->bounds
= isl_basic_set_copy(bounds
);
/* Drop some constraints from "delta" that could be exploited
 * to construct loop coalescing schedules.
 * In particular, drop those constraints that bound the difference
 * to the size of the domain.
 * First project out the parameters to improve the effectiveness.
 */
1610 static __isl_give isl_set
*drop_coalescing_constraints(
1611 __isl_take isl_set
*delta
, struct isl_sched_node
*node
)
1614 isl_basic_set
*bounds
;
1616 nparam
= isl_set_dim(delta
, isl_dim_param
);
1618 return isl_set_free(delta
);
1620 bounds
= get_size_bounds(node
);
1622 delta
= isl_set_project_out(delta
, isl_dim_param
, 0, nparam
);
1623 delta
= isl_set_remove_divs(delta
);
1624 delta
= isl_set_plain_gist_basic_set(delta
, bounds
);
/* Given a dependence relation R from "node" to itself,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x such that
 *
 *	c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
 *
 * or, equivalently,
 *
 *	c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
 *
 * We choose here to compute the dual of delta R.
 * Alternatively, we could have computed the dual of R, resulting
 * in a set of tuples c_0, c_n, c_x, c_y, and then
 * plugged in (c_0, c_n, c_x, -c_x).
 *
 * If "need_param" is set, then the resulting coefficients effectively
 * include coefficients for the parameters c_n. Otherwise, they may
 * have been projected out already.
 * Since the constraints may be different for these two cases,
 * they are stored in separate caches.
 * In particular, if no parameter coefficients are required and
 * the schedule_treat_coalescing option is set, then the parameters
 * are projected out and some constraints that could be exploited
 * to construct coalescing schedules are removed before the dual
 * is computed.
 *
 * If "node" has been compressed, then the dependence relation
 * is also compressed before the set of coefficients is computed.
 */
1659 static __isl_give isl_basic_set
*intra_coefficients(
1660 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
,
1661 __isl_take isl_map
*map
, int need_param
)
1666 isl_basic_set
*coef
;
1667 isl_maybe_isl_basic_set m
;
1668 isl_map_to_basic_set
**hmap
= &graph
->intra_hmap
;
1674 ctx
= isl_map_get_ctx(map
);
1675 treat
= !need_param
&& isl_options_get_schedule_treat_coalescing(ctx
);
1677 hmap
= &graph
->intra_hmap_param
;
1678 m
= isl_map_to_basic_set_try_get(*hmap
, map
);
1679 if (m
.valid
< 0 || m
.valid
) {
1684 key
= isl_map_copy(map
);
1685 if (node
->compressed
) {
1686 map
= isl_map_preimage_domain_multi_aff(map
,
1687 isl_multi_aff_copy(node
->decompress
));
1688 map
= isl_map_preimage_range_multi_aff(map
,
1689 isl_multi_aff_copy(node
->decompress
));
1691 delta
= isl_map_deltas(map
);
1693 delta
= drop_coalescing_constraints(delta
, node
);
1694 delta
= isl_set_remove_divs(delta
);
1695 coef
= isl_set_coefficients(delta
);
1696 *hmap
= isl_map_to_basic_set_set(*hmap
, key
, isl_basic_set_copy(coef
));
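/* Worked example (illustration only): for the self-dependence
 *
 *	R = { S[i] -> S[i + 1] }
 *
 * we have delta R = { [1] }, so the valid constraints c_0 + c_x d >= 0
 * for all d in delta R reduce to the single condition
 *
 *	c_0 + c_x >= 0
 *
 * and the computed dual is { coefficients[[c_0] -> [c_x]] : c_0 + c_x >= 0 }
 * (with parameter coefficients included or projected out depending on
 * "need_param").
 */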
1701 /* Given a dependence relation R, construct the set of coefficients
1702 * of valid constraints for elements in that dependence relation.
1703 * In particular, the result contains tuples of coefficients
1704 * c_0, c_n, c_x, c_y such that
1706 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1708 * If the source or destination nodes of "edge" have been compressed,
1709 * then the dependence relation is also compressed before
1710 * the set of coefficients is computed.
1712 static __isl_give isl_basic_set
*inter_coefficients(
1713 struct isl_sched_graph
*graph
, struct isl_sched_edge
*edge
,
1714 __isl_take isl_map
*map
)
1718 isl_basic_set
*coef
;
1719 isl_maybe_isl_basic_set m
;
1721 m
= isl_map_to_basic_set_try_get(graph
->inter_hmap
, map
);
1722 if (m
.valid
< 0 || m
.valid
) {
1727 key
= isl_map_copy(map
);
1728 if (edge
->src
->compressed
)
1729 map
= isl_map_preimage_domain_multi_aff(map
,
1730 isl_multi_aff_copy(edge
->src
->decompress
));
1731 if (edge
->dst
->compressed
)
1732 map
= isl_map_preimage_range_multi_aff(map
,
1733 isl_multi_aff_copy(edge
->dst
->decompress
));
1734 set
= isl_map_wrap(isl_map_remove_divs(map
));
1735 coef
= isl_set_coefficients(set
);
1736 graph
->inter_hmap
= isl_map_to_basic_set_set(graph
->inter_hmap
, key
,
1737 isl_basic_set_copy(coef
));
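/* Worked example (illustration only): for an inter-node dependence
 *
 *	R = { A[i] -> B[j] : j >= i }
 *
 * the valid constraints c_0 + c_x x + c_y y >= 0 over the wrapped set
 * { [A[i] -> B[j]] : j >= i } include, for instance,
 * (c_0, c_x, c_y) = (0, -1, 1), corresponding to the inequality
 * j - i >= 0 itself.
 */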
1742 /* Return the position of the coefficients of the variables in
1743 * the coefficients constraints "coef".
1745 * The space of "coef" is of the form
1747 * { coefficients[[cst, params] -> S] }
1749 * Return the position of S.
1751 static isl_size
coef_var_offset(__isl_keep isl_basic_set
*coef
)
1756 space
= isl_space_unwrap(isl_basic_set_get_space(coef
));
1757 offset
= isl_space_dim(space
, isl_dim_in
);
1758 isl_space_free(space
);
/* Return the offset of the coefficient of the constant term of "node"
 * within the LP problem.
 *
 * Within each node, the coefficients have the following order:
 *	- positive and negative parts of c_i_x
 *	- c_i_n (if parametric)
 *	- c_i_0
 */
static int node_cst_coef_offset(struct isl_sched_node *node)
{
	return node->start + 2 * node->nvar + node->nparam;
}

/* Return the offset of the coefficients of the parameters of "node"
 * within the LP problem.
 *
 * Within each node, the coefficients have the following order:
 *	- positive and negative parts of c_i_x
 *	- c_i_n (if parametric)
 *	- c_i_0
 */
static int node_par_coef_offset(struct isl_sched_node *node)
{
	return node->start + 2 * node->nvar;
}

/* Return the offset of the coefficients of the variables of "node"
 * within the LP problem.
 *
 * Within each node, the coefficients have the following order:
 *	- positive and negative parts of c_i_x
 *	- c_i_n (if parametric)
 *	- c_i_0
 */
static int node_var_coef_offset(struct isl_sched_node *node)
{
	return node->start;
}

/* Return the position of the pair of variables encoding
 * coefficient "i" of "node".
 *
 * The order of these variable pairs is the opposite of
 * that of the coefficients, with 2 variables per coefficient.
 */
static int node_var_coef_pos(struct isl_sched_node *node, int i)
{
	return node_var_coef_offset(node) + 2 * (node->nvar - 1 - i);
}
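/* Illustration (not part of the implementation): for a node with
 * node->start = 5, node->nvar = 2 and node->nparam = 1, the LP variables
 * are laid out as
 *
 *	5: c_i_x[1]^-, 6: c_i_x[1]^+, 7: c_i_x[0]^-, 8: c_i_x[0]^+,
 *	9: c_i_n[0], 10: c_i_0
 *
 * so that node_var_coef_pos(node, 0) = 7, node_par_coef_offset(node) = 9
 * and node_cst_coef_offset(node) = 10.
 */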
1813 /* Construct an isl_dim_map for mapping constraints on coefficients
1814 * for "node" to the corresponding positions in graph->lp.
1815 * "offset" is the offset of the coefficients for the variables
1816 * in the input constraints.
1817 * "s" is the sign of the mapping.
1819 * The input constraints are given in terms of the coefficients
1820 * (c_0, c_x) or (c_0, c_n, c_x).
1821 * The mapping produced by this function essentially plugs in
1822 * (0, c_i_x^+ - c_i_x^-) if s = 1 and
1823 * (0, -c_i_x^+ + c_i_x^-) if s = -1 or
1824 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
1825 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
1826 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1827 * Furthermore, the order of these pairs is the opposite of that
1828 * of the corresponding coefficients.
1830 * The caller can extend the mapping to also map the other coefficients
1831 * (and therefore not plug in 0).
1833 static __isl_give isl_dim_map
*intra_dim_map(isl_ctx
*ctx
,
1834 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
,
1839 isl_dim_map
*dim_map
;
1841 total
= isl_basic_set_dim(graph
->lp
, isl_dim_all
);
1842 if (!node
|| total
< 0)
1845 pos
= node_var_coef_pos(node
, 0);
1846 dim_map
= isl_dim_map_alloc(ctx
, total
);
1847 isl_dim_map_range(dim_map
, pos
, -2, offset
, 1, node
->nvar
, -s
);
1848 isl_dim_map_range(dim_map
, pos
+ 1, -2, offset
, 1, node
->nvar
, s
);
1853 /* Construct an isl_dim_map for mapping constraints on coefficients
1854 * for "src" (node i) and "dst" (node j) to the corresponding positions
1856 * "offset" is the offset of the coefficients for the variables of "src"
1857 * in the input constraints.
1858 * "s" is the sign of the mapping.
1860 * The input constraints are given in terms of the coefficients
1861 * (c_0, c_n, c_x, c_y).
1862 * The mapping produced by this function essentially plugs in
1863 * (c_j_0 - c_i_0, c_j_n - c_i_n,
1864 * -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-) if s = 1 and
1865 * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
1866 * c_i_x^+ - c_i_x^-, -(c_j_x^+ - c_j_x^-)) if s = -1.
1867 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1868 * Furthermore, the order of these pairs is the opposite of that
1869 * of the corresponding coefficients.
1871 * The caller can further extend the mapping.
1873 static __isl_give isl_dim_map
*inter_dim_map(isl_ctx
*ctx
,
1874 struct isl_sched_graph
*graph
, struct isl_sched_node
*src
,
1875 struct isl_sched_node
*dst
, int offset
, int s
)
1879 isl_dim_map
*dim_map
;
1881 total
= isl_basic_set_dim(graph
->lp
, isl_dim_all
);
1882 if (!src
|| !dst
|| total
< 0)
1885 dim_map
= isl_dim_map_alloc(ctx
, total
);
1887 pos
= node_cst_coef_offset(dst
);
1888 isl_dim_map_range(dim_map
, pos
, 0, 0, 0, 1, s
);
1889 pos
= node_par_coef_offset(dst
);
1890 isl_dim_map_range(dim_map
, pos
, 1, 1, 1, dst
->nparam
, s
);
1891 pos
= node_var_coef_pos(dst
, 0);
1892 isl_dim_map_range(dim_map
, pos
, -2, offset
+ src
->nvar
, 1,
1894 isl_dim_map_range(dim_map
, pos
+ 1, -2, offset
+ src
->nvar
, 1,
1897 pos
= node_cst_coef_offset(src
);
1898 isl_dim_map_range(dim_map
, pos
, 0, 0, 0, 1, -s
);
1899 pos
= node_par_coef_offset(src
);
1900 isl_dim_map_range(dim_map
, pos
, 1, 1, 1, src
->nparam
, -s
);
1901 pos
= node_var_coef_pos(src
, 0);
1902 isl_dim_map_range(dim_map
, pos
, -2, offset
, 1, src
->nvar
, s
);
1903 isl_dim_map_range(dim_map
, pos
+ 1, -2, offset
, 1, src
->nvar
, -s
);
1908 /* Add the constraints from "src" to "dst" using "dim_map",
1909 * after making sure there is enough room in "dst" for the extra constraints.
1911 static __isl_give isl_basic_set
*add_constraints_dim_map(
1912 __isl_take isl_basic_set
*dst
, __isl_take isl_basic_set
*src
,
1913 __isl_take isl_dim_map
*dim_map
)
1917 n_eq
= isl_basic_set_n_equality(src
);
1918 n_ineq
= isl_basic_set_n_inequality(src
);
1919 dst
= isl_basic_set_extend_constraints(dst
, n_eq
, n_ineq
);
1920 dst
= isl_basic_set_add_constraints_dim_map(dst
, src
, dim_map
);
1924 /* Add constraints to graph->lp that force validity for the given
1925 * dependence from a node i to itself.
1926 * That is, add constraints that enforce
1928 * (c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
1929 * = c_i_x (y - x) >= 0
1931 * for each (x,y) in R.
1932 * We obtain general constraints on coefficients (c_0, c_x)
1933 * of valid constraints for (y - x) and then plug in (0, c_i_x^+ - c_i_x^-),
1934 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
1935 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1936 * Note that the result of intra_coefficients may also contain
1937 * parameter coefficients c_n, in which case 0 is plugged in for them as well.
1939 static isl_stat
add_intra_validity_constraints(struct isl_sched_graph
*graph
,
1940 struct isl_sched_edge
*edge
)
1943 isl_map
*map
= isl_map_copy(edge
->map
);
1944 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1945 isl_dim_map
*dim_map
;
1946 isl_basic_set
*coef
;
1947 struct isl_sched_node
*node
= edge
->src
;
1949 coef
= intra_coefficients(graph
, node
, map
, 0);
1951 offset
= coef_var_offset(coef
);
1953 coef
= isl_basic_set_free(coef
);
1955 return isl_stat_error
;
1957 dim_map
= intra_dim_map(ctx
, graph
, node
, offset
, 1);
1958 graph
->lp
= add_constraints_dim_map(graph
->lp
, coef
, dim_map
);
1963 /* Add constraints to graph->lp that force validity for the given
1964 * dependence from node i to node j.
1965 * That is, add constraints that enforce
1967 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
1969 * for each (x,y) in R.
1970 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1971 * of valid constraints for R and then plug in
1972 * (c_j_0 - c_i_0, c_j_n - c_i_n, -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-),
1973 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
1974 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
static isl_stat add_inter_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int offset;
	isl_map *map;
	isl_ctx *ctx;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	if (!graph->lp)
		return isl_stat_error;

	map = isl_map_copy(edge->map);
	ctx = isl_map_get_ctx(map);
	coef = inter_coefficients(graph, edge, map);

	offset = coef_var_offset(coef);

	if (offset < 0)
		coef = isl_basic_set_free(coef);
	if (!coef)
		return isl_stat_error;

	dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);

	edge->start = graph->lp->n_ineq;
	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
	if (!graph->lp)
		return isl_stat_error;
	edge->end = graph->lp->n_ineq;

	return isl_stat_ok;
}
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from a node i to itself.
 * If s = 1, we add the constraint
 *
 *	c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	-c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
 * with each coefficient (except m_0) represented as a pair of non-negative
 * coefficients.
 *
 * If "local" is set, then we add constraints
 *
 *	c_i_x (y - x) <= 0
 *
 * or
 *
 *	-c_i_x (y - x) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in (0, 0, -s * c_i_x).
 * intra_coefficients is not required to have c_n in its result when
 * "local" is set.  If they are missing, then (0, -s * c_i_x) is plugged in.
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
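/* For example, for R = { [i] -> [i + n] } with parameter n >= 0 and s = 1,
 * the dependence distance is y - x = n, so bounding it by m_0 + m_n n
 * essentially requires m_n >= c_i_x, and the overall LP then tries
 * to keep m_n (and m_0) as small as possible.
 */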
static isl_stat add_intra_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	int offset;
	isl_size nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map, !local);
	nparam = isl_space_dim(node->space, isl_dim_param);

	offset = coef_var_offset(coef);

	if (nparam < 0 || offset < 0)
		coef = isl_basic_set_free(coef);
	if (!coef)
		return isl_stat_error;

	dim_map = intra_dim_map(ctx, graph, node, offset, -s);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}
	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);

	return isl_stat_ok;
}
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from node i to node j.
 * If s = 1, we add the constraint
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *		<= m_0 + m_n n
 *
 * or
 *
 *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
 *		<= m_0 + m_n n
 *
 * or
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
 *  s*c_i_x, -s*c_j_x)
 * with each coefficient (except m_0, c_*_0 and c_*_n)
 * represented as a pair of non-negative coefficients.
 *
 * If "local" is set (and s = 1), then we add constraints
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
 *
 * or
 *
 *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) >= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in
 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, s*c_i_x, -s*c_j_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static isl_stat add_inter_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	int offset;
	isl_size nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);
	nparam = isl_space_dim(src->space, isl_dim_param);

	offset = coef_var_offset(coef);

	if (nparam < 0 || offset < 0)
		coef = isl_basic_set_free(coef);
	if (!coef)
		return isl_stat_error;

	dim_map = inter_dim_map(ctx, graph, src, dst, offset, -s);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}

	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);

	return isl_stat_ok;
}
/* Should the distance over "edge" be forced to zero?
 * That is, is it marked as a local edge?
 * If "use_coincidence" is set, then coincidence edges are treated
 * as local edges.
 */
static int force_zero(struct isl_sched_edge *edge, int use_coincidence)
{
	return is_local(edge) || (use_coincidence && is_coincidence(edge));
}
/* Add all validity constraints to graph->lp.
 *
 * An edge that is forced to be local needs to have its dependence
 * distances equal to zero.  We take care of bounding them by 0 from below
 * here.  add_all_proximity_constraints takes care of bounding them by 0
 * from above.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_validity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int zero;

		zero = force_zero(edge, use_coincidence);
		if (!is_validity(edge) && !zero)
			continue;
		if (edge->src != edge->dst)
			continue;
		if (add_intra_validity_constraints(graph, edge) < 0)
			return -1;
	}

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int zero;

		zero = force_zero(edge, use_coincidence);
		if (!is_validity(edge) && !zero)
			continue;
		if (edge->src == edge->dst)
			continue;
		if (add_inter_validity_constraints(graph, edge) < 0)
			return -1;
	}

	return 0;
}
/* Add constraints to graph->lp that bound the dependence distance
 * for all dependence relations.
 * If a given proximity dependence is identical to a validity
 * dependence, then the dependence distance is already bounded
 * from below (by zero), so we only need to bound the distance
 * from above.  (This includes the case of "local" dependences
 * which are treated as validity dependence by add_all_validity_constraints.)
 * Otherwise, we need to bound the distance both from above and from below.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_proximity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int zero;

		zero = force_zero(edge, use_coincidence);
		if (!is_proximity(edge) && !zero)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, 1, zero) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, 1, zero) < 0)
			return -1;
		if (is_validity(edge) || zero)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
	}

	return 0;
}
/* Normalize the rows of "indep" such that all rows are lexicographically
 * positive and such that each row contains as many final zeros as possible,
 * given the choice for the previous rows.
 * Do this by performing elementary row operations.
 */
static __isl_give isl_mat *normalize_independent(__isl_take isl_mat *indep)
{
	indep = isl_mat_reverse_gauss(indep);
	indep = isl_mat_lexnonneg_rows(indep);
	return indep;
}
/* Extract the linear part of the current schedule for node "node".
 */
static __isl_give isl_mat *extract_linear_schedule(struct isl_sched_node *node)
{
	isl_size n_row = isl_mat_rows(node->sched);

	if (n_row < 0)
		return NULL;

	return isl_mat_sub_alloc(node->sched, 0, n_row,
			      1 + node->nparam, node->nvar);
}
/* Compute a basis for the rows in the linear part of the schedule
 * and extend this basis to a full basis.  The remaining rows
 * can then be used to force linear independence from the rows
 * of the schedule.
 *
 * In particular, given the schedule rows S, we compute
 *
 *	S = H Q
 *	S U = H
 *
 * with H the Hermite normal form of S.  That is, all but the
 * first rank columns of H are zero and so each row in S is
 * a linear combination of the first rank rows of Q.
 * The matrix Q can be used as a variable transformation
 * that isolates the directions of S in the first rank rows.
 * Transposing S U = H yields
 *
 *	U^T S^T = H^T
 *
 * with all but the first rank rows of H^T zero.
 * The last rows of U^T are therefore linear combinations
 * of schedule coefficients that are all zero on schedule
 * coefficients that are linearly dependent on the rows of S.
 * At least one of these combinations is non-zero on
 * linearly independent schedule coefficients.
 * The rows are normalized to involve as few of the last
 * coefficients as possible and to have a positive initial value.
 */
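/* For example, for a single schedule row S = [ 2 4 ] (rank 1),
 * one possible choice is
 *
 *	U = [ 1 -2 ]		S U = H = [ 2 0 ]
 *	    [ 0  1 ]
 *
 * so that the second row of U^T, [ -2 1 ], vanishes on every multiple
 * of S but not on rows that are linearly independent of S.
 * Rows of this kind are what end up in node->indep.
 */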
static int node_update_vmap(struct isl_sched_node *node)
{
	isl_mat *H, *U, *Q;

	H = extract_linear_schedule(node);

	H = isl_mat_left_hermite(H, 0, &U, &Q);
	isl_mat_free(node->indep);
	isl_mat_free(node->vmap);
	node->vmap = Q;
	node->indep = isl_mat_transpose(U);
	node->rank = isl_mat_initial_non_zero_cols(H);
	node->indep = isl_mat_drop_rows(node->indep, 0, node->rank);
	node->indep = normalize_independent(node->indep);
	isl_mat_free(H);

	if (!node->indep || !node->vmap || node->rank < 0)
		return -1;

	return 0;
}
2336 /* Is "edge" marked as a validity or a conditional validity edge?
2338 static int is_any_validity(struct isl_sched_edge
*edge
)
2340 return is_validity(edge
) || is_conditional_validity(edge
);
/* How many times should we count the constraints in "edge"?
 *
 * We count as follows
 *	validity		-> 1 (>= 0)
 *	validity+proximity	-> 2 (>= 0 and upper bound)
 *	proximity		-> 2 (lower and upper bound)
 *	local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If an edge is only marked conditional_validity then it counts
 * as zero since it is only checked afterwards.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int edge_multiplicity(struct isl_sched_edge *edge, int use_coincidence)
{
	if (is_proximity(edge) || force_zero(edge, use_coincidence))
		return 2;
	if (is_validity(edge))
		return 1;
	return 0;
}
/* How many times should the constraints in "edge" be counted
 * as a parametric intra-node constraint?
 *
 * Only proximity edges that are not forced zero need
 * coefficient constraints that include coefficients for parameters.
 * If the edge is also a validity edge, then only
 * an upper bound is introduced.  Otherwise, both lower and upper bounds
 * are introduced.
 */
static int parametric_intra_edge_multiplicity(struct isl_sched_edge *edge,
	int use_coincidence)
{
	if (edge->src != edge->dst)
		return 0;
	if (!is_proximity(edge))
		return 0;
	if (force_zero(edge, use_coincidence))
		return 0;
	if (is_validity(edge))
		return 1;
	else
		return 2;
}
2390 /* Add "f" times the number of equality and inequality constraints of "bset"
2391 * to "n_eq" and "n_ineq" and free "bset".
2393 static isl_stat
update_count(__isl_take isl_basic_set
*bset
,
2394 int f
, int *n_eq
, int *n_ineq
)
2397 return isl_stat_error
;
2399 *n_eq
+= isl_basic_set_n_equality(bset
);
2400 *n_ineq
+= isl_basic_set_n_inequality(bset
);
2401 isl_basic_set_free(bset
);
/* Count the number of equality and inequality constraints
 * that will be added for the given map.
 *
 * The edges that require parameter coefficients are counted separately.
 *
 * "use_coincidence" is set if we should take into account coincidence edges.
 */
static isl_stat count_map_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map,
	int *n_eq, int *n_ineq, int use_coincidence)
{
	isl_map *copy;
	isl_basic_set *coef;
	int f = edge_multiplicity(edge, use_coincidence);
	int fp = parametric_intra_edge_multiplicity(edge, use_coincidence);

	if (f == 0) {
		isl_map_free(map);
		return isl_stat_ok;
	}

	if (edge->src != edge->dst) {
		coef = inter_coefficients(graph, edge, map);
		return update_count(coef, f, n_eq, n_ineq);
	}

	if (fp > 0) {
		copy = isl_map_copy(map);
		coef = intra_coefficients(graph, edge->src, copy, 1);
		if (update_count(coef, fp, n_eq, n_ineq) < 0)
			goto error;
	}

	if (f > fp) {
		copy = isl_map_copy(map);
		coef = intra_coefficients(graph, edge->src, copy, 0);
		if (update_count(coef, f - fp, n_eq, n_ineq) < 0)
			goto error;
	}

	isl_map_free(map);
	return isl_stat_ok;
error:
	isl_map_free(map);
	return isl_stat_error;
}
/* Count the number of equality and inequality constraints
 * that will be added to the main lp problem.
 * We count as follows
 *	validity		-> 1 (>= 0)
 *	validity+proximity	-> 2 (>= 0 and upper bound)
 *	proximity		-> 2 (lower and upper bound)
 *	local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int count_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq, int use_coincidence)
{
	int i;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_map *map = isl_map_copy(edge->map);

		if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
					    use_coincidence) < 0)
			return -1;
	}

	return 0;
}
/* Count the number of constraints that will be added by
 * add_bound_constant_constraints to bound the values of the constant terms
 * and increment *n_eq and *n_ineq accordingly.
 *
 * In practice, add_bound_constant_constraints only adds inequalities.
 */
static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	if (isl_options_get_schedule_max_constant_term(ctx) == -1)
		return isl_stat_ok;

	*n_ineq += graph->n;

	return isl_stat_ok;
}
/* Add constraints to bound the values of the constant terms in the schedule,
 * if requested by the user.
 *
 * The maximal value of the constant terms is defined by the option
 * "schedule_max_constant_term".
 */
static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i, k;
	int max;
	isl_size total;

	max = isl_options_get_schedule_max_constant_term(ctx);
	if (max == -1)
		return isl_stat_ok;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);
	if (total < 0)
		return isl_stat_error;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos;

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		pos = node_cst_coef_offset(node);
		isl_int_set_si(graph->lp->ineq[k][1 + pos], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	return isl_stat_ok;
}
/* Count the number of constraints that will be added by
 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
 * accordingly.
 *
 * In practice, add_bound_coefficient_constraints only adds inequalities.
 */
static int count_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	int i;

	if (isl_options_get_schedule_max_coefficient(ctx) == -1 &&
	    !isl_options_get_schedule_treat_coalescing(ctx))
		return 0;

	for (i = 0; i < graph->n; ++i)
		*n_ineq += graph->node[i].nparam + 2 * graph->node[i].nvar;

	return 0;
}
/* Add constraints to graph->lp that bound the values of
 * the parameter schedule coefficients of "node" to "max" and
 * the variable schedule coefficients to the corresponding entry
 * in node->max.
 * In either case, a negative value means that no bound needs to be imposed.
 *
 * For parameter coefficients, this amounts to adding a constraint
 *
 *	c_n <= max
 *
 * i.e.,
 *
 *	-c_n + max >= 0
 *
 * The variable coefficients are, however, not represented directly.
 * Instead, the variable coefficients c_x are written as differences
 * c_x = c_x^+ - c_x^-.
 * That is,
 *
 *	-max_i <= c_x_i <= max_i
 *
 * is encoded as
 *
 *	-max_i <= c_x_i^+ - c_x_i^- <= max_i
 *
 * or
 *
 *	-(c_x_i^+ - c_x_i^-) + max_i >= 0
 *	c_x_i^+ - c_x_i^- + max_i >= 0
 */
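/* For example, with max_i = 3, the pair of inequalities for c_x_i is
 *
 *	-c_x_i^+ + c_x_i^- + 3 >= 0
 *	c_x_i^+ - c_x_i^- + 3 >= 0
 *
 * which restricts the recovered coefficient c_x_i = c_x_i^+ - c_x_i^-
 * to the range [-3, 3].
 */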
static isl_stat node_add_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_sched_node *node, int max)
{
	int i, j, k;
	isl_size total;
	isl_vec *ineq;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);
	if (total < 0)
		return isl_stat_error;

	for (j = 0; j < node->nparam; ++j) {
		int dim;

		if (max < 0)
			continue;

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		dim = 1 + node_par_coef_offset(node) + j;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][dim], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	ineq = isl_vec_alloc(ctx, 1 + total);
	ineq = isl_vec_clr(ineq);
	if (!ineq)
		return isl_stat_error;
	for (i = 0; i < node->nvar; ++i) {
		int pos = 1 + node_var_coef_pos(node, i);

		if (isl_int_is_neg(node->max->el[i]))
			continue;

		isl_int_set_si(ineq->el[pos], 1);
		isl_int_set_si(ineq->el[pos + 1], -1);
		isl_int_set(ineq->el[0], node->max->el[i]);

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			goto error;
		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);

		isl_seq_neg(ineq->el + pos, ineq->el + pos, 2);
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			goto error;
		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);

		isl_seq_clr(ineq->el + pos, 2);
	}
	isl_vec_free(ineq);

	return isl_stat_ok;
error:
	isl_vec_free(ineq);
	return isl_stat_error;
}
/* Add constraints that bound the values of the variable and parameter
 * coefficients of the schedule.
 *
 * The maximal value of the coefficients is defined by the option
 * 'schedule_max_coefficient' and the entries in node->max.
 * These latter entries are only set if either the schedule_max_coefficient
 * option or the schedule_treat_coalescing option is set.
 */
static isl_stat add_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int max;

	max = isl_options_get_schedule_max_coefficient(ctx);

	if (max == -1 && !isl_options_get_schedule_treat_coalescing(ctx))
		return isl_stat_ok;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		if (node_add_coefficient_constraints(ctx, graph, node, max) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}
/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the "n" values starting at "first".
 */
static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos, int first, int n)
{
	int i, k;
	isl_size total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);
	if (total < 0)
		return isl_stat_error;

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < n; ++i)
		isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);

	return isl_stat_ok;
}
/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the parameter coefficients of all nodes.
 */
static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos)
{
	int i, j, k;
	isl_size total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);
	if (total < 0)
		return isl_stat_error;

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < graph->n; ++i) {
		int pos = 1 + node_par_coef_offset(&graph->node[i]);

		for (j = 0; j < graph->node[i].nparam; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	return isl_stat_ok;
}
/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the variable coefficients of all nodes.
 */
static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos)
{
	int i, j, k;
	isl_size total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);
	if (total < 0)
		return isl_stat_error;

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node_var_coef_offset(node);

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	return isl_stat_ok;
}
/* Construct an ILP problem for finding schedule coefficients
 * that result in non-negative, but small dependence distances
 * over all dependences.
 * In particular, the dependence distances over proximity edges
 * are bounded by m_0 + m_n n and we compute schedule coefficients
 * with small values (preferably zero) of m_n and m_0.
 *
 * All variables of the ILP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of positive and negative parts of m_n coefficients
 *	- m_0
 *	- sum of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- positive and negative parts of m_n coefficients
 *	- for each node
 *		- positive and negative parts of c_i_x, in opposite order
 *		- c_i_n (if parametric)
 *		- c_i_0
 *
 * The constraints are those from the edges plus two or three equalities
 * to express the sums.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
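/* For example, for a single node with nvar = 2 and nparam = 1,
 * this order places the variables at the following positions:
 *
 *	0: sum of the m_n parts			1: m_0
 *	2: sum of the c_n coefficients		3: sum of the c_x parts
 *	4, 5: m_n^-, m_n^+
 *	6-9: c_i_x_1^-, c_i_x_1^+, c_i_x_0^-, c_i_x_0^+
 *	10: c_i_n				11: c_i_0
 *
 * so that this node's block starts at node->start = 6.
 */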
static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;
	isl_size nparam;
	unsigned total;
	isl_space *space;
	int parametric;
	int param_pos;
	int n_eq, n_ineq;

	parametric = ctx->opt->schedule_parametric;
	nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
	if (nparam < 0)
		return isl_stat_error;
	param_pos = 4;
	total = param_pos + 2 * nparam;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		if (node_update_vmap(node) < 0)
			return isl_stat_error;
		node->start = total;
		total += 1 + node->nparam + 2 * node->nvar;
	}

	if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
		return isl_stat_error;
	if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;
	if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	space = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 2 + parametric;

	graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);

	if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
		return isl_stat_error;
	if (parametric && add_param_sum_constraint(graph, 2) < 0)
		return isl_stat_error;
	if (add_var_sum_constraint(graph, 3) < 0)
		return isl_stat_error;
	if (add_bound_constant_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_bound_coefficient_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_all_validity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;
	if (add_all_proximity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Analyze the conflicting constraint found by
 * isl_tab_basic_set_non_trivial_lexmin.  If it corresponds to the validity
 * constraint of one of the edges between distinct nodes, living, moreover
 * in distinct SCCs, then record the source and sink SCC as this may
 * be a good place to cut between SCCs.
 */
static int check_conflict(int con, void *user)
{
	int i;
	struct isl_sched_graph *graph = user;

	if (graph->src_scc >= 0)
		return 0;

	con -= graph->lp->n_eq;

	if (con >= graph->lp->n_ineq)
		return 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (!is_validity(&graph->edge[i]))
			continue;
		if (graph->edge[i].src == graph->edge[i].dst)
			continue;
		if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
			continue;
		if (graph->edge[i].start > con)
			continue;
		if (graph->edge[i].end <= con)
			continue;
		graph->src_scc = graph->edge[i].src->scc;
		graph->dst_scc = graph->edge[i].dst->scc;
	}

	return 0;
}
/* Check whether the next schedule row of the given node needs to be
 * non-trivial.  Lower-dimensional domains may have some trivial rows,
 * but as soon as the number of remaining required non-trivial rows
 * is as large as the number of remaining rows to be computed,
 * all remaining rows need to be non-trivial.
 */
static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
{
	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
}
/* Construct a non-triviality region with triviality directions
 * corresponding to the rows of "indep".
 * The rows of "indep" are expressed in terms of the schedule coefficients c_i,
 * while the triviality directions are expressed in terms of
 * pairs of non-negative variables c^+_i - c^-_i, with c^-_i appearing
 * before c^+_i.  Furthermore,
 * the pairs of non-negative variables representing the coefficients
 * are stored in the opposite order.
 */
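/* For example, the single row [ a b ] of "indep" is turned into the row
 *
 *	[ -b b -a a ]
 *
 * of the region matrix: the pair for the second coefficient comes first
 * and each coefficient c is replaced by (-c, c) on its (c^-, c^+) pair,
 * so that the row evaluates to b (c^+_1 - c^-_1) + a (c^+_0 - c^-_0).
 */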
static __isl_give isl_mat *construct_trivial(__isl_keep isl_mat *indep)
{
	isl_ctx *ctx;
	isl_mat *mat;
	int i, j;
	isl_size n, n_var;

	n = isl_mat_rows(indep);
	n_var = isl_mat_cols(indep);
	if (n < 0 || n_var < 0)
		return NULL;

	ctx = isl_mat_get_ctx(indep);
	mat = isl_mat_alloc(ctx, n, 2 * n_var);
	if (!mat)
		return NULL;
	for (i = 0; i < n; ++i) {
		for (j = 0; j < n_var; ++j) {
			int nj = n_var - 1 - j;
			isl_int_neg(mat->row[i][2 * nj], indep->row[i][j]);
			isl_int_set(mat->row[i][2 * nj + 1], indep->row[i][j]);
		}
	}

	return mat;
}
/* Solve the ILP problem constructed in setup_lp.
 * For each node such that all the remaining rows of its schedule
 * need to be non-trivial, we construct a non-triviality region.
 * This region imposes that the next row is independent of previous rows.
 * In particular, the non-triviality region enforces that at least
 * one of the linear combinations in the rows of node->indep is non-zero.
 */
static __isl_give isl_vec *solve_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;
	isl_vec *sol;
	isl_basic_set *lp;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		isl_mat *trivial;

		graph->region[i].pos = node_var_coef_offset(node);
		if (needs_row(graph, node))
			trivial = construct_trivial(node->indep);
		else
			trivial = isl_mat_zero(ctx, 0, 0);
		graph->region[i].trivial = trivial;
	}
	lp = isl_basic_set_copy(graph->lp);
	sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
				graph->region, &check_conflict, graph);
	for (i = 0; i < graph->n; ++i)
		isl_mat_free(graph->region[i].trivial);
	return sol;
}
/* Extract the coefficients for the variables of "node" from "sol".
 *
 * Each schedule coefficient c_i_x is represented as the difference
 * between two non-negative variables c_i_x^+ - c_i_x^-.
 * The c_i_x^- appear before their c_i_x^+ counterpart.
 * Furthermore, the order of these pairs is the opposite of that
 * of the corresponding coefficients.
 *
 * Return c_i_x = c_i_x^+ - c_i_x^-
 */
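/* For example, for a node with nvar = 2, the variable part of "sol" holds
 * (c_1^-, c_1^+, c_0^-, c_0^+), so csol->el[1] = c_1^+ - c_1^- and
 * csol->el[0] = c_0^+ - c_0^-, restoring the original coefficient order.
 */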
static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
	__isl_keep isl_vec *sol)
{
	int i;
	int pos;
	isl_vec *csol;

	if (!sol)
		return NULL;
	csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
	if (!csol)
		return NULL;

	pos = 1 + node_var_coef_offset(node);
	for (i = 0; i < node->nvar; ++i)
		isl_int_sub(csol->el[node->nvar - 1 - i],
			    sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);

	return csol;
}
2991 /* Update the schedules of all nodes based on the given solution
2992 * of the LP problem.
2993 * The new row is added to the current band.
2994 * All possibly negative coefficients are encoded as a difference
2995 * of two non-negative variables, so we need to perform the subtraction
2998 * If coincident is set, then the caller guarantees that the new
2999 * row satisfies the coincidence constraints.
3001 static int update_schedule(struct isl_sched_graph
*graph
,
3002 __isl_take isl_vec
*sol
, int coincident
)
3005 isl_vec
*csol
= NULL
;
3010 isl_die(sol
->ctx
, isl_error_internal
,
3011 "no solution found", goto error
);
3012 if (graph
->n_total_row
>= graph
->max_row
)
3013 isl_die(sol
->ctx
, isl_error_internal
,
3014 "too many schedule rows", goto error
);
3016 for (i
= 0; i
< graph
->n
; ++i
) {
3017 struct isl_sched_node
*node
= &graph
->node
[i
];
3019 isl_size row
= isl_mat_rows(node
->sched
);
3022 csol
= extract_var_coef(node
, sol
);
3023 if (row
< 0 || !csol
)
3026 isl_map_free(node
->sched_map
);
3027 node
->sched_map
= NULL
;
3028 node
->sched
= isl_mat_add_rows(node
->sched
, 1);
3031 pos
= node_cst_coef_offset(node
);
3032 node
->sched
= isl_mat_set_element(node
->sched
,
3033 row
, 0, sol
->el
[1 + pos
]);
3034 pos
= node_par_coef_offset(node
);
3035 for (j
= 0; j
< node
->nparam
; ++j
)
3036 node
->sched
= isl_mat_set_element(node
->sched
,
3037 row
, 1 + j
, sol
->el
[1 + pos
+ j
]);
3038 for (j
= 0; j
< node
->nvar
; ++j
)
3039 node
->sched
= isl_mat_set_element(node
->sched
,
3040 row
, 1 + node
->nparam
+ j
, csol
->el
[j
]);
3041 node
->coincident
[graph
->n_total_row
] = coincident
;
3047 graph
->n_total_row
++;
3056 /* Convert row "row" of node->sched into an isl_aff living in "ls"
3057 * and return this isl_aff.
3059 static __isl_give isl_aff
*extract_schedule_row(__isl_take isl_local_space
*ls
,
3060 struct isl_sched_node
*node
, int row
)
3068 aff
= isl_aff_zero_on_domain(ls
);
3069 if (isl_mat_get_element(node
->sched
, row
, 0, &v
) < 0)
3071 aff
= isl_aff_set_constant(aff
, v
);
3072 for (j
= 0; j
< node
->nparam
; ++j
) {
3073 if (isl_mat_get_element(node
->sched
, row
, 1 + j
, &v
) < 0)
3075 aff
= isl_aff_set_coefficient(aff
, isl_dim_param
, j
, v
);
3077 for (j
= 0; j
< node
->nvar
; ++j
) {
3078 if (isl_mat_get_element(node
->sched
, row
,
3079 1 + node
->nparam
+ j
, &v
) < 0)
3081 aff
= isl_aff_set_coefficient(aff
, isl_dim_in
, j
, v
);
3093 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
3094 * and return this multi_aff.
3096 * The result is defined over the uncompressed node domain.
3098 static __isl_give isl_multi_aff
*node_extract_partial_schedule_multi_aff(
3099 struct isl_sched_node
*node
, int first
, int n
)
3103 isl_local_space
*ls
;
3110 nrow
= isl_mat_rows(node
->sched
);
3113 if (node
->compressed
)
3114 space
= isl_multi_aff_get_domain_space(node
->decompress
);
3116 space
= isl_space_copy(node
->space
);
3117 ls
= isl_local_space_from_space(isl_space_copy(space
));
3118 space
= isl_space_from_domain(space
);
3119 space
= isl_space_add_dims(space
, isl_dim_out
, n
);
3120 ma
= isl_multi_aff_zero(space
);
3122 for (i
= first
; i
< first
+ n
; ++i
) {
3123 aff
= extract_schedule_row(isl_local_space_copy(ls
), node
, i
);
3124 ma
= isl_multi_aff_set_aff(ma
, i
- first
, aff
);
3127 isl_local_space_free(ls
);
3129 if (node
->compressed
)
3130 ma
= isl_multi_aff_pullback_multi_aff(ma
,
3131 isl_multi_aff_copy(node
->compress
));
3136 /* Convert node->sched into a multi_aff and return this multi_aff.
3138 * The result is defined over the uncompressed node domain.
3140 static __isl_give isl_multi_aff
*node_extract_schedule_multi_aff(
3141 struct isl_sched_node
*node
)
3145 nrow
= isl_mat_rows(node
->sched
);
3148 return node_extract_partial_schedule_multi_aff(node
, 0, nrow
);
/* Convert node->sched into a map and return this map.
 *
 * The result is cached in node->sched_map, which needs to be released
 * whenever node->sched is updated.
 * It is defined over the uncompressed node domain.
 */
static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
{
	if (!node->sched_map) {
		isl_multi_aff *ma;

		ma = node_extract_schedule_multi_aff(node);
		node->sched_map = isl_map_from_multi_aff(ma);
	}

	return isl_map_copy(node->sched_map);
}
/* Construct a map that can be used to update a dependence relation
 * based on the current schedule.
 * That is, construct a map expressing that source and sink
 * are executed within the same iteration of the current schedule.
 * This map can then be intersected with the dependence relation.
 * This is not the most efficient way, but this shouldn't be a critical
 * operation.
 */
static __isl_give isl_map *specializer(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_map *src_sched, *dst_sched;

	src_sched = node_extract_schedule(src);
	dst_sched = node_extract_schedule(dst);
	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
}
/* Intersect the domains of the nested relations in domain and range
 * of "umap" with "map".
 */
static __isl_give isl_union_map *intersect_domains(
	__isl_take isl_union_map *umap, __isl_keep isl_map *map)
{
	isl_union_set *uset;

	umap = isl_union_map_zip(umap);
	uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
	umap = isl_union_map_intersect_domain(umap, uset);
	umap = isl_union_map_zip(umap);
	return umap;
}
3202 /* Update the dependence relation of the given edge based
3203 * on the current schedule.
3204 * If the dependence is carried completely by the current schedule, then
3205 * it is removed from the edge_tables. It is kept in the list of edges
3206 * as otherwise all edge_tables would have to be recomputed.
3208 * If the edge is of a type that can appear multiple times
3209 * between the same pair of nodes, then it is added to
3210 * the edge table (again). This prevents the situation
3211 * where none of these edges is referenced from the edge table
3212 * because the one that was referenced turned out to be empty and
3213 * was therefore removed from the table.
3215 static isl_stat
update_edge(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
3216 struct isl_sched_edge
*edge
)
3221 id
= specializer(edge
->src
, edge
->dst
);
3222 edge
->map
= isl_map_intersect(edge
->map
, isl_map_copy(id
));
3226 if (edge
->tagged_condition
) {
3227 edge
->tagged_condition
=
3228 intersect_domains(edge
->tagged_condition
, id
);
3229 if (!edge
->tagged_condition
)
3232 if (edge
->tagged_validity
) {
3233 edge
->tagged_validity
=
3234 intersect_domains(edge
->tagged_validity
, id
);
3235 if (!edge
->tagged_validity
)
3239 empty
= isl_map_plain_is_empty(edge
->map
);
3243 graph_remove_edge(graph
, edge
);
3244 } else if (is_multi_edge_type(edge
)) {
3245 if (graph_edge_tables_add(ctx
, graph
, edge
) < 0)
3253 return isl_stat_error
;
/* Does the domain of "umap" intersect "uset"?
 */
static int domain_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Does the range of "umap" intersect "uset"?
 */
static int range_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}
3286 /* Are the condition dependences of "edge" local with respect to
3287 * the current schedule?
3289 * That is, are domain and range of the condition dependences mapped
3290 * to the same point?
3292 * In other words, is the condition false?
3294 static int is_condition_false(struct isl_sched_edge
*edge
)
3296 isl_union_map
*umap
;
3297 isl_map
*map
, *sched
, *test
;
3300 empty
= isl_union_map_is_empty(edge
->tagged_condition
);
3301 if (empty
< 0 || empty
)
3304 umap
= isl_union_map_copy(edge
->tagged_condition
);
3305 umap
= isl_union_map_zip(umap
);
3306 umap
= isl_union_set_unwrap(isl_union_map_domain(umap
));
3307 map
= isl_map_from_union_map(umap
);
3309 sched
= node_extract_schedule(edge
->src
);
3310 map
= isl_map_apply_domain(map
, sched
);
3311 sched
= node_extract_schedule(edge
->dst
);
3312 map
= isl_map_apply_range(map
, sched
);
3314 test
= isl_map_identity(isl_map_get_space(map
));
3315 local
= isl_map_is_subset(map
, test
);
3322 /* For each conditional validity constraint that is adjacent
3323 * to a condition with domain in condition_source or range in condition_sink,
3324 * turn it into an unconditional validity constraint.
3326 static int unconditionalize_adjacent_validity(struct isl_sched_graph
*graph
,
3327 __isl_take isl_union_set
*condition_source
,
3328 __isl_take isl_union_set
*condition_sink
)
3332 condition_source
= isl_union_set_coalesce(condition_source
);
3333 condition_sink
= isl_union_set_coalesce(condition_sink
);
3335 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3337 isl_union_map
*validity
;
3339 if (!is_conditional_validity(&graph
->edge
[i
]))
3341 if (is_validity(&graph
->edge
[i
]))
3344 validity
= graph
->edge
[i
].tagged_validity
;
3345 adjacent
= domain_intersects(validity
, condition_sink
);
3346 if (adjacent
>= 0 && !adjacent
)
3347 adjacent
= range_intersects(validity
, condition_source
);
3353 set_validity(&graph
->edge
[i
]);
3356 isl_union_set_free(condition_source
);
3357 isl_union_set_free(condition_sink
);
3360 isl_union_set_free(condition_source
);
3361 isl_union_set_free(condition_sink
);
3365 /* Update the dependence relations of all edges based on the current schedule
3366 * and enforce conditional validity constraints that are adjacent
3367 * to satisfied condition constraints.
3369 * First check if any of the condition constraints are satisfied
3370 * (i.e., not local to the outer schedule) and keep track of
3371 * their domain and range.
3372 * Then update all dependence relations (which removes the non-local
3374 * Finally, if any condition constraints turned out to be satisfied,
3375 * then turn all adjacent conditional validity constraints into
3376 * unconditional validity constraints.
3378 static int update_edges(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
3382 isl_union_set
*source
, *sink
;
3384 source
= isl_union_set_empty(isl_space_params_alloc(ctx
, 0));
3385 sink
= isl_union_set_empty(isl_space_params_alloc(ctx
, 0));
3386 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3388 isl_union_set
*uset
;
3389 isl_union_map
*umap
;
3391 if (!is_condition(&graph
->edge
[i
]))
3393 if (is_local(&graph
->edge
[i
]))
3395 local
= is_condition_false(&graph
->edge
[i
]);
3403 umap
= isl_union_map_copy(graph
->edge
[i
].tagged_condition
);
3404 uset
= isl_union_map_domain(umap
);
3405 source
= isl_union_set_union(source
, uset
);
3407 umap
= isl_union_map_copy(graph
->edge
[i
].tagged_condition
);
3408 uset
= isl_union_map_range(umap
);
3409 sink
= isl_union_set_union(sink
, uset
);
3412 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3413 if (update_edge(ctx
, graph
, &graph
->edge
[i
]) < 0)
3418 return unconditionalize_adjacent_validity(graph
, source
, sink
);
3420 isl_union_set_free(source
);
3421 isl_union_set_free(sink
);
3424 isl_union_set_free(source
);
3425 isl_union_set_free(sink
);
static void next_band(struct isl_sched_graph *graph)
{
	graph->band_start = graph->n_total_row;
}
3434 /* Return the union of the universe domains of the nodes in "graph"
3435 * that satisfy "pred".
3437 static __isl_give isl_union_set
*isl_sched_graph_domain(isl_ctx
*ctx
,
3438 struct isl_sched_graph
*graph
,
3439 int (*pred
)(struct isl_sched_node
*node
, int data
), int data
)
3445 for (i
= 0; i
< graph
->n
; ++i
)
3446 if (pred(&graph
->node
[i
], data
))
3450 isl_die(ctx
, isl_error_internal
,
3451 "empty component", return NULL
);
3453 set
= isl_set_universe(isl_space_copy(graph
->node
[i
].space
));
3454 dom
= isl_union_set_from_set(set
);
3456 for (i
= i
+ 1; i
< graph
->n
; ++i
) {
3457 if (!pred(&graph
->node
[i
], data
))
3459 set
= isl_set_universe(isl_space_copy(graph
->node
[i
].space
));
3460 dom
= isl_union_set_union(dom
, isl_union_set_from_set(set
));
3466 /* Return a list of unions of universe domains, where each element
3467 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
3469 static __isl_give isl_union_set_list
*extract_sccs(isl_ctx
*ctx
,
3470 struct isl_sched_graph
*graph
)
3473 isl_union_set_list
*filters
;
3475 filters
= isl_union_set_list_alloc(ctx
, graph
->scc
);
3476 for (i
= 0; i
< graph
->scc
; ++i
) {
3479 dom
= isl_sched_graph_domain(ctx
, graph
, &node_scc_exactly
, i
);
3480 filters
= isl_union_set_list_add(filters
, dom
);
3486 /* Return a list of two unions of universe domains, one for the SCCs up
3487 * to and including graph->src_scc and another for the other SCCs.
3489 static __isl_give isl_union_set_list
*extract_split(isl_ctx
*ctx
,
3490 struct isl_sched_graph
*graph
)
3493 isl_union_set_list
*filters
;
3495 filters
= isl_union_set_list_alloc(ctx
, 2);
3496 dom
= isl_sched_graph_domain(ctx
, graph
,
3497 &node_scc_at_most
, graph
->src_scc
);
3498 filters
= isl_union_set_list_add(filters
, dom
);
3499 dom
= isl_sched_graph_domain(ctx
, graph
,
3500 &node_scc_at_least
, graph
->src_scc
+ 1);
3501 filters
= isl_union_set_list_add(filters
, dom
);
3506 /* Copy nodes that satisfy node_pred from the src dependence graph
3507 * to the dst dependence graph.
3509 static isl_stat
copy_nodes(struct isl_sched_graph
*dst
,
3510 struct isl_sched_graph
*src
,
3511 int (*node_pred
)(struct isl_sched_node
*node
, int data
), int data
)
3516 for (i
= 0; i
< src
->n
; ++i
) {
3519 if (!node_pred(&src
->node
[i
], data
))
3523 dst
->node
[j
].space
= isl_space_copy(src
->node
[i
].space
);
3524 dst
->node
[j
].compressed
= src
->node
[i
].compressed
;
3525 dst
->node
[j
].hull
= isl_set_copy(src
->node
[i
].hull
);
3526 dst
->node
[j
].compress
=
3527 isl_multi_aff_copy(src
->node
[i
].compress
);
3528 dst
->node
[j
].decompress
=
3529 isl_multi_aff_copy(src
->node
[i
].decompress
);
3530 dst
->node
[j
].nvar
= src
->node
[i
].nvar
;
3531 dst
->node
[j
].nparam
= src
->node
[i
].nparam
;
3532 dst
->node
[j
].sched
= isl_mat_copy(src
->node
[i
].sched
);
3533 dst
->node
[j
].sched_map
= isl_map_copy(src
->node
[i
].sched_map
);
3534 dst
->node
[j
].coincident
= src
->node
[i
].coincident
;
3535 dst
->node
[j
].sizes
= isl_multi_val_copy(src
->node
[i
].sizes
);
3536 dst
->node
[j
].bounds
= isl_basic_set_copy(src
->node
[i
].bounds
);
3537 dst
->node
[j
].max
= isl_vec_copy(src
->node
[i
].max
);
3540 if (!dst
->node
[j
].space
|| !dst
->node
[j
].sched
)
3541 return isl_stat_error
;
3542 if (dst
->node
[j
].compressed
&&
3543 (!dst
->node
[j
].hull
|| !dst
->node
[j
].compress
||
3544 !dst
->node
[j
].decompress
))
3545 return isl_stat_error
;
3551 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3552 * to the dst dependence graph.
3553 * If the source or destination node of the edge is not in the destination
3554 * graph, then it must be a backward proximity edge and it should simply
3557 static isl_stat
copy_edges(isl_ctx
*ctx
, struct isl_sched_graph
*dst
,
3558 struct isl_sched_graph
*src
,
3559 int (*edge_pred
)(struct isl_sched_edge
*edge
, int data
), int data
)
3564 for (i
= 0; i
< src
->n_edge
; ++i
) {
3565 struct isl_sched_edge
*edge
= &src
->edge
[i
];
3567 isl_union_map
*tagged_condition
;
3568 isl_union_map
*tagged_validity
;
3569 struct isl_sched_node
*dst_src
, *dst_dst
;
3571 if (!edge_pred(edge
, data
))
3574 if (isl_map_plain_is_empty(edge
->map
))
3577 dst_src
= graph_find_node(ctx
, dst
, edge
->src
->space
);
3578 dst_dst
= graph_find_node(ctx
, dst
, edge
->dst
->space
);
3579 if (!dst_src
|| !dst_dst
)
3580 return isl_stat_error
;
3581 if (!is_node(dst
, dst_src
) || !is_node(dst
, dst_dst
)) {
3582 if (is_validity(edge
) || is_conditional_validity(edge
))
3583 isl_die(ctx
, isl_error_internal
,
3584 "backward (conditional) validity edge",
3585 return isl_stat_error
);
3589 map
= isl_map_copy(edge
->map
);
3590 tagged_condition
= isl_union_map_copy(edge
->tagged_condition
);
3591 tagged_validity
= isl_union_map_copy(edge
->tagged_validity
);
3593 dst
->edge
[dst
->n_edge
].src
= dst_src
;
3594 dst
->edge
[dst
->n_edge
].dst
= dst_dst
;
3595 dst
->edge
[dst
->n_edge
].map
= map
;
3596 dst
->edge
[dst
->n_edge
].tagged_condition
= tagged_condition
;
3597 dst
->edge
[dst
->n_edge
].tagged_validity
= tagged_validity
;
3598 dst
->edge
[dst
->n_edge
].types
= edge
->types
;
3601 if (edge
->tagged_condition
&& !tagged_condition
)
3602 return isl_stat_error
;
3603 if (edge
->tagged_validity
&& !tagged_validity
)
3604 return isl_stat_error
;
3606 if (graph_edge_tables_add(ctx
, dst
,
3607 &dst
->edge
[dst
->n_edge
- 1]) < 0)
3608 return isl_stat_error
;
3614 /* Compute the maximal number of variables over all nodes.
3615 * This is the maximal number of linearly independent schedule
3616 * rows that we need to compute.
3617 * Just in case we end up in a part of the dependence graph
3618 * with only lower-dimensional domains, we make sure we will
3619 * compute the required amount of extra linearly independent rows.
3621 static int compute_maxvar(struct isl_sched_graph
*graph
)
3626 for (i
= 0; i
< graph
->n
; ++i
) {
3627 struct isl_sched_node
*node
= &graph
->node
[i
];
3630 if (node_update_vmap(node
) < 0)
3632 nvar
= node
->nvar
+ graph
->n_row
- node
->rank
;
3633 if (nvar
> graph
->maxvar
)
3634 graph
->maxvar
= nvar
;
3640 /* Extract the subgraph of "graph" that consists of the nodes satisfying
3641 * "node_pred" and the edges satisfying "edge_pred" and store
3642 * the result in "sub".
3644 static isl_stat
extract_sub_graph(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
3645 int (*node_pred
)(struct isl_sched_node
*node
, int data
),
3646 int (*edge_pred
)(struct isl_sched_edge
*edge
, int data
),
3647 int data
, struct isl_sched_graph
*sub
)
3649 int i
, n
= 0, n_edge
= 0;
3652 for (i
= 0; i
< graph
->n
; ++i
)
3653 if (node_pred(&graph
->node
[i
], data
))
3655 for (i
= 0; i
< graph
->n_edge
; ++i
)
3656 if (edge_pred(&graph
->edge
[i
], data
))
3658 if (graph_alloc(ctx
, sub
, n
, n_edge
) < 0)
3659 return isl_stat_error
;
3660 sub
->root
= graph
->root
;
3661 if (copy_nodes(sub
, graph
, node_pred
, data
) < 0)
3662 return isl_stat_error
;
3663 if (graph_init_table(ctx
, sub
) < 0)
3664 return isl_stat_error
;
3665 for (t
= 0; t
<= isl_edge_last
; ++t
)
3666 sub
->max_edge
[t
] = graph
->max_edge
[t
];
3667 if (graph_init_edge_tables(ctx
, sub
) < 0)
3668 return isl_stat_error
;
3669 if (copy_edges(ctx
, sub
, graph
, edge_pred
, data
) < 0)
3670 return isl_stat_error
;
3671 sub
->n_row
= graph
->n_row
;
3672 sub
->max_row
= graph
->max_row
;
3673 sub
->n_total_row
= graph
->n_total_row
;
3674 sub
->band_start
= graph
->band_start
;
3679 static __isl_give isl_schedule_node
*compute_schedule(isl_schedule_node
*node
,
3680 struct isl_sched_graph
*graph
);
3681 static __isl_give isl_schedule_node
*compute_schedule_wcc(
3682 isl_schedule_node
*node
, struct isl_sched_graph
*graph
);
3684 /* Compute a schedule for a subgraph of "graph". In particular, for
3685 * the graph composed of nodes that satisfy node_pred and edges that
3686 * that satisfy edge_pred.
3687 * If the subgraph is known to consist of a single component, then wcc should
3688 * be set and then we call compute_schedule_wcc on the constructed subgraph.
3689 * Otherwise, we call compute_schedule, which will check whether the subgraph
3692 * The schedule is inserted at "node" and the updated schedule node
3695 static __isl_give isl_schedule_node
*compute_sub_schedule(
3696 __isl_take isl_schedule_node
*node
, isl_ctx
*ctx
,
3697 struct isl_sched_graph
*graph
,
3698 int (*node_pred
)(struct isl_sched_node
*node
, int data
),
3699 int (*edge_pred
)(struct isl_sched_edge
*edge
, int data
),
3702 struct isl_sched_graph split
= { 0 };
3704 if (extract_sub_graph(ctx
, graph
, node_pred
, edge_pred
, data
,
3709 node
= compute_schedule_wcc(node
, &split
);
3711 node
= compute_schedule(node
, &split
);
3713 graph_free(ctx
, &split
);
3716 graph_free(ctx
, &split
);
3717 return isl_schedule_node_free(node
);
static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc == scc && edge->dst->scc == scc;
}

static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
{
	return edge->dst->scc <= scc;
}

static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc >= scc;
}
3735 /* Reset the current band by dropping all its schedule rows.
3737 static isl_stat
reset_band(struct isl_sched_graph
*graph
)
3742 drop
= graph
->n_total_row
- graph
->band_start
;
3743 graph
->n_total_row
-= drop
;
3744 graph
->n_row
-= drop
;
3746 for (i
= 0; i
< graph
->n
; ++i
) {
3747 struct isl_sched_node
*node
= &graph
->node
[i
];
3749 isl_map_free(node
->sched_map
);
3750 node
->sched_map
= NULL
;
3752 node
->sched
= isl_mat_drop_rows(node
->sched
,
3753 graph
->band_start
, drop
);
3756 return isl_stat_error
;
3762 /* Split the current graph into two parts and compute a schedule for each
3763 * part individually. In particular, one part consists of all SCCs up
3764 * to and including graph->src_scc, while the other part contains the other
3765 * SCCs. The split is enforced by a sequence node inserted at position "node"
3766 * in the schedule tree. Return the updated schedule node.
3767 * If either of these two parts consists of a sequence, then it is spliced
3768 * into the sequence containing the two parts.
3770 * The current band is reset. It would be possible to reuse
3771 * the previously computed rows as the first rows in the next
3772 * band, but recomputing them may result in better rows as we are looking
3773 * at a smaller part of the dependence graph.
3775 static __isl_give isl_schedule_node
*compute_split_schedule(
3776 __isl_take isl_schedule_node
*node
, struct isl_sched_graph
*graph
)
3780 isl_union_set_list
*filters
;
3785 if (reset_band(graph
) < 0)
3786 return isl_schedule_node_free(node
);
3790 ctx
= isl_schedule_node_get_ctx(node
);
3791 filters
= extract_split(ctx
, graph
);
3792 node
= isl_schedule_node_insert_sequence(node
, filters
);
3793 node
= isl_schedule_node_child(node
, 1);
3794 node
= isl_schedule_node_child(node
, 0);
3796 node
= compute_sub_schedule(node
, ctx
, graph
,
3797 &node_scc_at_least
, &edge_src_scc_at_least
,
3798 graph
->src_scc
+ 1, 0);
3799 is_seq
= isl_schedule_node_get_type(node
) == isl_schedule_node_sequence
;
3800 node
= isl_schedule_node_parent(node
);
3801 node
= isl_schedule_node_parent(node
);
3803 node
= isl_schedule_node_sequence_splice_child(node
, 1);
3804 node
= isl_schedule_node_child(node
, 0);
3805 node
= isl_schedule_node_child(node
, 0);
3806 node
= compute_sub_schedule(node
, ctx
, graph
,
3807 &node_scc_at_most
, &edge_dst_scc_at_most
,
3809 is_seq
= isl_schedule_node_get_type(node
) == isl_schedule_node_sequence
;
3810 node
= isl_schedule_node_parent(node
);
3811 node
= isl_schedule_node_parent(node
);
3813 node
= isl_schedule_node_sequence_splice_child(node
, 0);
3818 /* Insert a band node at position "node" in the schedule tree corresponding
3819 * to the current band in "graph". Mark the band node permutable
3820 * if "permutable" is set.
3821 * The partial schedules and the coincidence property are extracted
3822 * from the graph nodes.
3823 * Return the updated schedule node.
3825 static __isl_give isl_schedule_node
*insert_current_band(
3826 __isl_take isl_schedule_node
*node
, struct isl_sched_graph
*graph
,
3832 isl_multi_pw_aff
*mpa
;
3833 isl_multi_union_pw_aff
*mupa
;
3839 isl_die(isl_schedule_node_get_ctx(node
), isl_error_internal
,
3840 "graph should have at least one node",
3841 return isl_schedule_node_free(node
));
3843 start
= graph
->band_start
;
3844 end
= graph
->n_total_row
;
3847 ma
= node_extract_partial_schedule_multi_aff(&graph
->node
[0], start
, n
);
3848 mpa
= isl_multi_pw_aff_from_multi_aff(ma
);
3849 mupa
= isl_multi_union_pw_aff_from_multi_pw_aff(mpa
);
3851 for (i
= 1; i
< graph
->n
; ++i
) {
3852 isl_multi_union_pw_aff
*mupa_i
;
3854 ma
= node_extract_partial_schedule_multi_aff(&graph
->node
[i
],
3856 mpa
= isl_multi_pw_aff_from_multi_aff(ma
);
3857 mupa_i
= isl_multi_union_pw_aff_from_multi_pw_aff(mpa
);
3858 mupa
= isl_multi_union_pw_aff_union_add(mupa
, mupa_i
);
3860 node
= isl_schedule_node_insert_partial_schedule(node
, mupa
);
3862 for (i
= 0; i
< n
; ++i
)
3863 node
= isl_schedule_node_band_member_set_coincident(node
, i
,
3864 graph
->node
[0].coincident
[start
+ i
]);
3865 node
= isl_schedule_node_band_set_permutable(node
, permutable
);
3870 /* Update the dependence relations based on the current schedule,
3871 * add the current band to "node" and then continue with the computation
3873 * Return the updated schedule node.
3875 static __isl_give isl_schedule_node
*compute_next_band(
3876 __isl_take isl_schedule_node
*node
,
3877 struct isl_sched_graph
*graph
, int permutable
)
3884 ctx
= isl_schedule_node_get_ctx(node
);
3885 if (update_edges(ctx
, graph
) < 0)
3886 return isl_schedule_node_free(node
);
3887 node
= insert_current_band(node
, graph
, permutable
);
3890 node
= isl_schedule_node_child(node
, 0);
3891 node
= compute_schedule(node
, graph
);
3892 node
= isl_schedule_node_parent(node
);
3897 /* Add the constraints "coef" derived from an edge from "node" to itself
3898 * to graph->lp in order to respect the dependences and to try and carry them.
3899 * "pos" is the sequence number of the edge that needs to be carried.
3900 * "coef" represents general constraints on coefficients (c_0, c_x)
3901 * of valid constraints for (y - x) with x and y instances of the node.
3903 * The constraints added to graph->lp need to enforce
3905 * (c_j_0 + c_j_x y) - (c_j_0 + c_j_x x)
3906 * = c_j_x (y - x) >= e_i
3908 * for each (x,y) in the dependence relation of the edge.
3909 * That is, (-e_i, c_j_x) needs to be plugged in for (c_0, c_x),
3910 * taking into account that each coefficient in c_j_x is represented
3911 * as a pair of non-negative coefficients.
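/* For example, if the dependence distances of this edge include 0,
 * then c_0 >= 0 is a valid constraint, and plugging in c_0 = -e_i
 * forces e_i <= 0, so this edge cannot be carried.  If instead every
 * distance is at least 1, then e_i = 1 remains feasible.
 */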
static isl_stat add_intra_constraints(struct isl_sched_graph *graph,
	struct isl_sched_node *node, __isl_take isl_basic_set *coef, int pos)
{
	int offset;
	isl_ctx *ctx;
	isl_dim_map *dim_map;

	offset = coef_var_offset(coef);
	if (offset < 0)
		coef = isl_basic_set_free(coef);
	if (!coef)
		return isl_stat_error;

	ctx = isl_basic_set_get_ctx(coef);
	dim_map = intra_dim_map(ctx, graph, node, offset, 1);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);

	return isl_stat_ok;
}
3934 /* Add the constraints "coef" derived from an edge from "src" to "dst"
3935 * to graph->lp in order to respect the dependences and to try and carry them.
3936 * "pos" is the sequence number of the edge that needs to be carried or
3937 * -1 if no attempt should be made to carry the dependences.
3938 * "coef" represents general constraints on coefficients (c_0, c_n, c_x, c_y)
3939 * of valid constraints for (x, y) with x and y instances of "src" and "dst".
3941 * The constraints added to graph->lp need to enforce
3943 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
3945 * for each (x,y) in the dependence relation of the edge or
3947 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= 0
3951 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
3953 * (c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
3954 * needs to be plugged in for (c_0, c_n, c_x, c_y),
3955 * taking into account that each coefficient in c_j_x and c_k_x is represented
3956 * as a pair of non-negative coefficients.
3958 static isl_stat
add_inter_constraints(struct isl_sched_graph
*graph
,
3959 struct isl_sched_node
*src
, struct isl_sched_node
*dst
,
3960 __isl_take isl_basic_set
*coef
, int pos
)
3964 isl_dim_map
*dim_map
;
3966 offset
= coef_var_offset(coef
);
3968 coef
= isl_basic_set_free(coef
);
3970 return isl_stat_error
;
3972 ctx
= isl_basic_set_get_ctx(coef
);
3973 dim_map
= inter_dim_map(ctx
, graph
, src
, dst
, offset
, 1);
3975 isl_dim_map_range(dim_map
, 3 + pos
, 0, 0, 0, 1, -1);
3976 graph
->lp
= add_constraints_dim_map(graph
->lp
, coef
, dim_map
);
/* Data structure for keeping track of the data needed
 * to exploit non-trivial lineality spaces.
 *
 * "any_non_trivial" is true if there are any non-trivial lineality spaces.
 * If "any_non_trivial" is not true, then "equivalent" and "mask" may be NULL.
 * "equivalent" connects instances to other instances on the same line(s).
 * "mask" contains the domain spaces of "equivalent".
 * Any instance set not in "mask" does not have a non-trivial lineality space.
 */
struct isl_exploit_lineality_data {
	isl_bool any_non_trivial;
	isl_union_map *equivalent;
	isl_union_set *mask;
};

/* Data structure collecting information used during the construction
 * of an LP for carrying dependences.
 *
 * "intra" is a sequence of coefficient constraints for intra-node edges.
 * "inter" is a sequence of coefficient constraints for inter-node edges.
 * "lineality" contains data used to exploit non-trivial lineality spaces.
 */
struct isl_carry {
	isl_basic_set_list *intra;
	isl_basic_set_list *inter;
	struct isl_exploit_lineality_data lineality;
};

/* Free all the data stored in "carry".
 */
static void isl_carry_clear(struct isl_carry *carry)
{
	isl_basic_set_list_free(carry->intra);
	isl_basic_set_list_free(carry->inter);
	isl_union_map_free(carry->lineality.equivalent);
	isl_union_set_free(carry->lineality.mask);
}

/* Return a pointer to the node in "graph" that lives in "space".
 * If the requested node has been compressed, then "space"
 * corresponds to the compressed space.
 * The graph is assumed to have such a node.
 * Return NULL in case of error.
 *
 * First try and see if "space" is the space of an uncompressed node.
 * If so, return that node.
 * Otherwise, "space" was constructed by construct_compressed_id and
 * contains a user pointer pointing to the node in the tuple id.
 * However, this node belongs to the original dependence graph.
 * If "graph" is a subgraph of this original dependence graph,
 * then the node with the same space still needs to be looked up
 * in the current graph.
 */
static struct isl_sched_node *graph_find_compressed_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_space *space)
{
	isl_id *id;
	struct isl_sched_node *node;

	if (!space)
		return NULL;

	node = graph_find_node(ctx, graph, space);
	if (!node)
		return NULL;
	if (is_node(graph, node))
		return node;

	id = isl_space_get_tuple_id(space, isl_dim_set);
	node = isl_id_get_user(id);
	isl_id_free(id);

	if (!node)
		return NULL;

	if (!is_node(graph->root, node))
		isl_die(ctx, isl_error_internal,
			"space points to invalid node", return NULL);
	if (graph != graph->root)
		node = graph_find_node(ctx, graph, node->space);
	if (!is_node(graph, node))
		isl_die(ctx, isl_error_internal,
			"unable to find node", return NULL);

	return node;
}

/* Internal data structure for add_all_constraints.
 *
 * "graph" is the schedule constraint graph for which an LP problem
 * is being constructed.
 * "carry_inter" indicates whether inter-node edges should be carried.
 * "pos" is the position of the next edge that needs to be carried.
 */
struct isl_add_all_constraints_data {
	isl_ctx *ctx;
	struct isl_sched_graph *graph;
	int carry_inter;
	int pos;
};

/* Add the constraints "coef" derived from an edge from a node to itself
 * to data->graph->lp in order to respect the dependences and
 * to try and carry them.
 *
 * The space of "coef" is of the form
 *
 *	coefficients[[c_cst] -> S[c_x]]
 *
 * with S[c_x] the (compressed) space of the node.
 * Extract the node from the space and call add_intra_constraints.
 */
static isl_stat lp_add_intra(__isl_take isl_basic_set *coef, void *user)
{
	struct isl_add_all_constraints_data *data = user;
	isl_space *space;
	struct isl_sched_node *node;

	space = isl_basic_set_get_space(coef);
	space = isl_space_range(isl_space_unwrap(space));
	node = graph_find_compressed_node(data->ctx, data->graph, space);
	isl_space_free(space);
	return add_intra_constraints(data->graph, node, coef, data->pos++);
}

/* Add the constraints "coef" derived from an edge from a node j
 * to a node k to data->graph->lp in order to respect the dependences and
 * to try and carry them (provided data->carry_inter is set).
 *
 * The space of "coef" is of the form
 *
 *	coefficients[[c_cst, c_n] -> [S_j[c_x] -> S_k[c_y]]]
 *
 * with S_j[c_x] and S_k[c_y] the (compressed) spaces of the nodes.
 * Extract the nodes from the space and call add_inter_constraints.
 */
static isl_stat lp_add_inter(__isl_take isl_basic_set *coef, void *user)
{
	struct isl_add_all_constraints_data *data = user;
	isl_space *space, *dom;
	struct isl_sched_node *src, *dst;
	int pos;

	space = isl_basic_set_get_space(coef);
	space = isl_space_unwrap(isl_space_range(isl_space_unwrap(space)));
	dom = isl_space_domain(isl_space_copy(space));
	src = graph_find_compressed_node(data->ctx, data->graph, dom);
	isl_space_free(dom);
	space = isl_space_range(space);
	dst = graph_find_compressed_node(data->ctx, data->graph, space);
	isl_space_free(space);

	pos = data->carry_inter ? data->pos++ : -1;
	return add_inter_constraints(data->graph, src, dst, coef, pos);
}

/* Add constraints to graph->lp that force all (conditional) validity
 * dependences to be respected and attempt to carry them.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 * "carry_inter" indicates whether inter-node edges should be carried or
 * only respected.
 */
static isl_stat add_all_constraints(isl_ctx *ctx, struct isl_sched_graph *graph,
	__isl_keep isl_basic_set_list *intra,
	__isl_keep isl_basic_set_list *inter, int carry_inter)
{
	struct isl_add_all_constraints_data data = { ctx, graph, carry_inter };

	data.pos = 0;
	if (isl_basic_set_list_foreach(intra, &lp_add_intra, &data) < 0)
		return isl_stat_error;
	if (isl_basic_set_list_foreach(inter, &lp_add_inter, &data) < 0)
		return isl_stat_error;
	return isl_stat_ok;
}

/* Internal data structure for count_all_constraints
 * for keeping track of the number of equality and inequality constraints.
 */
struct isl_sched_count {
	int n_eq;
	int n_ineq;
};

/* Add the number of equality and inequality constraints of "bset"
 * to data->n_eq and data->n_ineq.
 */
static isl_stat bset_update_count(__isl_take isl_basic_set *bset, void *user)
{
	struct isl_sched_count *data = user;

	return update_count(bset, 1, &data->n_eq, &data->n_ineq);
}

/* Count the number of equality and inequality constraints
 * that will be added to the carry_lp problem.
 * We count each edge exactly once.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 */
static isl_stat count_all_constraints(__isl_keep isl_basic_set_list *intra,
	__isl_keep isl_basic_set_list *inter, int *n_eq, int *n_ineq)
{
	struct isl_sched_count data;

	data.n_eq = data.n_ineq = 0;
	if (isl_basic_set_list_foreach(inter, &bset_update_count, &data) < 0)
		return isl_stat_error;
	if (isl_basic_set_list_foreach(intra, &bset_update_count, &data) < 0)
		return isl_stat_error;

	*n_eq = data.n_eq;
	*n_ineq = data.n_ineq;

	return isl_stat_ok;
}

/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many validity dependences as possible.
 * In particular, for each dependence i, we bound the dependence distance
 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
 * of all e_i's.  Dependences with e_i = 0 in the solution are simply
 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 * "n_edge" is the total number of edges.
 * "carry_inter" indicates whether inter-node edges should be carried or
 * only respected.  That is, if "carry_inter" is not set, then
 * no e_i variables are introduced for the inter-node edges.
 *
 * All variables of the LP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of (1 - e_i) over all edges
 *	- sum of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- for each edge
 *		- e_i
 *	- for each node
 *		- positive and negative parts of c_i_x, in opposite order
 *		- c_i_n (if parametric)
 *		- c_i_0
 *
 * The constraints are those from the (validity) edges plus three equalities
 * to express the sums and n_edge inequalities to express e_i <= 1.
 */
static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int n_edge, __isl_keep isl_basic_set_list *intra,
	__isl_keep isl_basic_set_list *inter, int carry_inter)
{
	int i;
	int k;
	isl_space *dim;
	unsigned total;
	int n_eq, n_ineq;

	total = 3 + n_edge;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		node->start = total;
		total += 1 + node->nparam + 2 * node->nvar;
	}

	if (count_all_constraints(intra, inter, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	dim = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 3;
	n_ineq += n_edge;
	graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
	graph->lp = isl_basic_set_set_rational(graph->lp);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][0], -n_edge);
	isl_int_set_si(graph->lp->eq[k][1], 1);
	for (i = 0; i < n_edge; ++i)
		isl_int_set_si(graph->lp->eq[k][4 + i], 1);

	if (add_param_sum_constraint(graph, 1) < 0)
		return isl_stat_error;
	if (add_var_sum_constraint(graph, 2) < 0)
		return isl_stat_error;

	for (i = 0; i < n_edge; ++i) {
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
		isl_int_set_si(graph->lp->ineq[k][0], 1);
	}

	if (add_all_constraints(ctx, graph, intra, inter, carry_inter) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}

static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc);

/* If the schedule_split_scaled option is set and if the linear
 * parts of the scheduling rows for all nodes in the graphs have
 * a non-trivial common divisor, then remove this
 * common divisor from the linear part.
 * Otherwise, insert a band node directly and continue with
 * the construction of the schedule.
 *
 * If a non-trivial common divisor is found, then
 * the linear part is reduced and the remainder is ignored.
 * The pieces of the graph that are assigned different remainders
 * form (groups of) strongly connected components within
 * the scaled down band.  If needed, they can therefore
 * be ordered along this remainder in a sequence node.
 * However, this ordering is not enforced here in order to allow
 * the scheduler to combine some of the strongly connected components.
 */
static __isl_give isl_schedule_node *split_scaled(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int row;
	isl_ctx *ctx;
	isl_int gcd, gcd_i;
	isl_size n_row;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (!ctx->opt->schedule_split_scaled)
		return compute_next_band(node, graph, 0);
	if (graph->n <= 1)
		return compute_next_band(node, graph, 0);
	n_row = isl_mat_rows(graph->node[0].sched);
	if (n_row < 0)
		return isl_schedule_node_free(node);

	isl_int_init(gcd);
	isl_int_init(gcd_i);

	isl_int_set_si(gcd, 0);

	row = n_row - 1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		isl_size cols = isl_mat_cols(node->sched);

		if (cols < 0)
			break;
		isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
		isl_int_gcd(gcd, gcd, gcd_i);
	}

	isl_int_clear(gcd_i);
	if (i < graph->n)
		goto error;

	if (isl_int_cmp_si(gcd, 1) <= 0) {
		isl_int_clear(gcd);
		return compute_next_band(node, graph, 0);
	}

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		isl_int_fdiv_q(node->sched->row[row][0],
			       node->sched->row[row][0], gcd);
		isl_int_mul(node->sched->row[row][0],
			    node->sched->row[row][0], gcd);
		node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
		if (!node->sched)
			goto error;
	}

	isl_int_clear(gcd);

	return compute_next_band(node, graph, 0);
error:
	isl_int_clear(gcd);
	return isl_schedule_node_free(node);
}

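/* For example (a hypothetical schedule row, for illustration only): if the
 * current row is 4i + 6j + 3 for one node and 2i + 1 for another, then the
 * common divisor of the linear parts is gcd(gcd(4,6), 2) = 2, the constant
 * terms are rounded down to the nearest multiple of 2 (3 becomes 2,
 * 1 becomes 0), and each row is scaled down by 2, giving 2i + 3j + 1 and i.
 */
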
/* Is the schedule row "sol" trivial on node "node"?
 * That is, is the solution zero on the dimensions linearly independent of
 * the previously found solutions?
 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
 *
 * Each coefficient is represented as the difference between
 * two non-negative values in "sol".
 * We construct the schedule row s and check if it is linearly
 * independent of previously computed schedule rows
 * by computing T s, with T the linear combinations that are zero
 * on linearly dependent schedule rows.
 * If the result consists of all zeros, then the solution is trivial.
 */
static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
{
	int trivial;
	isl_vec *node_sol;

	if (!sol)
		return -1;
	if (node->nvar == node->rank)
		return 0;

	node_sol = extract_var_coef(node, sol);
	node_sol = isl_mat_vec_product(isl_mat_copy(node->indep), node_sol);
	if (!node_sol)
		return -1;

	trivial = isl_seq_first_non_zero(node_sol->el,
					node->nvar - node->rank) == -1;

	isl_vec_free(node_sol);

	return trivial;
}

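/* A small hypothetical example of the triviality test above: for a node with
 * nvar = 2 and rank = 1 whose first schedule row was i + j, "indep" may
 * consist of the single row [1 -1], a linear combination that vanishes on
 * that row.  A candidate row with coefficients (1, 1) then yields
 * indep * s = 0 and is rejected as trivial, while (1, 0) yields 1 and is
 * accepted as linearly independent.
 */
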
/* Is the schedule row "sol" trivial on any node where it should
 * not be trivial?
 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
 */
static int is_any_trivial(struct isl_sched_graph *graph,
	__isl_keep isl_vec *sol)
{
	int i;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int trivial;

		if (!needs_row(graph, node))
			continue;
		trivial = is_trivial(node, sol);
		if (trivial < 0 || trivial)
			return trivial;
	}

	return 0;
}

/* Does the schedule represented by "sol" perform loop coalescing on "node"?
 * If so, return the position of the coalesced dimension.
 * Otherwise, return node->nvar or -1 on error.
 *
 * In particular, look for pairs of coefficients c_i and c_j such that
 * |c_j/c_i| > ceil(size_i/2), i.e., |c_j| > |c_i * ceil(size_i/2)|.
 * If any such pair is found, then return i.
 * If size_i is infinity, then no check on c_i needs to be performed.
 */
static int find_node_coalescing(struct isl_sched_node *node,
	__isl_keep isl_vec *sol)
{
	int i, j;
	isl_int max;
	isl_vec *csol;

	if (node->nvar <= 1)
		return node->nvar;

	csol = extract_var_coef(node, sol);
	if (!csol)
		return -1;
	isl_int_init(max);
	for (i = 0; i < node->nvar; ++i) {
		isl_val *v;

		if (isl_int_is_zero(csol->el[i]))
			continue;
		v = isl_multi_val_get_val(node->sizes, i);
		if (!v)
			goto error;
		if (!isl_val_is_int(v)) {
			isl_val_free(v);
			continue;
		}
		v = isl_val_div_ui(v, 2);
		v = isl_val_ceil(v);
		if (!v)
			goto error;
		isl_int_mul(max, v->n, csol->el[i]);
		isl_val_free(v);

		for (j = 0; j < node->nvar; ++j) {
			if (j == i)
				continue;
			if (isl_int_abs_gt(csol->el[j], max))
				break;
		}
		if (j < node->nvar)
			break;
	}

	isl_int_clear(max);
	isl_vec_free(csol);
	return i;
error:
	isl_int_clear(max);
	isl_vec_free(csol);
	return -1;
}

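/* A hypothetical example of the coalescing test above: for a node with
 * sizes (10, 100) and candidate coefficients (c_0, c_1) = (1, 6), position
 * i = 0 gives max = ceil(10/2) * 1 = 5 and |c_1| = 6 > 5, so the schedule
 * row behaves like a coalescing schedule along dimension 0 and 0 is
 * returned.  With coefficients (1, 3), no pair triggers the test and
 * node->nvar is returned instead.
 */
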
/* Force the schedule coefficient at position "pos" of "node" to be zero
 * in "tl".
 * The coefficient is encoded as the difference between two non-negative
 * variables.  Force these two variables to have the same value.
 */
static __isl_give isl_tab_lexmin *zero_out_node_coef(
	__isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
{
	int dim;
	isl_ctx *ctx;
	isl_vec *eq;

	ctx = isl_space_get_ctx(node->space);
	dim = isl_tab_lexmin_dim(tl);
	if (dim < 0)
		return isl_tab_lexmin_free(tl);
	eq = isl_vec_alloc(ctx, 1 + dim);
	eq = isl_vec_clr(eq);
	if (!eq)
		return isl_tab_lexmin_free(tl);

	pos = 1 + node_var_coef_pos(node, pos);
	isl_int_set_si(eq->el[pos], 1);
	isl_int_set_si(eq->el[pos + 1], -1);
	tl = isl_tab_lexmin_add_eq(tl, eq->el);
	isl_vec_free(eq);

	return tl;
}

/* Return the lexicographically smallest rational point in the basic set
 * from which "tl" was constructed, double checking that this input set
 * was not empty.
 */
static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
{
	isl_vec *sol;

	sol = isl_tab_lexmin_get_solution(tl);
	if (!sol)
		return NULL;
	if (sol->size == 0)
		isl_die(isl_vec_get_ctx(sol), isl_error_internal,
			"error in schedule construction",
			return isl_vec_free(sol));
	return sol;
}

/* Does the solution "sol" of the LP problem constructed by setup_carry_lp
 * carry any of the "n_edge" groups of dependences?
 * The value in the first position is the sum of (1 - e_i) over all "n_edge"
 * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
 * by the edge are carried by the solution.
 * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
 * one of those is carried.
 *
 * Note that despite the fact that the problem is solved using a rational
 * solver, the solution is guaranteed to be integral.
 * Specifically, the dependence distance lower bounds e_i (and therefore
 * also their sum) are integers.  See Lemma 5 of [1].
 *
 * Any potential denominator of the sum is cleared by this function.
 * The denominator is not relevant for any of the other elements
 * in the solution.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
{
	isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
	isl_int_set_si(sol->el[0], 1);
	return isl_int_cmp_si(sol->el[1], n_edge) < 0;
}

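/* For example (a hypothetical solution vector): with n_edge = 3 and a
 * solution whose first elements are (2, 4, ...), the denominator 2 is
 * divided out, leaving sum (1 - e_i) = 2 < 3, so at least one of the three
 * dependence groups is carried.  A sum equal to n_edge would mean that all
 * e_i are zero and that nothing is carried.
 */
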
/* Return the lexicographically smallest rational point in "lp",
 * assuming that all variables are non-negative and performing some
 * additional sanity checks.
 * If "want_integral" is set, then compute the lexicographically smallest
 * integer point instead.
 * In particular, "lp" should not be empty by construction.
 * Double check that this is the case.
 * If dependences are not carried for any of the "n_edge" edges,
 * then return an empty vector.
 *
 * If the schedule_treat_coalescing option is set and
 * if the computed schedule performs loop coalescing on a given node,
 * i.e., if it is of the form
 *
 *	c_i i + c_j j + ...
 *
 * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
 * to cut out this solution.  Repeat this process until no more loop
 * coalescing occurs or until no more dependences can be carried.
 * In the latter case, revert to the previously computed solution.
 *
 * If the caller requests an integral solution and if coalescing should
 * be treated, then perform the coalescing treatment first as
 * an integral solution computed before coalescing treatment
 * would carry the same number of edges and would therefore probably
 * also be coalescing.
 *
 * To allow the coalescing treatment to be performed first,
 * the initial solution is allowed to be rational and it is only
 * cut out (if needed) in the next iteration, if no coalescing measures
 * need to be taken.
 */
static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
	__isl_take isl_basic_set *lp, int n_edge, int want_integral)
{
	int i, pos, cut;
	isl_ctx *ctx;
	isl_tab_lexmin *tl;
	isl_vec *sol = NULL, *prev;
	int treat_coalescing;
	int try_again;

	if (!lp)
		return NULL;
	ctx = isl_basic_set_get_ctx(lp);
	treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
	tl = isl_tab_lexmin_from_basic_set(lp);

	cut = 0;
	do {
		int integral;

		try_again = 0;
		if (cut)
			tl = isl_tab_lexmin_cut_to_integer(tl);
		prev = sol;
		sol = non_empty_solution(tl);
		if (!sol)
			goto error;

		integral = isl_int_is_one(sol->el[0]);
		if (!carries_dependences(sol, n_edge)) {
			if (!prev)
				prev = isl_vec_alloc(ctx, 0);
			isl_vec_free(sol);
			sol = prev;
			break;
		}
		prev = isl_vec_free(prev);
		cut = want_integral && !integral;
		if (cut)
			try_again = 1;
		if (!treat_coalescing)
			continue;
		for (i = 0; i < graph->n; ++i) {
			struct isl_sched_node *node = &graph->node[i];

			pos = find_node_coalescing(node, sol);
			if (pos < 0)
				goto error;
			if (pos < node->nvar)
				break;
		}
		if (i < graph->n) {
			try_again = 1;
			tl = zero_out_node_coef(tl, &graph->node[i], pos);
			cut = 0;
		}
	} while (try_again);

	isl_tab_lexmin_free(tl);

	return sol;
error:
	isl_tab_lexmin_free(tl);
	isl_vec_free(prev);
	isl_vec_free(sol);
	return NULL;
}

/* If "edge" is an edge from a node to itself, then add the corresponding
 * dependence relation to "umap".
 * If "node" has been compressed, then the dependence relation
 * is also compressed first.
 */
static __isl_give isl_union_map *add_intra(__isl_take isl_union_map *umap,
	struct isl_sched_edge *edge)
{
	isl_map *map;
	struct isl_sched_node *node = edge->src;

	if (edge->src != edge->dst)
		return umap;

	map = isl_map_copy(edge->map);
	if (node->compressed) {
		map = isl_map_preimage_domain_multi_aff(map,
			    isl_multi_aff_copy(node->decompress));
		map = isl_map_preimage_range_multi_aff(map,
			    isl_multi_aff_copy(node->decompress));
	}
	umap = isl_union_map_add_map(umap, map);
	return umap;
}

/* If "edge" is an edge from a node to another node, then add the corresponding
 * dependence relation to "umap".
 * If the source or destination nodes of "edge" have been compressed,
 * then the dependence relation is also compressed first.
 */
static __isl_give isl_union_map *add_inter(__isl_take isl_union_map *umap,
	struct isl_sched_edge *edge)
{
	isl_map *map;

	if (edge->src == edge->dst)
		return umap;

	map = isl_map_copy(edge->map);
	if (edge->src->compressed)
		map = isl_map_preimage_domain_multi_aff(map,
			    isl_multi_aff_copy(edge->src->decompress));
	if (edge->dst->compressed)
		map = isl_map_preimage_range_multi_aff(map,
			    isl_multi_aff_copy(edge->dst->decompress));
	umap = isl_union_map_add_map(umap, map);
	return umap;
}

/* Internal data structure used by union_drop_coalescing_constraints
 * to collect bounds on all relevant statements.
 *
 * "graph" is the schedule constraint graph for which an LP problem
 * is being constructed.
 * "bounds" collects the bounds.
 */
struct isl_collect_bounds_data {
	isl_ctx *ctx;
	struct isl_sched_graph *graph;
	isl_union_set *bounds;
};

/* Add the size bounds for the node with instance deltas in "set"
 * to data->bounds.
 */
static isl_stat collect_bounds(__isl_take isl_set *set, void *user)
{
	struct isl_collect_bounds_data *data = user;
	struct isl_sched_node *node;
	isl_space *space;
	isl_set *bounds;

	space = isl_set_get_space(set);
	isl_set_free(set);

	node = graph_find_compressed_node(data->ctx, data->graph, space);
	isl_space_free(space);

	bounds = isl_set_from_basic_set(get_size_bounds(node));
	data->bounds = isl_union_set_add_set(data->bounds, bounds);

	return isl_stat_ok;
}

/* Drop some constraints from "delta" that could be exploited
 * to construct loop coalescing schedules.
 * In particular, drop those constraints that bound the difference
 * to the size of the domain.
 * Do this for each set/node in "delta" separately.
 * The parameters are assumed to have been projected out by the caller.
 */
static __isl_give isl_union_set *union_drop_coalescing_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_take isl_union_set *delta)
{
	struct isl_collect_bounds_data data = { ctx, graph };

	data.bounds = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	if (isl_union_set_foreach_set(delta, &collect_bounds, &data) < 0)
		data.bounds = isl_union_set_free(data.bounds);
	delta = isl_union_set_plain_gist(delta, data.bounds);

	return delta;
}

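/* As an illustration (hypothetical deltas): if the deltas of a node with
 * size 10 in its only dimension are { [d] : 0 <= d <= 9 }, then the size
 * bounds are roughly { [d] : -9 <= d <= 9 } and the plain gist removes the
 * upper bound, leaving { [d] : d >= 0 }.  The constraint d <= 9 is exactly
 * the kind of constraint that a coalescing schedule could exploit.
 */
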
/* Given a non-trivial lineality space "lineality", add the corresponding
 * universe set to data->mask and add a map from elements to
 * other elements along the lines in "lineality" to data->equivalent.
 * If this is the first time this function gets called
 * (data->any_non_trivial is still false), then set data->any_non_trivial and
 * initialize data->mask and data->equivalent.
 *
 * In particular, if the lineality space is defined by equality constraints
 *
 *	E x = 0
 *
 * then construct an affine mapping
 *
 *	f : x -> E x
 *
 * and compute the equivalence relation of having the same image under f:
 *
 *	{ x -> x' : E x = E x' }
 */
static isl_stat add_non_trivial_lineality(__isl_take isl_basic_set *lineality,
	struct isl_exploit_lineality_data *data)
{
	isl_mat *eq;
	isl_space *space;
	isl_set *univ;
	isl_multi_aff *ma;
	isl_multi_pw_aff *mpa;
	isl_map *map;
	isl_size n;

	if (isl_basic_set_check_no_locals(lineality) < 0)
		goto error;

	space = isl_basic_set_get_space(lineality);
	if (!data->any_non_trivial) {
		data->equivalent = isl_union_map_empty(isl_space_copy(space));
		data->mask = isl_union_set_empty(isl_space_copy(space));
	}
	data->any_non_trivial = isl_bool_true;

	univ = isl_set_universe(isl_space_copy(space));
	data->mask = isl_union_set_add_set(data->mask, univ);

	eq = isl_basic_set_extract_equalities(lineality);
	n = isl_mat_rows(eq);
	if (n < 0)
		space = isl_space_free(space);
	eq = isl_mat_insert_zero_rows(eq, 0, 1);
	eq = isl_mat_set_element_si(eq, 0, 0, 1);
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, n);
	ma = isl_multi_aff_from_aff_mat(space, eq);
	mpa = isl_multi_pw_aff_from_multi_aff(ma);
	map = isl_multi_pw_aff_eq_map(mpa, isl_multi_pw_aff_copy(mpa));
	data->equivalent = isl_union_map_add_map(data->equivalent, map);
	isl_basic_set_free(lineality);

	return isl_stat_ok;
error:
	isl_basic_set_free(lineality);
	return isl_stat_error;
}

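/* A hypothetical example of the equivalence relation constructed above:
 * if the lineality space of a two-dimensional node is defined by the single
 * equality i - j = 0 (so E = [1 -1]), then f maps (i, j) to i - j and the
 * resulting relation is
 *
 *	{ [i, j] -> [i', j'] : i - j = i' - j' }
 *
 * i.e., instances are considered equivalent if they lie on the same line
 * i - j = constant.
 */
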
/* Check if the lineality space "set" is non-trivial (i.e., is not just
 * the origin or, in other words, satisfies a number of equality constraints
 * that is smaller than the dimension of the set).
 * If so, extend data->mask and data->equivalent accordingly.
 *
 * The input should not have any local variables already, but
 * isl_set_remove_divs is called to make sure it does not.
 */
static isl_stat add_lineality(__isl_take isl_set *set, void *user)
{
	struct isl_exploit_lineality_data *data = user;
	isl_basic_set *hull;
	isl_size dim;
	int n_eq;

	set = isl_set_remove_divs(set);
	hull = isl_set_unshifted_simple_hull(set);
	dim = isl_basic_set_dim(hull, isl_dim_set);
	n_eq = isl_basic_set_n_equality(hull);
	if (dim < 0 || n_eq < 0)
		goto error;
	if (dim != n_eq)
		return add_non_trivial_lineality(hull, data);
	isl_basic_set_free(hull);
	return isl_stat_ok;
error:
	isl_basic_set_free(hull);
	return isl_stat_error;
}

/* Check if the difference set on intra-node schedule constraints "intra"
 * has any non-trivial lineality space.
 * If so, then extend the difference set to a difference set
 * on equivalent elements.  That is, if "intra" is
 *
 *	{ y - x : (x,y) \in V }
 *
 * and elements are equivalent if they have the same image under f,
 * then return
 *
 *	{ y' - x' : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
 *
 * or, since f is linear,
 *
 *	{ y' - x' : (x,y) \in V and f(y - x) = f(y' - x') }
 *
 * The results of the search for non-trivial lineality spaces are stored
 * in "data".
 */
static __isl_give isl_union_set *exploit_intra_lineality(
	__isl_take isl_union_set *intra,
	struct isl_exploit_lineality_data *data)
{
	isl_union_set *lineality;
	isl_union_set *uset;

	data->any_non_trivial = isl_bool_false;
	lineality = isl_union_set_copy(intra);
	lineality = isl_union_set_combined_lineality_space(lineality);
	if (isl_union_set_foreach_set(lineality, &add_lineality, data) < 0)
		data->any_non_trivial = isl_bool_error;
	isl_union_set_free(lineality);

	if (data->any_non_trivial < 0)
		return isl_union_set_free(intra);
	if (!data->any_non_trivial)
		return intra;

	uset = isl_union_set_copy(intra);
	intra = isl_union_set_subtract(intra, isl_union_set_copy(data->mask));
	uset = isl_union_set_apply(uset, isl_union_map_copy(data->equivalent));
	intra = isl_union_set_union(intra, uset);

	intra = isl_union_set_remove_divs(intra);

	return intra;
}

/* If the difference set on intra-node schedule constraints was found to have
 * any non-trivial lineality space by exploit_intra_lineality,
 * as recorded in "data", then extend the inter-node
 * schedule constraints "inter" to schedule constraints on equivalent elements.
 * That is, if "inter" is V and
 * elements are equivalent if they have the same image under f, then return
 *
 *	{ (x', y') : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
 */
static __isl_give isl_union_map *exploit_inter_lineality(
	__isl_take isl_union_map *inter,
	struct isl_exploit_lineality_data *data)
{
	isl_union_map *umap;

	if (data->any_non_trivial < 0)
		return isl_union_map_free(inter);
	if (!data->any_non_trivial)
		return inter;

	umap = isl_union_map_copy(inter);
	inter = isl_union_map_subtract_range(inter,
				isl_union_set_copy(data->mask));
	umap = isl_union_map_apply_range(umap,
				isl_union_map_copy(data->equivalent));
	inter = isl_union_map_union(inter, umap);
	umap = isl_union_map_copy(inter);
	inter = isl_union_map_subtract_domain(inter,
				isl_union_set_copy(data->mask));
	umap = isl_union_map_apply_range(isl_union_map_copy(data->equivalent),
				umap);
	inter = isl_union_map_union(inter, umap);

	inter = isl_union_map_remove_divs(inter);

	return inter;
}

/* For each (conditional) validity edge in "graph",
 * add the corresponding dependence relation using "add"
 * to a collection of dependence relations and return the result.
 * If "coincidence" is set, then coincidence edges are considered as well.
 */
static __isl_give isl_union_map *collect_validity(struct isl_sched_graph *graph,
	__isl_give isl_union_map *(*add)(__isl_take isl_union_map *umap,
		struct isl_sched_edge *edge), int coincidence)
{
	int i;
	isl_space *space;
	isl_union_map *umap;

	space = isl_space_copy(graph->node[0].space);
	umap = isl_union_map_empty(space);

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_any_validity(edge) &&
		    (!coincidence || !is_coincidence(edge)))
			continue;

		umap = add(umap, edge);
	}

	return umap;
}

/* For each dependence relation on a (conditional) validity edge
 * from a node to itself,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation and collect the results.
 * If "coincidence" is set, then coincidence edges are considered as well.
 *
 * In particular, for each dependence relation R, constraints
 * on coefficients (c_0, c_x) are constructed such that
 *
 *	c_0 + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
 *
 * If the schedule_treat_coalescing option is set, then some constraints
 * that could be exploited to construct coalescing schedules
 * are removed before the dual is computed, but after the parameters
 * have been projected out.
 * The entire computation is essentially the same as that performed
 * by intra_coefficients, except that it operates on multiple
 * edges together and that the parameters are always projected out.
 *
 * Additionally, exploit any non-trivial lineality space
 * in the difference set after removing coalescing constraints and
 * store the results of the non-trivial lineality space detection in "data".
 * The procedure is currently run unconditionally, but it is unlikely
 * to find any non-trivial lineality spaces if no coalescing constraints
 * have been removed.
 *
 * Note that if a dependence relation is a union of basic maps,
 * then each basic map needs to be treated individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * The collected validity constraints are therefore not coalesced and
 * it is assumed that they are not coalesced automatically.
 * Duplicate basic maps can be removed, however.
 * In particular, if the same basic map appears as a disjunct
 * in multiple edges, then it only needs to be carried once.
 */
static __isl_give isl_basic_set_list *collect_intra_validity(isl_ctx *ctx,
	struct isl_sched_graph *graph, int coincidence,
	struct isl_exploit_lineality_data *data)
{
	isl_union_map *intra;
	isl_union_set *delta;
	isl_basic_set_list *list;

	intra = collect_validity(graph, &add_intra, coincidence);
	delta = isl_union_map_deltas(intra);
	delta = isl_union_set_project_out_all_params(delta);
	delta = isl_union_set_remove_divs(delta);
	if (isl_options_get_schedule_treat_coalescing(ctx))
		delta = union_drop_coalescing_constraints(ctx, graph, delta);
	delta = exploit_intra_lineality(delta, data);
	list = isl_union_set_get_basic_set_list(delta);
	isl_union_set_free(delta);

	return isl_basic_set_list_coefficients(list);
}

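/* As a small illustration of the coefficient computation above (hypothetical
 * deltas): if the difference set of a one-dimensional node is
 * { [d] : d >= 1 }, then the returned coefficients (c_0, c_x) are those with
 *
 *	c_0 + c_x d >= 0 for all d >= 1
 *
 * which, by Farkas' lemma, amounts to c_x >= 0 and c_0 + c_x >= 0.
 */
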
/* For each dependence relation on a (conditional) validity edge
 * from a node to some other node,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation and collect the results.
 * If "coincidence" is set, then coincidence edges are considered as well.
 *
 * In particular, for each dependence relation R, constraints
 * on coefficients (c_0, c_n, c_x, c_y) are constructed such that
 *
 *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
 *
 * This computation is essentially the same as that performed
 * by inter_coefficients, except that it operates on multiple
 * edges together.
 *
 * Additionally, exploit any non-trivial lineality space
 * that may have been discovered by collect_intra_validity
 * (as stored in "data").
 *
 * Note that if a dependence relation is a union of basic maps,
 * then each basic map needs to be treated individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * The collected validity constraints are therefore not coalesced and
 * it is assumed that they are not coalesced automatically.
 * Duplicate basic maps can be removed, however.
 * In particular, if the same basic map appears as a disjunct
 * in multiple edges, then it only needs to be carried once.
 */
static __isl_give isl_basic_set_list *collect_inter_validity(
	struct isl_sched_graph *graph, int coincidence,
	struct isl_exploit_lineality_data *data)
{
	isl_union_map *inter;
	isl_union_set *wrap;
	isl_basic_set_list *list;

	inter = collect_validity(graph, &add_inter, coincidence);
	inter = exploit_inter_lineality(inter, data);
	inter = isl_union_map_remove_divs(inter);
	wrap = isl_union_map_wrap(inter);
	list = isl_union_set_get_basic_set_list(wrap);
	isl_union_set_free(wrap);
	return isl_basic_set_list_coefficients(list);
}

/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many of the "n_edge" groups of
 * dependences as possible based on the corresponding coefficient
 * constraints and return the lexicographically smallest non-trivial solution.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 * If "want_integral" is set, then compute an integral solution
 * for the coefficients rather than using the numerators
 * of a rational solution.
 * "carry_inter" indicates whether inter-node edges should be carried or
 * only respected.
 *
 * If none of the "n_edge" groups can be carried
 * then return an empty vector.
 */
static __isl_give isl_vec *compute_carrying_sol_coef(isl_ctx *ctx,
	struct isl_sched_graph *graph, int n_edge,
	__isl_keep isl_basic_set_list *intra,
	__isl_keep isl_basic_set_list *inter, int want_integral,
	int carry_inter)
{
	isl_basic_set *lp;

	if (setup_carry_lp(ctx, graph, n_edge, intra, inter, carry_inter) < 0)
		return NULL;

	lp = isl_basic_set_copy(graph->lp);
	return non_neg_lexmin(graph, lp, n_edge, want_integral);
}

/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many of the validity dependences
 * as possible and
 * return the lexicographically smallest non-trivial solution.
 * If "fallback" is set, then the carrying is performed as a fallback
 * for the Pluto-like scheduler.
 * If "coincidence" is set, then try and carry coincidence edges as well.
 *
 * The variable "n_edge" stores the number of groups that should be carried.
 * If none of the "n_edge" groups can be carried
 * then return an empty vector.
 * If, moreover, "n_edge" is zero, then the LP problem does not even
 * need to be constructed.
 *
 * If a fallback solution is being computed, then compute an integral solution
 * for the coefficients rather than using the numerators
 * of a rational solution.
 *
 * If a fallback solution is being computed, if there are any intra-node
 * dependences, and if requested by the user, then first try
 * to only carry those intra-node dependences.
 * If this fails to carry any dependences, then try again
 * with the inter-node dependences included.
 */
static __isl_give isl_vec *compute_carrying_sol(isl_ctx *ctx,
	struct isl_sched_graph *graph, int fallback, int coincidence)
{
	int n_intra, n_inter;
	int n_edge;
	struct isl_carry carry = { 0 };
	isl_vec *sol;

	carry.intra = collect_intra_validity(ctx, graph, coincidence,
						&carry.lineality);
	carry.inter = collect_inter_validity(graph, coincidence,
						&carry.lineality);
	if (!carry.intra || !carry.inter)
		goto error;
	n_intra = isl_basic_set_list_n_basic_set(carry.intra);
	n_inter = isl_basic_set_list_n_basic_set(carry.inter);

	if (fallback && n_intra > 0 &&
	    isl_options_get_schedule_carry_self_first(ctx)) {
		sol = compute_carrying_sol_coef(ctx, graph, n_intra,
				carry.intra, carry.inter, fallback, 0);
		if (!sol || sol->size != 0 || n_inter == 0) {
			isl_carry_clear(&carry);
			return sol;
		}
		isl_vec_free(sol);
	}

	n_edge = n_intra + n_inter;
	if (n_edge == 0) {
		isl_carry_clear(&carry);
		return isl_vec_alloc(ctx, 0);
	}

	sol = compute_carrying_sol_coef(ctx, graph, n_edge,
				carry.intra, carry.inter, fallback, 1);
	isl_carry_clear(&carry);
	return sol;
error:
	isl_carry_clear(&carry);
	return NULL;
}

/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * If "fallback" is set, then the carrying is performed as a fallback
 * for the Pluto-like scheduler.
 * If "coincidence" is set, then try and carry coincidence edges as well.
 *
 * If there are no validity dependences, then no dependence can be carried and
 * the procedure is guaranteed to fail.  If there is more than one component,
 * then try computing a schedule on each component separately
 * to prevent or at least postpone this failure.
 *
 * If a schedule row is computed, then check that dependences are carried
 * for at least one of the edges.
 *
 * If the computed schedule row turns out to be trivial on one or
 * more nodes where it should not be trivial, then we throw it away
 * and try again on each component separately.
 *
 * If there is only one component, then we accept the schedule row anyway,
 * but we do not consider it as a complete row and therefore do not
 * increment graph->n_row.  Note that the ranks of the nodes that
 * do get a non-trivial schedule part will get updated regardless and
 * graph->maxvar is computed based on these ranks.  The test for
 * whether more schedule rows are required in compute_schedule_wcc
 * is therefore not affected.
 *
 * Insert a band corresponding to the schedule row at position "node"
 * of the schedule tree and continue with the construction of the schedule.
 * This insertion and the continued construction is performed by split_scaled
 * after optionally checking for non-trivial common divisors.
 */
static __isl_give isl_schedule_node *carry(__isl_take isl_schedule_node *node,
	struct isl_sched_graph *graph, int fallback, int coincidence)
{
	int trivial;
	isl_ctx *ctx;
	isl_vec *sol;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	sol = compute_carrying_sol(ctx, graph, fallback, coincidence);
	if (!sol)
		return isl_schedule_node_free(node);
	if (sol->size == 0) {
		isl_vec_free(sol);
		if (graph->scc > 1)
			return compute_component_schedule(node, graph, 1);
		isl_die(ctx, isl_error_unknown, "unable to carry dependences",
			return isl_schedule_node_free(node));
	}

	trivial = is_any_trivial(graph, sol);
	if (trivial < 0) {
		sol = isl_vec_free(sol);
	} else if (trivial && graph->scc > 1) {
		isl_vec_free(sol);
		return compute_component_schedule(node, graph, 1);
	}

	if (update_schedule(graph, sol, 0) < 0)
		return isl_schedule_node_free(node);

	return split_scaled(node, graph);
}

/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * Do so as a fallback for the Pluto-like scheduler.
 * If "coincidence" is set, then try and carry coincidence edges as well.
 */
static __isl_give isl_schedule_node *carry_fallback(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int coincidence)
{
	return carry(node, graph, 1, coincidence);
}

/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * Do so for the case where the Feautrier scheduler was selected
 * by the user.
 */
static __isl_give isl_schedule_node *carry_feautrier(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry(node, graph, 0, 0);
}

/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * Do so as a fallback for the Pluto-like scheduler.
 */
static __isl_give isl_schedule_node *carry_dependences(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry_fallback(node, graph, 0);
}

/* Construct a schedule row for each node such that as many validity or
 * coincidence dependences as possible are carried and
 * then continue with the next band.
 * Do so as a fallback for the Pluto-like scheduler.
 */
static __isl_give isl_schedule_node *carry_coincidence(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry_fallback(node, graph, 1);
}

/* Topologically sort statements mapped to the same schedule iteration
 * and insert a sequence node in front of "node"
 * corresponding to this order.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * If it turns out to be impossible to sort the statements apart,
 * because different dependences impose different orderings
 * on the statements, then we extend the schedule such that
 * it carries at least one more dependence.
 */
static __isl_give isl_schedule_node *sort_statements(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (graph->n < 1)
		isl_die(ctx, isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	if (graph->n == 1)
		return node;

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->n_edge == 0)
		return node;

	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->scc < graph->n) {
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	return node;
}

/* Are there any (non-empty) (conditional) validity edges in the graph?
 */
static int has_validity_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		int empty;

		empty = isl_map_plain_is_empty(graph->edge[i].map);
		if (empty < 0)
			return -1;
		if (empty)
			continue;
		if (is_any_validity(&graph->edge[i]))
			return 1;
	}

	return 0;
}

/* Should we apply a Feautrier step?
 * That is, did the user request the Feautrier algorithm and are
 * there any validity dependences (left)?
 */
static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
		return 0;

	return has_validity_edges(graph);
}

/* Compute a schedule for a connected dependence graph using Feautrier's
 * multi-dimensional scheduling algorithm and return the updated schedule node.
 *
 * The original algorithm is described in [1].
 * The main idea is to minimize the number of scheduling dimensions, by
 * trying to satisfy as many dependences as possible per scheduling dimension.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
	isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry_feautrier(node, graph);
}

/* Turn off the "local" bit on all (condition) edges.
 */
static void clear_local_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_condition(&graph->edge[i]))
			clear_local(&graph->edge[i]);
}

/* Does "graph" have both condition and conditional validity edges?
 */
static int need_condition_check(struct isl_sched_graph *graph)
{
	int i;
	int any_condition = 0;
	int any_conditional_validity = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (is_condition(&graph->edge[i]))
			any_condition = 1;
		if (is_conditional_validity(&graph->edge[i]))
			any_conditional_validity = 1;
	}

	return any_condition && any_conditional_validity;
}

/* Does "graph" contain any coincidence edge?
 */
static int has_any_coincidence(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_coincidence(&graph->edge[i]))
			return 1;

	return 0;
}

/* Extract the final schedule row as a map with the iteration domain
 * of "node" as domain.
 */
static __isl_give isl_map *final_row(struct isl_sched_node *node)
{
	isl_multi_aff *ma;
	isl_size n_row;

	n_row = isl_mat_rows(node->sched);
	if (n_row < 0)
		return NULL;
	ma = node_extract_partial_schedule_multi_aff(node, n_row - 1, 1);
	return isl_map_from_multi_aff(ma);
}

/* Is the conditional validity dependence in the edge with index "edge_index"
 * violated by the latest (i.e., final) row of the schedule?
 * That is, is i scheduled after j
 * for any conditional validity dependence i -> j?
 */
static int is_violated(struct isl_sched_graph *graph, int edge_index)
{
	isl_map *src_sched, *dst_sched, *map;
	struct isl_sched_edge *edge = &graph->edge[edge_index];
	int empty;

	src_sched = final_row(edge->src);
	dst_sched = final_row(edge->dst);
	map = isl_map_copy(edge->map);
	map = isl_map_apply_domain(map, src_sched);
	map = isl_map_apply_range(map, dst_sched);
	map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
	empty = isl_map_is_empty(map);
	isl_map_free(map);
	if (empty < 0)
		return -1;

	return !empty;
}

/* Does "graph" have any satisfied condition edges that
 * are adjacent to the conditional validity constraint with
 * domain "conditional_source" and range "conditional_sink"?
 *
 * A satisfied condition is one that is not local.
 * If a condition was forced to be local already (i.e., marked as local)
 * then there is no need to check if it is in fact local.
 *
 * Additionally, mark all adjacent condition edges found as local.
 */
static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
	__isl_keep isl_union_set *conditional_source,
	__isl_keep isl_union_set *conditional_sink)
{
	int i;
	int any = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent, local;
		isl_union_map *condition;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;

		condition = graph->edge[i].tagged_condition;
		adjacent = domain_intersects(condition, conditional_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(condition,
							conditional_source);
		if (adjacent < 0)
			return -1;
		if (!adjacent)
			continue;

		set_local(&graph->edge[i]);

		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			return -1;
		if (!local)
			any = 1;
	}

	return any;
}

/* Are there any violated conditional validity dependences with
 * adjacent condition dependences that are not local with respect
 * to the current schedule?
 * That is, is the conditional validity constraint violated?
 *
 * Additionally, mark all those adjacent condition dependences as local.
 * We also mark those adjacent condition dependences that were not marked
 * as local before, but just happened to be local already.  This ensures
 * that they remain local if the schedule is recomputed.
 *
 * We first collect domain and range of all violated conditional validity
 * dependences and then check if there are any adjacent non-local
 * condition dependences.
 */
static int has_violated_conditional_constraint(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		isl_union_set *uset;
		isl_union_map *umap;
		int violated;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;

		violated = is_violated(graph, i);
		if (violated < 0)
			goto error;
		if (!violated)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);
		source = isl_union_set_coalesce(source);

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
		sink = isl_union_set_coalesce(sink);
	}

	if (any)
		any = has_adjacent_true_conditions(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return any;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}

/* Examine the current band (the rows between graph->band_start and
 * graph->n_total_row), deciding whether to drop it or add it to "node"
 * and then continue with the computation of the next band, if any.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * The caller keeps looking for a new row as long as
 * graph->n_row < graph->maxvar.  If the latest attempt to find
 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
 * then we either
 * - split between SCCs and start over (assuming we found an interesting
 *	pair of SCCs between which to split)
 * - continue with the next band (assuming the current band has at least
 *	one row)
 * - if there is more than one SCC left, then split along all SCCs
 * - if outer coincidence needs to be enforced, then try to carry as many
 *	validity or coincidence dependences as possible and
 *	continue with the next band
 * - try to carry as many validity dependences as possible and
 *	continue with the next band
 * In each case, we first insert a band node in the schedule tree
 * if any rows have been computed.
 *
 * If the caller managed to complete the schedule and the current band
 * is empty, then finish off by topologically
 * sorting the statements based on the remaining dependences.
 * If, on the other hand, the current band has at least one row,
 * then continue with the next band.  Note that this next band
 * will necessarily be empty, but the graph may still be split up
 * into weakly connected components before arriving back here.
 */
static __isl_give isl_schedule_node *compute_schedule_finish_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	int empty;

	if (!node)
		return NULL;

	empty = graph->n_total_row == graph->band_start;
	if (graph->n_row < graph->maxvar) {
		isl_ctx *ctx;

		ctx = isl_schedule_node_get_ctx(node);
		if (!ctx->opt->schedule_maximize_band_depth && !empty)
			return compute_next_band(node, graph, 1);
		if (graph->src_scc >= 0)
			return compute_split_schedule(node, graph);
		if (!empty)
			return compute_next_band(node, graph, 1);
		if (graph->scc > 1)
			return compute_component_schedule(node, graph, 1);
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		if (isl_options_get_schedule_outer_coincidence(ctx))
			return carry_coincidence(node, graph);
		return carry_dependences(node, graph);
	}

	if (!empty)
		return compute_next_band(node, graph, 1);
	return sort_statements(node, graph, initialized);
}

/* Construct a band of schedule rows for a connected dependence graph.
 * The caller is responsible for determining the strongly connected
 * components and calling compute_maxvar first.
 *
 * We try to find a sequence of as many schedule rows as possible that result
 * in non-negative dependence distances (independent of the previous rows
 * in the sequence, i.e., such that the sequence is tilable), with as
 * many of the initial rows as possible satisfying the coincidence constraints.
 * The computation stops if we can't find any more rows or if we have found
 * all the rows we wanted to find.
 *
 * If ctx->opt->schedule_outer_coincidence is set, then we force the
 * outermost dimension to satisfy the coincidence constraints.  If this
 * turns out to be impossible, we fall back on the general scheme above
 * and try to carry as many dependences as possible.
 *
 * If "graph" contains both condition and conditional validity dependences,
 * then we need to check that the conditional schedule constraint
 * is satisfied, i.e., there are no violated conditional validity dependences
 * that are adjacent to any non-local condition dependences.
 * If there are, then we mark all those adjacent condition dependences
 * as local and recompute the current band.  Those dependences that
 * are marked local will then be forced to be local.
 * The initial computation is performed with no dependences marked as local.
 * If we are lucky, then there will be no violated conditional validity
 * dependences adjacent to any non-local condition dependences.
 * Otherwise, we mark some additional condition dependences as local and
 * recompute.  We continue this process until there are no violations left or
 * until we are no longer able to compute a schedule.
 * Since there are only a finite number of dependences,
 * there will only be a finite number of iterations.
 */
static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int has_coincidence;
	int use_coincidence;
	int force_coincidence = 0;
	int check_conditional;

	if (sort_sccs(graph) < 0)
		return isl_stat_error;

	clear_local_edges(graph);
	check_conditional = need_condition_check(graph);
	has_coincidence = has_any_coincidence(graph);

	if (ctx->opt->schedule_outer_coincidence)
		force_coincidence = 1;

	use_coincidence = has_coincidence;
	while (graph->n_row < graph->maxvar) {
		isl_vec *sol;
		int violated;
		int coincident;

		graph->src_scc = -1;
		graph->dst_scc = -1;

		if (setup_lp(ctx, graph, use_coincidence) < 0)
			return isl_stat_error;
		sol = solve_lp(ctx, graph);
		if (!sol)
			return isl_stat_error;
		if (sol->size == 0) {
			int empty = graph->n_total_row == graph->band_start;

			isl_vec_free(sol);
			if (use_coincidence && (!force_coincidence || !empty)) {
				use_coincidence = 0;
				continue;
			}
			return isl_stat_ok;
		}
		coincident = !has_coincidence || use_coincidence;
		if (update_schedule(graph, sol, coincident) < 0)
			return isl_stat_error;

		if (!check_conditional)
			continue;
		violated = has_violated_conditional_constraint(ctx, graph);
		if (violated < 0)
			return isl_stat_error;
		if (!violated)
			continue;
		if (reset_band(graph) < 0)
			return isl_stat_error;
		use_coincidence = has_coincidence;
	}

	return isl_stat_ok;
}

/* Compute a schedule for a connected dependence graph by considering
 * the graph as a whole and return the updated schedule node.
 *
 * The actual schedule rows of the current band are computed by
 * compute_schedule_wcc_band.  compute_schedule_finish_band takes
 * care of integrating the band into "node" and continuing
 * with the computation.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (compute_schedule_wcc_band(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	return compute_schedule_finish_band(node, graph, 1);
}

/* Clustering information used by compute_schedule_wcc_clustering.
 *
 * "n" is the number of SCCs in the original dependence graph
 * "scc" is an array of "n" elements, each representing an SCC
 * of the original dependence graph.  All entries in the same cluster
 * have the same number of schedule rows.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
 * where each cluster is represented by the index of the first SCC
 * in the cluster.  Initially, each SCC belongs to a cluster containing
 * only that SCC.
 *
 * "scc_in_merge" is used by merge_clusters_along_edge to keep
 * track of which SCCs need to be merged.
 *
 * "cluster" contains the merged clusters of SCCs after the clustering
 * has completed.
 *
 * "scc_node" is a temporary data structure used inside copy_partial.
 * For each SCC, it keeps track of the number of nodes in the SCC
 * that have already been copied.
 */
struct isl_clustering {
	int n;
	struct isl_sched_graph *scc;
	struct isl_sched_graph *cluster;
	int *scc_cluster;
	int *scc_node;
	int *scc_in_merge;
};

/* Initialize the clustering data structure "c" from "graph".
 *
 * In particular, allocate memory, extract the SCCs from "graph"
 * into c->scc, initialize scc_cluster and construct
 * a band of schedule rows for each SCC.
 * Within each extracted sub-graph, there is only one SCC by construction.
 * Each SCC initially belongs to a cluster containing only that SCC.
 */
static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *graph)
{
	int i;

	c->n = graph->scc;
	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
	c->scc_node = isl_calloc_array(ctx, int, c->n);
	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
	if (!c->scc || !c->cluster ||
	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
		return isl_stat_error;

	for (i = 0; i < c->n; ++i) {
		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
				&edge_scc_exactly, i, &c->scc[i]) < 0)
			return isl_stat_error;

		if (compute_maxvar(&c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = i;
	}

	return isl_stat_ok;
}
/* Free all memory allocated for "c".
 */
static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
{
	int i;

	if (c->scc)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->scc[i]);
	free(c->scc);
	if (c->cluster)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->cluster[i]);
	free(c->cluster);
	free(c->scc_cluster);
	free(c->scc_node);
	free(c->scc_in_merge);
}
/* Should we refrain from merging the cluster in "graph" with
 * any other cluster?
 * In particular, is its current schedule band empty and incomplete?
 */
static int bad_cluster(struct isl_sched_graph *graph)
{
	return graph->n_row < graph->maxvar &&
		graph->n_total_row == graph->band_start;
}

/* Is "edge" a proximity edge with a non-empty dependence relation?
 */
static isl_bool is_non_empty_proximity(struct isl_sched_edge *edge)
{
	if (!is_proximity(edge))
		return isl_bool_false;
	return isl_bool_not(isl_map_plain_is_empty(edge->map));
}
/* Return the index of an edge in "graph" that can be used to merge
 * two clusters in "c".
 * Return graph->n_edge if no such edge can be found.
 * Return -1 on error.
 *
 * In particular, return a proximity edge between two clusters
 * that is not marked "no_merge" and such that neither of the
 * two clusters has an incomplete, empty band.
 *
 * If there are multiple such edges, then try and find the most
 * appropriate edge to use for merging. In particular, pick the edge
 * with the greatest weight. If there are multiple of those,
 * then pick one with the shortest distance between
 * the two cluster representatives.
 */
static int find_proximity(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
5898 int i
, best
= graph
->n_edge
, best_dist
, best_weight
;
5900 for (i
= 0; i
< graph
->n_edge
; ++i
) {
5901 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
5905 prox
= is_non_empty_proximity(edge
);
5912 if (bad_cluster(&c
->scc
[edge
->src
->scc
]) ||
5913 bad_cluster(&c
->scc
[edge
->dst
->scc
]))
5915 dist
= c
->scc_cluster
[edge
->dst
->scc
] -
5916 c
->scc_cluster
[edge
->src
->scc
];
5919 weight
= edge
->weight
;
5920 if (best
< graph
->n_edge
) {
5921 if (best_weight
> weight
)
5923 if (best_weight
== weight
&& best_dist
<= dist
)
5928 best_weight
= weight
;
/* Internal data structure used in mark_merge_sccs.
 *
 * "graph" is the dependence graph in which a strongly connected
 * component is constructed.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
 * "src" and "dst" are the indices of the nodes that are being merged.
 */
struct isl_mark_merge_sccs_data {
	struct isl_sched_graph *graph;
	int *scc_cluster;
	int src;
	int dst;
};

/* Check whether the cluster containing node "i" depends on the cluster
 * containing node "j". If "i" and "j" belong to the same cluster,
 * then they are taken to depend on each other to ensure that
 * the resulting strongly connected component consists of complete
 * clusters. Furthermore, if "i" and "j" are the two nodes that
 * are being merged, then they are taken to depend on each other as well.
 * Otherwise, check if there is a (conditional) validity dependence
 * from node[j] to node[i], forcing node[i] to follow node[j].
 */
static isl_bool cluster_follows(int i, int j, void *user)
{
	struct isl_mark_merge_sccs_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	int *scc_cluster = data->scc_cluster;

	if (data->src == i && data->dst == j)
		return isl_bool_true;
	if (data->src == j && data->dst == i)
		return isl_bool_true;
	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
		return isl_bool_true;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Mark all SCCs that belong to either of the two clusters in "c"
 * connected by the edge in "graph" with index "edge", or to any
 * of the intermediate clusters.
 * The marking is recorded in c->scc_in_merge.
 *
 * The given edge has been selected for merging two clusters,
 * meaning that there is at least one proximity edge between the two nodes.
 * However, there may also be (indirect) validity dependences
 * between the two nodes. When merging the two clusters, all clusters
 * containing one or more of the intermediate nodes along the
 * indirect validity dependences need to be merged in as well.
 *
 * First collect all such nodes by computing the strongly connected
 * component (SCC) containing the two nodes connected by the edge, where
 * the two nodes are considered to depend on each other to make
 * sure they end up in the same SCC. Similarly, each node is considered
 * to depend on every other node in the same cluster to ensure
 * that the SCC consists of complete clusters.
 *
 * Then the original SCCs that contain any of these nodes are marked
 * in c->scc_in_merge.
 */
static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	int edge, struct isl_clustering *c)
{
5998 struct isl_mark_merge_sccs_data data
;
5999 struct isl_tarjan_graph
*g
;
6002 for (i
= 0; i
< c
->n
; ++i
)
6003 c
->scc_in_merge
[i
] = 0;
6006 data
.scc_cluster
= c
->scc_cluster
;
6007 data
.src
= graph
->edge
[edge
].src
- graph
->node
;
6008 data
.dst
= graph
->edge
[edge
].dst
- graph
->node
;
6010 g
= isl_tarjan_graph_component(ctx
, graph
->n
, data
.dst
,
6011 &cluster_follows
, &data
);
6017 isl_die(ctx
, isl_error_internal
,
6018 "expecting at least two nodes in component",
6020 if (g
->order
[--i
] != -1)
6021 isl_die(ctx
, isl_error_internal
,
6022 "expecting end of component marker", goto error
);
6024 for (--i
; i
>= 0 && g
->order
[i
] != -1; --i
) {
6025 int scc
= graph
->node
[g
->order
[i
]].scc
;
6026 c
->scc_in_merge
[scc
] = 1;
6029 isl_tarjan_graph_free(g
);
6032 isl_tarjan_graph_free(g
);
6033 return isl_stat_error
;
/* Construct the identifier "cluster_i".
 */
static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
{
	char name[40];

	snprintf(name, sizeof(name), "cluster_%d", i);
	return isl_id_alloc(ctx, name, NULL);
}

/* Construct the space of the cluster with index "i" containing
 * the strongly connected component "scc".
 *
 * In particular, construct a space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of "scc".
 */
static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
{
	int nvar;
	isl_space *space;
	isl_id *id;

	nvar = scc->n_total_row - scc->band_start;
	space = isl_space_copy(scc->node[0].space);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, nvar);
	id = cluster_id(isl_space_get_ctx(space), i);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);

	return space;
}
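/* Illustrative example for cluster_id and cluster_space (made-up numbers):
 * for the cluster with index 3 whose current band consists of two schedule
 * rows, the constructed space is the two-dimensional set space
 *
 *	{ cluster_3[i0, i1] }
 *
 * so that the rows of the band become the coordinates of the cluster
 * instances.
 */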
6069 /* Collect the domain of the graph for merging clusters.
6071 * In particular, for each cluster with first SCC "i", construct
6072 * a set in the space called cluster_i with dimension equal
6073 * to the number of schedule rows in the current band of the cluster.
6075 static __isl_give isl_union_set
*collect_domain(isl_ctx
*ctx
,
6076 struct isl_sched_graph
*graph
, struct isl_clustering
*c
)
6080 isl_union_set
*domain
;
6082 space
= isl_space_params_alloc(ctx
, 0);
6083 domain
= isl_union_set_empty(space
);
6085 for (i
= 0; i
< graph
->scc
; ++i
) {
6088 if (!c
->scc_in_merge
[i
])
6090 if (c
->scc_cluster
[i
] != i
)
6092 space
= cluster_space(&c
->scc
[i
], i
);
6093 domain
= isl_union_set_add_set(domain
, isl_set_universe(space
));
6099 /* Construct a map from the original instances to the corresponding
6100 * cluster instance in the current bands of the clusters in "c".
6102 static __isl_give isl_union_map
*collect_cluster_map(isl_ctx
*ctx
,
6103 struct isl_sched_graph
*graph
, struct isl_clustering
*c
)
6107 isl_union_map
*cluster_map
;
6109 space
= isl_space_params_alloc(ctx
, 0);
6110 cluster_map
= isl_union_map_empty(space
);
6111 for (i
= 0; i
< graph
->scc
; ++i
) {
6115 if (!c
->scc_in_merge
[i
])
6118 id
= cluster_id(ctx
, c
->scc_cluster
[i
]);
6119 start
= c
->scc
[i
].band_start
;
6120 n
= c
->scc
[i
].n_total_row
- start
;
6121 for (j
= 0; j
< c
->scc
[i
].n
; ++j
) {
6124 struct isl_sched_node
*node
= &c
->scc
[i
].node
[j
];
6126 ma
= node_extract_partial_schedule_multi_aff(node
,
6128 ma
= isl_multi_aff_set_tuple_id(ma
, isl_dim_out
,
6130 map
= isl_map_from_multi_aff(ma
);
6131 cluster_map
= isl_union_map_add_map(cluster_map
, map
);
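/* Illustrative example for collect_cluster_map above (made-up statement
 * and schedule): if a node S has partial schedule rows (i, i + j) in the
 * current band of the cluster with index 0, then the constructed map
 * contains
 *
 *	{ S[i, j] -> cluster_0[i, i + j] }
 *
 * mapping each original statement instance to the corresponding cluster
 * instance.
 */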
6139 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
6140 * that are not isl_edge_condition or isl_edge_conditional_validity.
6142 static __isl_give isl_schedule_constraints
*add_non_conditional_constraints(
6143 struct isl_sched_edge
*edge
, __isl_keep isl_union_map
*umap
,
6144 __isl_take isl_schedule_constraints
*sc
)
6146 enum isl_edge_type t
;
6151 for (t
= isl_edge_first
; t
<= isl_edge_last
; ++t
) {
6152 if (t
== isl_edge_condition
||
6153 t
== isl_edge_conditional_validity
)
6155 if (!is_type(edge
, t
))
6157 sc
= isl_schedule_constraints_add(sc
, t
,
6158 isl_union_map_copy(umap
));
6164 /* Add schedule constraints of types isl_edge_condition and
6165 * isl_edge_conditional_validity to "sc" by applying "umap" to
6166 * the domains of the wrapped relations in domain and range
6167 * of the corresponding tagged constraints of "edge".
6169 static __isl_give isl_schedule_constraints
*add_conditional_constraints(
6170 struct isl_sched_edge
*edge
, __isl_keep isl_union_map
*umap
,
6171 __isl_take isl_schedule_constraints
*sc
)
6173 enum isl_edge_type t
;
6174 isl_union_map
*tagged
;
6176 for (t
= isl_edge_condition
; t
<= isl_edge_conditional_validity
; ++t
) {
6177 if (!is_type(edge
, t
))
6179 if (t
== isl_edge_condition
)
6180 tagged
= isl_union_map_copy(edge
->tagged_condition
);
6182 tagged
= isl_union_map_copy(edge
->tagged_validity
);
6183 tagged
= isl_union_map_zip(tagged
);
6184 tagged
= isl_union_map_apply_domain(tagged
,
6185 isl_union_map_copy(umap
));
6186 tagged
= isl_union_map_zip(tagged
);
6187 sc
= isl_schedule_constraints_add(sc
, t
, tagged
);
6195 /* Given a mapping "cluster_map" from the original instances to
6196 * the cluster instances, add schedule constraints on the clusters
6197 * to "sc" corresponding to the original constraints represented by "edge".
6199 * For non-tagged dependence constraints, the cluster constraints
6200 * are obtained by applying "cluster_map" to the edge->map.
6202 * For tagged dependence constraints, "cluster_map" needs to be applied
6203 * to the domains of the wrapped relations in domain and range
6204 * of the tagged dependence constraints. Pick out the mappings
6205 * from these domains from "cluster_map" and construct their product.
6206 * This mapping can then be applied to the pair of domains.
6208 static __isl_give isl_schedule_constraints
*collect_edge_constraints(
6209 struct isl_sched_edge
*edge
, __isl_keep isl_union_map
*cluster_map
,
6210 __isl_take isl_schedule_constraints
*sc
)
6212 isl_union_map
*umap
;
6214 isl_union_set
*uset
;
6215 isl_union_map
*umap1
, *umap2
;
6220 umap
= isl_union_map_from_map(isl_map_copy(edge
->map
));
6221 umap
= isl_union_map_apply_domain(umap
,
6222 isl_union_map_copy(cluster_map
));
6223 umap
= isl_union_map_apply_range(umap
,
6224 isl_union_map_copy(cluster_map
));
6225 sc
= add_non_conditional_constraints(edge
, umap
, sc
);
6226 isl_union_map_free(umap
);
6228 if (!sc
|| (!is_condition(edge
) && !is_conditional_validity(edge
)))
6231 space
= isl_space_domain(isl_map_get_space(edge
->map
));
6232 uset
= isl_union_set_from_set(isl_set_universe(space
));
6233 umap1
= isl_union_map_copy(cluster_map
);
6234 umap1
= isl_union_map_intersect_domain(umap1
, uset
);
6235 space
= isl_space_range(isl_map_get_space(edge
->map
));
6236 uset
= isl_union_set_from_set(isl_set_universe(space
));
6237 umap2
= isl_union_map_copy(cluster_map
);
6238 umap2
= isl_union_map_intersect_domain(umap2
, uset
);
6239 umap
= isl_union_map_product(umap1
, umap2
);
6241 sc
= add_conditional_constraints(edge
, umap
, sc
);
6243 isl_union_map_free(umap
);
6247 /* Given a mapping "cluster_map" from the original instances to
6248 * the cluster instances, add schedule constraints on the clusters
6249 * to "sc" corresponding to all edges in "graph" between nodes that
6250 * belong to SCCs that are marked for merging in "scc_in_merge".
6252 static __isl_give isl_schedule_constraints
*collect_constraints(
6253 struct isl_sched_graph
*graph
, int *scc_in_merge
,
6254 __isl_keep isl_union_map
*cluster_map
,
6255 __isl_take isl_schedule_constraints
*sc
)
6259 for (i
= 0; i
< graph
->n_edge
; ++i
) {
6260 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
6262 if (!scc_in_merge
[edge
->src
->scc
])
6264 if (!scc_in_merge
[edge
->dst
->scc
])
6266 sc
= collect_edge_constraints(edge
, cluster_map
, sc
);
6272 /* Construct a dependence graph for scheduling clusters with respect
6273 * to each other and store the result in "merge_graph".
6274 * In particular, the nodes of the graph correspond to the schedule
6275 * dimensions of the current bands of those clusters that have been
6276 * marked for merging in "c".
6278 * First construct an isl_schedule_constraints object for this domain
6279 * by transforming the edges in "graph" to the domain.
6280 * Then initialize a dependence graph for scheduling from these
6283 static isl_stat
init_merge_graph(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
6284 struct isl_clustering
*c
, struct isl_sched_graph
*merge_graph
)
6286 isl_union_set
*domain
;
6287 isl_union_map
*cluster_map
;
6288 isl_schedule_constraints
*sc
;
6291 domain
= collect_domain(ctx
, graph
, c
);
6292 sc
= isl_schedule_constraints_on_domain(domain
);
6294 return isl_stat_error
;
6295 cluster_map
= collect_cluster_map(ctx
, graph
, c
);
6296 sc
= collect_constraints(graph
, c
->scc_in_merge
, cluster_map
, sc
);
6297 isl_union_map_free(cluster_map
);
6299 r
= graph_init(merge_graph
, sc
);
6301 isl_schedule_constraints_free(sc
);
6306 /* Compute the maximal number of remaining schedule rows that still need
6307 * to be computed for the nodes that belong to clusters with the maximal
6308 * dimension for the current band (i.e., the band that is to be merged).
6309 * Only clusters that are about to be merged are considered.
6310 * "maxvar" is the maximal dimension for the current band.
6311 * "c" contains information about the clusters.
6313 * Return the maximal number of remaining schedule rows or -1 on error.
6315 static int compute_maxvar_max_slack(int maxvar
, struct isl_clustering
*c
)
6321 for (i
= 0; i
< c
->n
; ++i
) {
6323 struct isl_sched_graph
*scc
;
6325 if (!c
->scc_in_merge
[i
])
6328 nvar
= scc
->n_total_row
- scc
->band_start
;
6331 for (j
= 0; j
< scc
->n
; ++j
) {
6332 struct isl_sched_node
*node
= &scc
->node
[j
];
6335 if (node_update_vmap(node
) < 0)
6337 slack
= node
->nvar
- node
->rank
;
6338 if (slack
> max_slack
)
6346 /* If there are any clusters where the dimension of the current band
6347 * (i.e., the band that is to be merged) is smaller than "maxvar" and
6348 * if there are any nodes in such a cluster where the number
6349 * of remaining schedule rows that still need to be computed
6350 * is greater than "max_slack", then return the smallest current band
6351 * dimension of all these clusters. Otherwise return the original value
6352 * of "maxvar". Return -1 in case of any error.
6353 * Only clusters that are about to be merged are considered.
6354 * "c" contains information about the clusters.
6356 static int limit_maxvar_to_slack(int maxvar
, int max_slack
,
6357 struct isl_clustering
*c
)
6361 for (i
= 0; i
< c
->n
; ++i
) {
6363 struct isl_sched_graph
*scc
;
6365 if (!c
->scc_in_merge
[i
])
6368 nvar
= scc
->n_total_row
- scc
->band_start
;
6371 for (j
= 0; j
< scc
->n
; ++j
) {
6372 struct isl_sched_node
*node
= &scc
->node
[j
];
6375 if (node_update_vmap(node
) < 0)
6377 slack
= node
->nvar
- node
->rank
;
6378 if (slack
> max_slack
) {
6388 /* Adjust merge_graph->maxvar based on the number of remaining schedule rows
6389 * that still need to be computed. In particular, if there is a node
6390 * in a cluster where the dimension of the current band is smaller
6391 * than merge_graph->maxvar, but the number of remaining schedule rows
6392 * is greater than that of any node in a cluster with the maximal
6393 * dimension for the current band (i.e., merge_graph->maxvar),
6394 * then adjust merge_graph->maxvar to the (smallest) current band dimension
6395 * of those clusters. Without this adjustment, the total number of
6396 * schedule dimensions would be increased, resulting in a skewed view
6397 * of the number of coincident dimensions.
6398 * "c" contains information about the clusters.
6400 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
6401 * then there is no point in attempting any merge since it will be rejected
6402 * anyway. Set merge_graph->maxvar to zero in such cases.
6404 static isl_stat
adjust_maxvar_to_slack(isl_ctx
*ctx
,
6405 struct isl_sched_graph
*merge_graph
, struct isl_clustering
*c
)
6407 int max_slack
, maxvar
;
6409 max_slack
= compute_maxvar_max_slack(merge_graph
->maxvar
, c
);
6411 return isl_stat_error
;
6412 maxvar
= limit_maxvar_to_slack(merge_graph
->maxvar
, max_slack
, c
);
6414 return isl_stat_error
;
6416 if (maxvar
< merge_graph
->maxvar
) {
6417 if (isl_options_get_schedule_maximize_band_depth(ctx
))
6418 merge_graph
->maxvar
= 0;
6420 merge_graph
->maxvar
= maxvar
;
/* Return the number of coincident dimensions in the current band of "graph",
 * where the nodes of "graph" are assumed to be scheduled by a single band.
 */
static int get_n_coincident(struct isl_sched_graph *graph)
{
	int i;

	for (i = graph->band_start; i < graph->n_total_row; ++i)
		if (!graph->node[0].coincident[i])
			break;

	return i - graph->band_start;
}
6440 /* Should the clusters be merged based on the cluster schedule
6441 * in the current (and only) band of "merge_graph", given that
6442 * coincidence should be maximized?
6444 * If the number of coincident schedule dimensions in the merged band
6445 * would be less than the maximal number of coincident schedule dimensions
6446 * in any of the merged clusters, then the clusters should not be merged.
6448 static isl_bool
ok_to_merge_coincident(struct isl_clustering
*c
,
6449 struct isl_sched_graph
*merge_graph
)
6456 for (i
= 0; i
< c
->n
; ++i
) {
6457 if (!c
->scc_in_merge
[i
])
6459 n_coincident
= get_n_coincident(&c
->scc
[i
]);
6460 if (n_coincident
> max_coincident
)
6461 max_coincident
= n_coincident
;
6464 n_coincident
= get_n_coincident(merge_graph
);
6466 return n_coincident
>= max_coincident
;
6469 /* Return the transformation on "node" expressed by the current (and only)
6470 * band of "merge_graph" applied to the clusters in "c".
6472 * First find the representation of "node" in its SCC in "c" and
6473 * extract the transformation expressed by the current band.
6474 * Then extract the transformation applied by "merge_graph"
6475 * to the cluster to which this SCC belongs.
6476 * Combine the two to obtain the complete transformation on the node.
6478 * Note that the range of the first transformation is an anonymous space,
6479 * while the domain of the second is named "cluster_X". The range
6480 * of the former therefore needs to be adjusted before the two
6483 static __isl_give isl_map
*extract_node_transformation(isl_ctx
*ctx
,
6484 struct isl_sched_node
*node
, struct isl_clustering
*c
,
6485 struct isl_sched_graph
*merge_graph
)
6487 struct isl_sched_node
*scc_node
, *cluster_node
;
6491 isl_multi_aff
*ma
, *ma2
;
6493 scc_node
= graph_find_node(ctx
, &c
->scc
[node
->scc
], node
->space
);
6494 if (scc_node
&& !is_node(&c
->scc
[node
->scc
], scc_node
))
6495 isl_die(ctx
, isl_error_internal
, "unable to find node",
6497 start
= c
->scc
[node
->scc
].band_start
;
6498 n
= c
->scc
[node
->scc
].n_total_row
- start
;
6499 ma
= node_extract_partial_schedule_multi_aff(scc_node
, start
, n
);
6500 space
= cluster_space(&c
->scc
[node
->scc
], c
->scc_cluster
[node
->scc
]);
6501 cluster_node
= graph_find_node(ctx
, merge_graph
, space
);
6502 if (cluster_node
&& !is_node(merge_graph
, cluster_node
))
6503 isl_die(ctx
, isl_error_internal
, "unable to find cluster",
6504 space
= isl_space_free(space
));
6505 id
= isl_space_get_tuple_id(space
, isl_dim_set
);
6506 ma
= isl_multi_aff_set_tuple_id(ma
, isl_dim_out
, id
);
6507 isl_space_free(space
);
6508 n
= merge_graph
->n_total_row
;
6509 ma2
= node_extract_partial_schedule_multi_aff(cluster_node
, 0, n
);
6510 ma
= isl_multi_aff_pullback_multi_aff(ma2
, ma
);
6512 return isl_map_from_multi_aff(ma
);
/* Given a set of distances "set", are they bounded by a small constant
 * in direction "pos"?
 * In practice, check if they are bounded by 2 by checking that there
 * are no elements with a value greater than or equal to 3 or
 * smaller than or equal to -3.
 */
static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
{
	isl_set *test;
	isl_bool bounded;

	if (!set)
		return isl_bool_error;

	test = isl_set_copy(set);
	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	if (bounded < 0 || !bounded)
		return bounded;

	test = isl_set_copy(set);
	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	return bounded;
}
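/* Illustrative example for distance_is_bounded above (made-up sets): the
 * check accepts a distance set such as { [d] : -2 <= d <= 2 } and rejects
 * { [d] : d >= 0 }, since the latter contains elements greater than or
 * equal to 3. A hypothetical call would look like
 *
 *	isl_set *dist = isl_set_read_from_str(ctx, "{ [d] : -2 <= d <= 2 }");
 *	isl_bool bounded = distance_is_bounded(dist, 0);
 *	isl_set_free(dist);
 *
 * with "bounded" evaluating to isl_bool_true.
 */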
/* Does the set "set" have a fixed (but possibly parametric) value
 * at dimension "pos"?
 */
static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
{
	isl_size n;
	isl_bool single;

	n = isl_set_dim(set, isl_dim_set);
	if (n < 0)
		return isl_bool_error;
	set = isl_set_copy(set);
	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
	set = isl_set_project_out(set, isl_dim_set, 0, pos);
	single = isl_set_is_singleton(set);
	isl_set_free(set);

	return single;
}

/* Does "map" have a fixed (but possibly parametric) value
 * at dimension "pos" of either its domain or its range?
 */
static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
{
	isl_set *set;
	isl_bool single;

	set = isl_map_domain(isl_map_copy(map));
	single = has_single_value(set, pos);
	isl_set_free(set);

	if (single < 0 || single)
		return single;

	set = isl_map_range(isl_map_copy(map));
	single = has_single_value(set, pos);
	isl_set_free(set);

	return single;
}
/* Does the edge "edge" from "graph" have bounded dependence distances
 * in the merged graph "merge_graph" of a selection of clusters in "c"?
 *
 * Extract the complete transformations of the source and destination
 * nodes of the edge, apply them to the edge constraints and
 * compute the differences. Finally, check if these differences are bounded
 * in each direction.
 *
 * If the dimension of the band is greater than the number of
 * dimensions that can be expected to be optimized by the edge
 * (based on its weight), then also allow the differences to be unbounded
 * in the remaining dimensions, but only if either the source or
 * the destination has a fixed value in that direction.
 * This allows a statement that produces values that are used by
 * several instances of another statement to be merged with that
 * other statement.
 * However, merging such clusters will introduce an inherently
 * large proximity distance inside the merged cluster, meaning
 * that proximity distances will no longer be optimized in
 * subsequent merges. These merges are therefore only allowed
 * after all other possible merges have been tried.
 * The first time such a merge is encountered, the weight of the edge
 * is replaced by a negative weight. The second time (i.e., after
 * all merges over edges with a non-negative weight have been tried),
 * the merge is allowed.
 */
static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i, n_slack;
	isl_size n;
	isl_bool bounded;
	isl_map *map, *t;
	isl_set *dist;
6623 map
= isl_map_copy(edge
->map
);
6624 t
= extract_node_transformation(ctx
, edge
->src
, c
, merge_graph
);
6625 map
= isl_map_apply_domain(map
, t
);
6626 t
= extract_node_transformation(ctx
, edge
->dst
, c
, merge_graph
);
6627 map
= isl_map_apply_range(map
, t
);
6628 dist
= isl_map_deltas(isl_map_copy(map
));
6630 bounded
= isl_bool_true
;
6631 n
= isl_set_dim(dist
, isl_dim_set
);
6634 n_slack
= n
- edge
->weight
;
6635 if (edge
->weight
< 0)
6636 n_slack
-= graph
->max_weight
+ 1;
6637 for (i
= 0; i
< n
; ++i
) {
6638 isl_bool bounded_i
, singular_i
;
6640 bounded_i
= distance_is_bounded(dist
, i
);
6645 if (edge
->weight
>= 0)
6646 bounded
= isl_bool_false
;
6650 singular_i
= has_singular_src_or_dst(map
, i
);
6655 bounded
= isl_bool_false
;
6658 if (!bounded
&& i
>= n
&& edge
->weight
>= 0)
6659 edge
->weight
-= graph
->max_weight
+ 1;
6667 return isl_bool_error
;
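/* Note on the postponement encoding used above (illustrative): an edge is
 * postponed by subtracting graph->max_weight + 1 from its weight, which
 * makes the weight negative while keeping the original value recoverable
 * as edge->weight + graph->max_weight + 1. This is also why n_slack is
 * corrected by graph->max_weight + 1 above when the weight is negative.
 */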
6670 /* Should the clusters be merged based on the cluster schedule
6671 * in the current (and only) band of "merge_graph"?
6672 * "graph" is the original dependence graph, while "c" records
6673 * which SCCs are involved in the latest merge.
6675 * In particular, is there at least one proximity constraint
6676 * that is optimized by the merge?
6678 * A proximity constraint is considered to be optimized
6679 * if the dependence distances are small.
6681 static isl_bool
ok_to_merge_proximity(isl_ctx
*ctx
,
6682 struct isl_sched_graph
*graph
, struct isl_clustering
*c
,
6683 struct isl_sched_graph
*merge_graph
)
6687 for (i
= 0; i
< graph
->n_edge
; ++i
) {
6688 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
6691 if (!is_proximity(edge
))
6693 if (!c
->scc_in_merge
[edge
->src
->scc
])
6695 if (!c
->scc_in_merge
[edge
->dst
->scc
])
6697 if (c
->scc_cluster
[edge
->dst
->scc
] ==
6698 c
->scc_cluster
[edge
->src
->scc
])
6700 bounded
= has_bounded_distances(ctx
, edge
, graph
, c
,
6702 if (bounded
< 0 || bounded
)
6706 return isl_bool_false
;
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * If the current band is empty, then the clusters should not be merged.
 *
 * If the band depth should be maximized and the merge schedule
 * is incomplete (meaning that the dimension of some of the schedule
 * bands in the original schedule will be reduced), then the clusters
 * should not be merged.
 *
 * If the schedule_maximize_coincidence option is set, then check that
 * the number of coincident schedule dimensions is not reduced.
 *
 * Finally, only allow the merge if at least one proximity
 * constraint is optimized.
 */
static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	if (merge_graph->n_total_row == merge_graph->band_start)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
	    merge_graph->n_total_row < merge_graph->maxvar)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
		isl_bool ok;

		ok = ok_to_merge_coincident(c, merge_graph);
		if (ok < 0 || !ok)
			return ok;
	}

	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
}
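/* Usage note (illustrative, not part of the scheduler): the two checks
 * above correspond to user options. Assuming the usual isl option setters,
 * a caller could enable both with
 *
 *	isl_options_set_schedule_maximize_band_depth(ctx, 1);
 *	isl_options_set_schedule_maximize_coincidence(ctx, 1);
 *
 * before computing the schedule.
 */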
/* Apply the schedule in "t_node" to the "n" rows starting at "first"
 * of the schedule in "node" and return the result.
 *
 * That is, essentially compute
 *
 *	T * N(first:first+n-1)
 *
 * taking into account the constant term and the parameter coefficients
 * in the rows of "t_node".
 */
static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
	struct isl_sched_node *t_node, struct isl_sched_node *node,
	int first, int n)
{
	int i, j;
	int n_param, n_var;
	isl_mat *t;
6764 isl_size n_row
, n_col
;
6767 n_param
= node
->nparam
;
6769 n_row
= isl_mat_rows(t_node
->sched
);
6770 n_col
= isl_mat_cols(node
->sched
);
6771 if (n_row
< 0 || n_col
< 0)
6773 t
= isl_mat_alloc(ctx
, n_row
, n_col
);
6776 for (i
= 0; i
< n_row
; ++i
) {
6777 isl_seq_cpy(t
->row
[i
], t_node
->sched
->row
[i
], 1 + n_param
);
6778 isl_seq_clr(t
->row
[i
] + 1 + n_param
, n_var
);
6779 for (j
= 0; j
< n
; ++j
)
6780 isl_seq_addmul(t
->row
[i
],
6781 t_node
->sched
->row
[i
][1 + n_param
+ j
],
6782 node
->sched
->row
[first
+ j
],
6783 1 + n_param
+ n_var
);
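/* Worked example for node_transformation above (illustrative only, with
 * made-up coefficients): if the current band of "node" consists of the
 * two rows
 *
 *	f1(i, j) = i	and	f2(i, j) = j,
 *
 * and the row of "t_node" is t(x, y) = x + 2 * y + 1, then the combined
 * row is t(f1, f2)(i, j) = i + 2 * j + 1. That is, the constant term
 * (and any parameter coefficients) of "t_node" are copied first and the
 * remaining coefficients of "t_node" multiply the original rows of
 * "node", which is exactly what the loops above compute.
 */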
6788 /* Apply the cluster schedule in "t_node" to the current band
6789 * schedule of the nodes in "graph".
6791 * In particular, replace the rows starting at band_start
6792 * by the result of applying the cluster schedule in "t_node"
6793 * to the original rows.
6795 * The coincidence of the schedule is determined by the coincidence
6796 * of the cluster schedule.
6798 static isl_stat
transform(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
6799 struct isl_sched_node
*t_node
)
6805 start
= graph
->band_start
;
6806 n
= graph
->n_total_row
- start
;
6808 n_new
= isl_mat_rows(t_node
->sched
);
6810 return isl_stat_error
;
6811 for (i
= 0; i
< graph
->n
; ++i
) {
6812 struct isl_sched_node
*node
= &graph
->node
[i
];
6815 t
= node_transformation(ctx
, t_node
, node
, start
, n
);
6816 node
->sched
= isl_mat_drop_rows(node
->sched
, start
, n
);
6817 node
->sched
= isl_mat_concat(node
->sched
, t
);
6818 node
->sched_map
= isl_map_free(node
->sched_map
);
6820 return isl_stat_error
;
6821 for (j
= 0; j
< n_new
; ++j
)
6822 node
->coincident
[start
+ j
] = t_node
->coincident
[j
];
6824 graph
->n_total_row
-= n
;
6826 graph
->n_total_row
+= n_new
;
6827 graph
->n_row
+= n_new
;
6832 /* Merge the clusters marked for merging in "c" into a single
6833 * cluster using the cluster schedule in the current band of "merge_graph".
6834 * The representative SCC for the new cluster is the SCC with
6835 * the smallest index.
6837 * The current band schedule of each SCC in the new cluster is obtained
6838 * by applying the schedule of the corresponding original cluster
6839 * to the original band schedule.
6840 * All SCCs in the new cluster have the same number of schedule rows.
6842 static isl_stat
merge(isl_ctx
*ctx
, struct isl_clustering
*c
,
6843 struct isl_sched_graph
*merge_graph
)
6849 for (i
= 0; i
< c
->n
; ++i
) {
6850 struct isl_sched_node
*node
;
6852 if (!c
->scc_in_merge
[i
])
6856 space
= cluster_space(&c
->scc
[i
], c
->scc_cluster
[i
]);
6857 node
= graph_find_node(ctx
, merge_graph
, space
);
6858 isl_space_free(space
);
6860 return isl_stat_error
;
6861 if (!is_node(merge_graph
, node
))
6862 isl_die(ctx
, isl_error_internal
,
6863 "unable to find cluster",
6864 return isl_stat_error
);
6865 if (transform(ctx
, &c
->scc
[i
], node
) < 0)
6866 return isl_stat_error
;
6867 c
->scc_cluster
[i
] = cluster
;
6873 /* Try and merge the clusters of SCCs marked in c->scc_in_merge
6874 * by scheduling the current cluster bands with respect to each other.
6876 * Construct a dependence graph with a space for each cluster and
6877 * with the coordinates of each space corresponding to the schedule
6878 * dimensions of the current band of that cluster.
6879 * Construct a cluster schedule in this cluster dependence graph and
6880 * apply it to the current cluster bands if it is applicable
6881 * according to ok_to_merge.
6883 * If the number of remaining schedule dimensions in a cluster
6884 * with a non-maximal current schedule dimension is greater than
6885 * the number of remaining schedule dimensions in clusters
6886 * with a maximal current schedule dimension, then restrict
6887 * the number of rows to be computed in the cluster schedule
6888 * to the minimal such non-maximal current schedule dimension.
6889 * Do this by adjusting merge_graph.maxvar.
6891 * Return isl_bool_true if the clusters have effectively been merged
6892 * into a single cluster.
6894 * Note that since the standard scheduling algorithm minimizes the maximal
6895 * distance over proximity constraints, the proximity constraints between
6896 * the merged clusters may not be optimized any further than what is
6897 * sufficient to bring the distances within the limits of the internal
6898 * proximity constraints inside the individual clusters.
6899 * It may therefore make sense to perform an additional translation step
6900 * to bring the clusters closer to each other, while maintaining
6901 * the linear part of the merging schedule found using the standard
6902 * scheduling algorithm.
6904 static isl_bool
try_merge(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
6905 struct isl_clustering
*c
)
6907 struct isl_sched_graph merge_graph
= { 0 };
6910 if (init_merge_graph(ctx
, graph
, c
, &merge_graph
) < 0)
6913 if (compute_maxvar(&merge_graph
) < 0)
6915 if (adjust_maxvar_to_slack(ctx
, &merge_graph
,c
) < 0)
6917 if (compute_schedule_wcc_band(ctx
, &merge_graph
) < 0)
6919 merged
= ok_to_merge(ctx
, graph
, c
, &merge_graph
);
6920 if (merged
&& merge(ctx
, c
, &merge_graph
) < 0)
6923 graph_free(ctx
, &merge_graph
);
6926 graph_free(ctx
, &merge_graph
);
6927 return isl_bool_error
;
6930 /* Is there any edge marked "no_merge" between two SCCs that are
6931 * about to be merged (i.e., that are set in "scc_in_merge")?
6932 * "merge_edge" is the proximity edge along which the clusters of SCCs
6933 * are going to be merged.
6935 * If there is any edge between two SCCs with a negative weight,
6936 * while the weight of "merge_edge" is non-negative, then this
6937 * means that the edge was postponed. "merge_edge" should then
6938 * also be postponed since merging along the edge with negative weight should
6939 * be postponed until all edges with non-negative weight have been tried.
6940 * Replace the weight of "merge_edge" by a negative weight as well and
6941 * tell the caller not to attempt a merge.
6943 static int any_no_merge(struct isl_sched_graph
*graph
, int *scc_in_merge
,
6944 struct isl_sched_edge
*merge_edge
)
6948 for (i
= 0; i
< graph
->n_edge
; ++i
) {
6949 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
6951 if (!scc_in_merge
[edge
->src
->scc
])
6953 if (!scc_in_merge
[edge
->dst
->scc
])
6957 if (merge_edge
->weight
>= 0 && edge
->weight
< 0) {
6958 merge_edge
->weight
-= graph
->max_weight
+ 1;
6966 /* Merge the two clusters in "c" connected by the edge in "graph"
6967 * with index "edge" into a single cluster.
6968 * If it turns out to be impossible to merge these two clusters,
6969 * then mark the edge as "no_merge" such that it will not be
6972 * First mark all SCCs that need to be merged. This includes the SCCs
6973 * in the two clusters, but it may also include the SCCs
6974 * of intermediate clusters.
6975 * If there is already a no_merge edge between any pair of such SCCs,
6976 * then simply mark the current edge as no_merge as well.
6977 * Likewise, if any of those edges was postponed by has_bounded_distances,
6978 * then postpone the current edge as well.
6979 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
6980 * if the clusters did not end up getting merged, unless the non-merge
6981 * is due to the fact that the edge was postponed. This postponement
6982 * can be recognized by a change in weight (from non-negative to negative).
6984 static isl_stat
merge_clusters_along_edge(isl_ctx
*ctx
,
6985 struct isl_sched_graph
*graph
, int edge
, struct isl_clustering
*c
)
6988 int edge_weight
= graph
->edge
[edge
].weight
;
6990 if (mark_merge_sccs(ctx
, graph
, edge
, c
) < 0)
6991 return isl_stat_error
;
6993 if (any_no_merge(graph
, c
->scc_in_merge
, &graph
->edge
[edge
]))
6994 merged
= isl_bool_false
;
6996 merged
= try_merge(ctx
, graph
, c
);
6998 return isl_stat_error
;
6999 if (!merged
&& edge_weight
== graph
->edge
[edge
].weight
)
7000 graph
->edge
[edge
].no_merge
= 1;
/* Does "node" belong to the cluster identified by "cluster"?
 */
static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
{
	return node->cluster == cluster;
}

/* Does "edge" connect two nodes belonging to the cluster
 * identified by "cluster"?
 */
static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
{
	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
}

/* Swap the schedule of "node1" and "node2".
 * Both nodes have been derived from the same node in a common parent graph.
 * Since the "coincident" field is shared with that node
 * in the parent graph, there is no need to also swap this field.
 */
static void swap_sched(struct isl_sched_node *node1,
	struct isl_sched_node *node2)
{
	isl_mat *sched;
	isl_map *sched_map;

	sched = node1->sched;
	node1->sched = node2->sched;
	node2->sched = sched;

	sched_map = node1->sched_map;
	node1->sched_map = node2->sched_map;
	node2->sched_map = sched_map;
}
/* Copy the current band schedule from the SCCs that form the cluster
 * with index "pos" to the actual cluster at position "pos".
 * By construction, the index of the first SCC that belongs to the cluster
 * is also "pos".
 *
 * The order of the nodes inside both the SCCs and the cluster
 * is assumed to be the same as the order in the original "graph".
 *
 * Since the SCC graphs will no longer be used after this function,
 * the schedules are actually swapped rather than copied.
 */
static isl_stat copy_partial(struct isl_sched_graph *graph,
	struct isl_clustering *c, int pos)
{
7056 c
->cluster
[pos
].n_total_row
= c
->scc
[pos
].n_total_row
;
7057 c
->cluster
[pos
].n_row
= c
->scc
[pos
].n_row
;
7058 c
->cluster
[pos
].maxvar
= c
->scc
[pos
].maxvar
;
7060 for (i
= 0; i
< graph
->n
; ++i
) {
7064 if (graph
->node
[i
].cluster
!= pos
)
7066 s
= graph
->node
[i
].scc
;
7067 k
= c
->scc_node
[s
]++;
7068 swap_sched(&c
->cluster
[pos
].node
[j
], &c
->scc
[s
].node
[k
]);
7069 if (c
->scc
[s
].maxvar
> c
->cluster
[pos
].maxvar
)
7070 c
->cluster
[pos
].maxvar
= c
->scc
[s
].maxvar
;
/* Is there a (conditional) validity dependence from node[j] to node[i],
 * forcing node[i] to follow node[j], or do the nodes belong to the same
 * cluster?
 */
static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	if (graph->node[i].cluster == graph->node[j].cluster)
		return isl_bool_true;
	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Extract the merged clusters of SCCs in "graph", sort them, and
 * store them in c->cluster. Update c->scc_cluster accordingly.
 *
 * First keep track of the cluster containing the SCC to which a node
 * belongs in the node itself.
 * Then extract the clusters into c->cluster, copying the current
 * band schedule from the SCCs that belong to the cluster.
 * Do this only once per cluster.
 *
 * Finally, topologically sort the clusters and update c->scc_cluster
 * to match the new SCC numbering. While the SCCs were originally
 * sorted already, some SCCs that depend on some other SCCs may
 * have been merged with SCCs that appear before these other SCCs.
 * A reordering may therefore be required.
 */
static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

7110 for (i
= 0; i
< graph
->n
; ++i
)
7111 graph
->node
[i
].cluster
= c
->scc_cluster
[graph
->node
[i
].scc
];
7113 for (i
= 0; i
< graph
->scc
; ++i
) {
7114 if (c
->scc_cluster
[i
] != i
)
7116 if (extract_sub_graph(ctx
, graph
, &node_cluster_exactly
,
7117 &edge_cluster_exactly
, i
, &c
->cluster
[i
]) < 0)
7118 return isl_stat_error
;
7119 c
->cluster
[i
].src_scc
= -1;
7120 c
->cluster
[i
].dst_scc
= -1;
7121 if (copy_partial(graph
, c
, i
) < 0)
7122 return isl_stat_error
;
7125 if (detect_ccs(ctx
, graph
, &node_follows_strong_or_same_cluster
) < 0)
7126 return isl_stat_error
;
7127 for (i
= 0; i
< graph
->n
; ++i
)
7128 c
->scc_cluster
[graph
->node
[i
].scc
] = graph
->node
[i
].cluster
;
/* Compute weights on the proximity edges of "graph" that can
 * be used by find_proximity to find the most appropriate
 * proximity edge to use to merge two clusters in "c".
 * The weights are also used by has_bounded_distances to determine
 * whether the merge should be allowed.
 * Store the maximum of the computed weights in graph->max_weight.
 *
 * The computed weight is a measure for the number of remaining schedule
 * dimensions that can still be completely aligned.
 * In particular, compute the number of equalities between
 * input dimensions and output dimensions in the proximity constraints.
 * The directions that are already handled by outer schedule bands
 * are projected out prior to determining this number.
 *
 * Edges that will never be considered by find_proximity are ignored.
 */
static isl_stat compute_weights(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

7154 graph
->max_weight
= 0;
7156 for (i
= 0; i
< graph
->n_edge
; ++i
) {
7157 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
7158 struct isl_sched_node
*src
= edge
->src
;
7159 struct isl_sched_node
*dst
= edge
->dst
;
7160 isl_basic_map
*hull
;
7162 isl_size n_in
, n_out
;
7164 prox
= is_non_empty_proximity(edge
);
7166 return isl_stat_error
;
7169 if (bad_cluster(&c
->scc
[edge
->src
->scc
]) ||
7170 bad_cluster(&c
->scc
[edge
->dst
->scc
]))
7172 if (c
->scc_cluster
[edge
->dst
->scc
] ==
7173 c
->scc_cluster
[edge
->src
->scc
])
7176 hull
= isl_map_affine_hull(isl_map_copy(edge
->map
));
7177 hull
= isl_basic_map_transform_dims(hull
, isl_dim_in
, 0,
7178 isl_mat_copy(src
->vmap
));
7179 hull
= isl_basic_map_transform_dims(hull
, isl_dim_out
, 0,
7180 isl_mat_copy(dst
->vmap
));
7181 hull
= isl_basic_map_project_out(hull
,
7182 isl_dim_in
, 0, src
->rank
);
7183 hull
= isl_basic_map_project_out(hull
,
7184 isl_dim_out
, 0, dst
->rank
);
7185 hull
= isl_basic_map_remove_divs(hull
);
7186 n_in
= isl_basic_map_dim(hull
, isl_dim_in
);
7187 n_out
= isl_basic_map_dim(hull
, isl_dim_out
);
7188 if (n_in
< 0 || n_out
< 0)
7189 hull
= isl_basic_map_free(hull
);
7190 hull
= isl_basic_map_drop_constraints_not_involving_dims(hull
,
7191 isl_dim_in
, 0, n_in
);
7192 hull
= isl_basic_map_drop_constraints_not_involving_dims(hull
,
7193 isl_dim_out
, 0, n_out
);
7195 return isl_stat_error
;
7196 edge
->weight
= isl_basic_map_n_equality(hull
);
7197 isl_basic_map_free(hull
);
7199 if (edge
->weight
> graph
->max_weight
)
7200 graph
->max_weight
= edge
->weight
;
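/* Illustrative example for compute_weights above (made-up relation): for a
 * proximity relation that forces both target dimensions to be equal to
 * the corresponding source dimensions, the affine hull contains two
 * equalities, so the computed weight is 2. Using the public API, a
 * hypothetical check would be
 *
 *	isl_map *map = isl_map_read_from_str(ctx,
 *		"{ S[i, j] -> T[i, j] : 0 <= i, j < 100 }");
 *	isl_basic_map *hull = isl_map_affine_hull(map);
 *	isl_size weight = isl_basic_map_n_equality(hull);
 *	isl_basic_map_free(hull);
 *
 * before any directions have been projected out.
 */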
7206 /* Call compute_schedule_finish_band on each of the clusters in "c"
7207 * in their topological order. This order is determined by the scc
7208 * fields of the nodes in "graph".
7209 * Combine the results in a sequence expressing the topological order.
7211 * If there is only one cluster left, then there is no need to introduce
7212 * a sequence node. Also, in this case, the cluster necessarily contains
7213 * the SCC at position 0 in the original graph and is therefore also
7214 * stored in the first cluster of "c".
7216 static __isl_give isl_schedule_node
*finish_bands_clustering(
7217 __isl_take isl_schedule_node
*node
, struct isl_sched_graph
*graph
,
7218 struct isl_clustering
*c
)
7222 isl_union_set_list
*filters
;
7224 if (graph
->scc
== 1)
7225 return compute_schedule_finish_band(node
, &c
->cluster
[0], 0);
7227 ctx
= isl_schedule_node_get_ctx(node
);
7229 filters
= extract_sccs(ctx
, graph
);
7230 node
= isl_schedule_node_insert_sequence(node
, filters
);
7232 for (i
= 0; i
< graph
->scc
; ++i
) {
7233 int j
= c
->scc_cluster
[i
];
7234 node
= isl_schedule_node_child(node
, i
);
7235 node
= isl_schedule_node_child(node
, 0);
7236 node
= compute_schedule_finish_band(node
, &c
->cluster
[j
], 0);
7237 node
= isl_schedule_node_parent(node
);
7238 node
= isl_schedule_node_parent(node
);
/* Compute a schedule for a connected dependence graph by first considering
 * each strongly connected component (SCC) in the graph separately and then
 * incrementally combining them into clusters.
 * Return the updated schedule node.
 *
 * Initially, each cluster consists of a single SCC, each with its
 * own band schedule. The algorithm then tries to merge pairs
 * of clusters along a proximity edge until no more suitable
 * proximity edges can be found. During this merging, the schedule
 * is maintained in the individual SCCs.
 * After the merging is completed, the full resulting clusters
 * are extracted and in finish_bands_clustering,
 * compute_schedule_finish_band is called on each of them to integrate
 * the band into "node" and to continue the computation.
 *
 * compute_weights initializes the weights that are used by find_proximity.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;
	struct isl_clustering c;
	int i;

7268 ctx
= isl_schedule_node_get_ctx(node
);
7270 if (clustering_init(ctx
, &c
, graph
) < 0)
7273 if (compute_weights(graph
, &c
) < 0)
7277 i
= find_proximity(graph
, &c
);
7280 if (i
>= graph
->n_edge
)
7282 if (merge_clusters_along_edge(ctx
, graph
, i
, &c
) < 0)
7286 if (extract_clusters(ctx
, graph
, &c
) < 0)
7289 node
= finish_bands_clustering(node
, graph
, &c
);
7291 clustering_free(ctx
, &c
);
7294 clustering_free(ctx
, &c
);
7295 return isl_schedule_node_free(node
);
/* Compute a schedule for a connected dependence graph and return
 * the updated schedule node.
 *
 * If Feautrier's algorithm is selected, we first recursively try to satisfy
 * as many validity dependences as possible. When all validity dependences
 * are satisfied, we extend the schedule to a full-dimensional schedule.
 *
 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
 * depending on whether the user has selected the option to try and
 * compute a schedule for the entire (weakly connected) component first.
 * If there is only a single strongly connected component (SCC), then
 * there is no point in trying to combine SCCs
 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
 * is called instead.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

7321 ctx
= isl_schedule_node_get_ctx(node
);
7322 if (detect_sccs(ctx
, graph
) < 0)
7323 return isl_schedule_node_free(node
);
7325 if (compute_maxvar(graph
) < 0)
7326 return isl_schedule_node_free(node
);
7328 if (need_feautrier_step(ctx
, graph
))
7329 return compute_schedule_wcc_feautrier(node
, graph
);
7331 if (graph
->scc
<= 1 || isl_options_get_schedule_whole_component(ctx
))
7332 return compute_schedule_wcc_whole(node
, graph
);
7334 return compute_schedule_wcc_clustering(node
, graph
);
7337 /* Compute a schedule for each group of nodes identified by node->scc
7338 * separately and then combine them in a sequence node (or as set node
7339 * if graph->weak is set) inserted at position "node" of the schedule tree.
7340 * Return the updated schedule node.
7342 * If "wcc" is set then each of the groups belongs to a single
7343 * weakly connected component in the dependence graph so that
7344 * there is no need for compute_sub_schedule to look for weakly
7345 * connected components.
7347 * If a set node would be introduced and if the number of components
7348 * is equal to the number of nodes, then check if the schedule
7349 * is already complete. If so, a redundant set node would be introduced
7350 * (without any further descendants) stating that the statements
7351 * can be executed in arbitrary order, which is also expressed
7352 * by the absence of any node. Refrain from inserting any nodes
7353 * in this case and simply return.
7355 static __isl_give isl_schedule_node
*compute_component_schedule(
7356 __isl_take isl_schedule_node
*node
, struct isl_sched_graph
*graph
,
7361 isl_union_set_list
*filters
;
7366 if (graph
->weak
&& graph
->scc
== graph
->n
) {
7367 if (compute_maxvar(graph
) < 0)
7368 return isl_schedule_node_free(node
);
7369 if (graph
->n_row
>= graph
->maxvar
)
7373 ctx
= isl_schedule_node_get_ctx(node
);
7374 filters
= extract_sccs(ctx
, graph
);
7376 node
= isl_schedule_node_insert_set(node
, filters
);
7378 node
= isl_schedule_node_insert_sequence(node
, filters
);
7380 for (component
= 0; component
< graph
->scc
; ++component
) {
7381 node
= isl_schedule_node_child(node
, component
);
7382 node
= isl_schedule_node_child(node
, 0);
7383 node
= compute_sub_schedule(node
, ctx
, graph
,
7385 &edge_scc_exactly
, component
, wcc
);
7386 node
= isl_schedule_node_parent(node
);
7387 node
= isl_schedule_node_parent(node
);
/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	} else {
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	}

	if (graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
}
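/* Usage note (illustrative, not part of the scheduler): the serializing
 * behaviour described above is requested through a user option. Assuming
 * the usual isl option setter, a caller would set
 *
 *	isl_options_set_schedule_serialize_sccs(ctx, 1);
 *
 * before computing the schedule.
 */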
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;
	isl_size n;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	n = isl_union_set_n_set(domain);
	if (n == 0) {
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);
	}

	if (n < 0 || graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);

	node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);

	return sched;
}
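/* Example use of the entry point above (illustrative only; the domain and
 * dependence strings are made up and error handling is omitted):
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
 *	isl_union_map *validity = isl_union_map_read_from_str(ctx,
 *		"{ S[i] -> T[i] }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc, validity);
 *	isl_schedule *schedule = isl_schedule_constraints_compute_schedule(sc);
 *	...
 *	isl_schedule_free(schedule);
 *	isl_ctx_free(ctx);
 */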
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
{
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}