/*
 * Copyright 2011      INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015-2016 Sven Verdoolaege
 * Copyright 2016      INRIA Paris
 * Copyright 2017      Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Centre de Recherche Inria de Paris, 2 rue Simone Iff - Voie DQ12,
 * CS 42112, 75589 Paris Cedex 12, France
 */
#include <isl_ctx_private.h>
#include <isl_map_private.h>
#include <isl_space_private.h>
#include <isl_aff_private.h>
#include <isl/constraint.h>
#include <isl/schedule.h>
#include <isl_schedule_constraints.h>
#include <isl/schedule_node.h>
#include <isl_mat_private.h>
#include <isl_vec_private.h>
#include <isl_union_set_private.h>
#include <isl_dim_map.h>
#include <isl/map_to_basic_set.h>
#include <isl_options_private.h>
#include <isl_tarjan.h>
#include <isl_morph.h>
#include <isl_val_private.h>
/* The scheduling algorithm implemented in this file was inspired by
 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
 * Parallelization and Locality Optimization in the Polyhedral Model".
 *
 * For a detailed description of the variant implemented in isl,
 * see Verdoolaege and Janssens, "Scheduling for PPCG" (2017).
 */
/* Internal information about a node that is used during the construction
 * of a schedule.
 * space represents the original space in which the domain lives;
 *	that is, the space is not affected by compression
 * sched is a matrix representation of the schedule being constructed
 *	for this node; if compressed is set, then this schedule is
 *	defined over the compressed domain space
 * sched_map is an isl_map representation of the same (partial) schedule
 *	sched_map may be NULL; if compressed is set, then this map
 *	is defined over the uncompressed domain space
 * rank is the number of linearly independent rows in the linear part
 *	of sched
 * the rows of "vmap" represent a change of basis for the node
 *	variables; the first rank rows span the linear part of
 *	the schedule rows; the remaining rows are linearly independent
 * the rows of "indep" represent linear combinations of the schedule
 *	coefficients that are non-zero when the schedule coefficients are
 *	linearly independent of previously computed schedule rows.
 * start is the first variable in the LP problem in the sequences that
 *	represents the schedule coefficients of this node
 * nvar is the dimension of the (compressed) domain
 * nparam is the number of parameters or 0 if we are not constructing
 *	a parametric schedule
 *
 * If compressed is set, then hull represents the constraints
 * that were used to derive the compression, while compress and
 * decompress map the original space to the compressed space and
 * vice versa.
 *
 * scc is the index of the SCC (or WCC) this node belongs to
 *
 * "cluster" is only used inside extract_clusters and identifies
 * the cluster of SCCs that the node belongs to.
 *
 * coincident contains a boolean for each of the rows of the schedule,
 * indicating whether the corresponding scheduling dimension satisfies
 * the coincidence constraints in the sense that the corresponding
 * dependence distances are zero.
 *
 * If the schedule_treat_coalescing option is set, then
 * "sizes" contains the sizes of the (compressed) instance set
 * in each direction.  If there is no fixed size in a given direction,
 * then the corresponding size value is set to infinity.
 * If the schedule_treat_coalescing option or the schedule_max_coefficient
 * option is set, then "max" contains the maximal values for
 * schedule coefficients of the (compressed) variables.  If no bound
 * needs to be imposed on a particular variable, then the corresponding
 * value is negative.
 * If not NULL, then "bounds" contains a non-parametric set
 * in the compressed space that is bounded by the size in each direction.
 */
struct isl_sched_node {
	isl_space *space;
	int	compressed;
	isl_set	*hull;
	isl_multi_aff *compress;
	isl_pw_multi_aff *decompress;
	isl_mat *sched;
	isl_map *sched_map;
	int	 rank;
	isl_mat *indep;
	isl_mat *vmap;
	int	 start;
	int	 nvar;
	int	 nparam;

	int	 scc;
	int	 cluster;

	int	*coincident;

	isl_multi_val *sizes;
	isl_basic_set *bounds;
	isl_vec *max;
};
static isl_bool node_has_tuples(const void *entry, const void *val)
{
	struct isl_sched_node *node = (struct isl_sched_node *)entry;
	isl_space *space = (isl_space *) val;

	return isl_space_has_equal_tuples(node->space, space);
}
static int node_scc_exactly(struct isl_sched_node *node, int scc)
{
	return node->scc == scc;
}

static int node_scc_at_most(struct isl_sched_node *node, int scc)
{
	return node->scc <= scc;
}

static int node_scc_at_least(struct isl_sched_node *node, int scc)
{
	return node->scc >= scc;
}
/* An edge in the dependence graph.  An edge may be used to
 * ensure validity of the generated schedule, to minimize the dependence
 * distances over it, or to impose one of the other constraint types
 * listed below.
 *
 * map is the dependence relation, with i -> j in the map if j depends on i
 * tagged_condition and tagged_validity contain the union of all tagged
 *	condition or conditional validity dependence relations that
 *	specialize the dependence relation "map"; that is,
 *	if (i -> a) -> (j -> b) is an element of "tagged_condition"
 *	or "tagged_validity", then i -> j is an element of "map".
 *	If these fields are NULL, then they represent the empty relation.
 * src is the source node
 * dst is the sink node
 *
 * types is a bit vector containing the types of this edge.
 * validity is set if the edge is used to ensure correctness
 * coincidence is used to enforce zero dependence distances
 * proximity is set if the edge is used to minimize dependence distances
 * condition is set if the edge represents a condition
 *	for a conditional validity schedule constraint
 * local can only be set for condition edges and indicates that
 *	the dependence distance over the edge should be zero
 * conditional_validity is set if the edge is used to conditionally
 *	ensure correctness
 *
 * For validity edges, start and end mark the sequence of inequality
 * constraints in the LP problem that encode the validity constraint
 * corresponding to this edge.
 *
 * During clustering, an edge may be marked "no_merge" if it should
 * not be used to merge clusters.
 * The weight is also only used during clustering and it is
 * an indication of how many schedule dimensions on either side
 * of the schedule constraints can be aligned.
 * If the weight is negative, then this means that this edge was postponed
 * by has_bounded_distances or any_no_merge.  The original weight can
 * be retrieved by adding 1 + graph->max_weight, with "graph"
 * the graph containing this edge.
 */
struct isl_sched_edge {
	isl_map *map;
	isl_union_map *tagged_condition;
	isl_union_map *tagged_validity;

	struct isl_sched_node *src;
	struct isl_sched_node *dst;

	unsigned types;

	int start;
	int end;

	int no_merge;
	int weight;
};
/* Is "edge" marked as being of type "type"?
 */
static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	return ISL_FL_ISSET(edge->types, 1 << type);
}

/* Mark "edge" as being of type "type".
 */
static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	ISL_FL_SET(edge->types, 1 << type);
}

/* No longer mark "edge" as being of type "type".
 */
static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	ISL_FL_CLR(edge->types, 1 << type);
}
/* Is "edge" marked as a validity edge?
 */
static int is_validity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_validity);
}

/* Mark "edge" as a validity edge.
 */
static void set_validity(struct isl_sched_edge *edge)
{
	set_type(edge, isl_edge_validity);
}

/* Is "edge" marked as a proximity edge?
 */
static int is_proximity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_proximity);
}

/* Is "edge" marked as a local edge?
 */
static int is_local(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_local);
}

/* Mark "edge" as a local edge.
 */
static void set_local(struct isl_sched_edge *edge)
{
	set_type(edge, isl_edge_local);
}

/* No longer mark "edge" as a local edge.
 */
static void clear_local(struct isl_sched_edge *edge)
{
	clear_type(edge, isl_edge_local);
}

/* Is "edge" marked as a coincidence edge?
 */
static int is_coincidence(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_coincidence);
}

/* Is "edge" marked as a condition edge?
 */
static int is_condition(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_condition);
}

/* Is "edge" marked as a conditional validity edge?
 */
static int is_conditional_validity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_conditional_validity);
}
/* Is "edge" of a type that can appear multiple times between
 * the same pair of nodes?
 *
 * Condition edges and conditional validity edges may have tagged
 * dependence relations, in which case an edge is added for each
 * of these tagged dependence relations.
 */
static int is_multi_edge_type(struct isl_sched_edge *edge)
{
	return is_condition(edge) || is_conditional_validity(edge);
}
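
/* Since "types" is a plain bit vector, a single edge may carry several of
 * the above types at once (merge_edge ORs the types of merged edges).
 * For example, an edge that encodes both a validity and a proximity
 * constraint has
 * types == (1 << isl_edge_validity) | (1 << isl_edge_proximity),
 * so both is_validity() and is_proximity() hold for it.
 */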
/* Internal information about the dependence graph used during
 * the construction of the schedule.
 *
 * intra_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself, possibly without
 *	coefficients for the parameters
 * intra_hmap_param is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself, including coefficients
 *	for the parameters
 * inter_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences between distinct nodes
 * if compression is involved then the key for these maps
 * is the original, uncompressed dependence relation, while
 * the value is the dual of the compressed dependence relation.
 *
 * n is the number of nodes
 * node is the list of nodes
 * maxvar is the maximal number of variables over all nodes
 * max_row is the allocated number of rows in the schedule
 * n_row is the current (maximal) number of linearly independent
 *	rows in the node schedules
 * n_total_row is the current number of rows in the node schedules
 * band_start is the starting row in the node schedules of the current band
 * root is set to the original dependence graph from which this graph
 *	is derived through splitting.  If this graph is not the result of
 *	splitting, then the root field points to the graph itself.
 *
 * sorted contains a list of node indices sorted according to the
 *	SCC to which a node belongs
 *
 * n_edge is the number of edges
 * edge is the list of edges
 * max_edge contains the maximal number of edges of each type;
 *	in particular, it contains the number of edges in the initial graph.
 * edge_table contains pointers into the edge array, hashed on the source
 *	and sink spaces; there is one such table for each type;
 *	a given edge may be referenced from more than one table
 *	if the corresponding relation appears in more than one of the
 *	sets of dependences; however, for each type there is only
 *	a single edge between a given pair of source and sink space
 *	in the entire graph
 *
 * node_table contains pointers into the node array, hashed on the space tuples
 *
 * region contains a list of variable sequences that should be non-trivial
 *
 * lp contains the (I)LP problem used to obtain new schedule rows
 *
 * src_scc and dst_scc are the source and sink SCCs of an edge with
 *	conflicting constraints
 *
 * scc represents the number of components
 * weak is set if the components are weakly connected
 *
 * max_weight is used during clustering and represents the maximal
 * weight of the relevant proximity edges.
 */
struct isl_sched_graph {
	isl_map_to_basic_set *intra_hmap;
	isl_map_to_basic_set *intra_hmap_param;
	isl_map_to_basic_set *inter_hmap;

	struct isl_sched_node *node;
	int n;
	int maxvar;
	int max_row;
	int n_row;

	int *sorted;

	int n_total_row;
	int band_start;

	struct isl_sched_graph *root;

	struct isl_sched_edge *edge;
	int n_edge;
	int max_edge[isl_edge_last + 1];
	struct isl_hash_table *edge_table[isl_edge_last + 1];

	struct isl_hash_table *node_table;
	struct isl_trivial_region *region;

	isl_basic_set *lp;

	int src_scc;
	int dst_scc;

	int scc;
	int weak;

	int max_weight;
};
/* Initialize node_table based on the list of nodes.
 */
static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	graph->node_table = isl_hash_table_alloc(ctx, graph->n);
	if (!graph->node_table)
		return -1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_hash_table_entry *entry;
		uint32_t hash;

		hash = isl_space_get_tuple_hash(graph->node[i].space);
		entry = isl_hash_table_find(ctx, graph->node_table, hash,
					    &node_has_tuples,
					    graph->node[i].space, 1);
		if (!entry)
			return -1;
		entry->data = &graph->node[i];
	}

	return 0;
}
/* Return a pointer to the node that lives within the given space,
 * an invalid node if there is no such node, or NULL in case of error.
 */
static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_space *space)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	if (!space)
		return NULL;

	hash = isl_space_get_tuple_hash(space);
	entry = isl_hash_table_find(ctx, graph->node_table, hash,
				    &node_has_tuples, space, 0);
	if (!entry)
		return NULL;
	if (entry == isl_hash_table_entry_none)
		return graph->node + graph->n;

	return entry->data;
}
/* Is "node" a node in "graph"?
 */
static int is_node(struct isl_sched_graph *graph,
	struct isl_sched_node *node)
{
	return node && node >= &graph->node[0] && node < &graph->node[graph->n];
}
static isl_bool edge_has_src_and_dst(const void *entry, const void *val)
{
	const struct isl_sched_edge *edge = entry;
	const struct isl_sched_edge *temp = val;

	return isl_bool_ok(edge->src == temp->src && edge->dst == temp->dst);
}
/* Add the given edge to graph->edge_table[type].
 */
static isl_stat graph_edge_table_add(isl_ctx *ctx,
	struct isl_sched_graph *graph, enum isl_edge_type type,
	struct isl_sched_edge *edge)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, edge->src);
	hash = isl_hash_builtin(hash, edge->dst);
	entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, edge, 1);
	if (!entry)
		return isl_stat_error;
	entry->data = edge;

	return isl_stat_ok;
}
/* Add "edge" to all relevant edge tables.
 * That is, for every type of the edge, add it to the corresponding table.
 */
static isl_stat graph_edge_tables_add(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_sched_edge *edge)
{
	enum isl_edge_type t;

	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
		if (!is_type(edge, t))
			continue;
		if (graph_edge_table_add(ctx, graph, t, edge) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}
/* Allocate the edge_tables based on the maximal number of edges of
 * each type.
 */
static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i <= isl_edge_last; ++i) {
		graph->edge_table[i] = isl_hash_table_alloc(ctx,
							    graph->max_edge[i]);
		if (!graph->edge_table[i])
			return -1;
	}

	return 0;
}
/* If graph->edge_table[type] contains an edge from the given source
 * to the given destination, then return the hash table entry of this edge.
 * Otherwise, return NULL.
 */
static struct isl_hash_table_entry *graph_find_edge_entry(
	struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_ctx *ctx = isl_space_get_ctx(src->space);
	uint32_t hash;
	struct isl_sched_edge temp = { .src = src, .dst = dst };

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, temp.src);
	hash = isl_hash_builtin(hash, temp.dst);
	return isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, &temp, 0);
}
/* If graph->edge_table[type] contains an edge from the given source
 * to the given destination, then return this edge.
 * Return "none" if no such edge can be found.
 * Return NULL on error.
 */
static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst,
	struct isl_sched_edge *none)
{
	struct isl_hash_table_entry *entry;

	entry = graph_find_edge_entry(graph, type, src, dst);
	if (!entry)
		return NULL;
	if (entry == isl_hash_table_entry_none)
		return none;

	return entry->data;
}
/* Check whether the dependence graph has an edge of the given type
 * between the given two nodes.
 */
static isl_bool graph_has_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	struct isl_sched_edge dummy;
	struct isl_sched_edge *edge;
	isl_bool empty;

	edge = graph_find_edge(graph, type, src, dst, &dummy);
	if (!edge)
		return isl_bool_error;
	if (edge == &dummy)
		return isl_bool_false;

	empty = isl_map_plain_is_empty(edge->map);

	return isl_bool_not(empty);
}
/* Look for any edge with the same src, dst and map fields as "model".
 *
 * Return the matching edge if one can be found.
 * Return "model" if no matching edge is found.
 * Return NULL on error.
 */
static struct isl_sched_edge *graph_find_matching_edge(
	struct isl_sched_graph *graph, struct isl_sched_edge *model)
{
	enum isl_edge_type i;
	struct isl_sched_edge *edge;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		int is_equal;

		edge = graph_find_edge(graph, i, model->src, model->dst, model);
		if (!edge)
			return NULL;
		if (edge == model)
			continue;
		is_equal = isl_map_plain_is_equal(model->map, edge->map);
		if (is_equal < 0)
			return NULL;
		if (is_equal)
			return edge;
	}

	return model;
}
/* Remove the given edge from all the edge_tables that refer to it.
 */
static isl_stat graph_remove_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	isl_ctx *ctx = isl_map_get_ctx(edge->map);
	enum isl_edge_type i;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		struct isl_hash_table_entry *entry;

		entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
		if (!entry)
			return isl_stat_error;
		if (entry == isl_hash_table_entry_none)
			continue;
		if (entry->data != edge)
			continue;
		isl_hash_table_remove(ctx, graph->edge_table[i], entry);
	}

	return isl_stat_ok;
}
/* Check whether the dependence graph has any edge
 * between the given two nodes.
 */
static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	enum isl_edge_type i;
	isl_bool r;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		r = graph_has_edge(graph, i, src, dst);
		if (r < 0 || r)
			return r;
	}

	return r;
}
/* Check whether the dependence graph has a validity edge
 * between the given two nodes.
 *
 * Conditional validity edges are essentially validity edges that
 * can be ignored if the corresponding condition edges are iteration private.
 * Here, we are only checking for the presence of validity
 * edges, so we need to consider the conditional validity edges too.
 * In particular, this function is used during the detection
 * of strongly connected components and we cannot ignore
 * conditional validity edges during this detection.
 */
static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_bool r;

	r = graph_has_edge(graph, isl_edge_validity, src, dst);
	if (r < 0 || r)
		return r;

	return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
}
/* Perform all the required memory allocations for a schedule graph "graph"
 * with "n_node" nodes and "n_edge" edges and initialize the corresponding
 * fields.
 */
static isl_stat graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
	int n_node, int n_edge)
{
	int i;

	graph->n = n_node;
	graph->n_edge = n_edge;
	graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
	graph->sorted = isl_calloc_array(ctx, int, graph->n);
	graph->region = isl_alloc_array(ctx,
					struct isl_trivial_region, graph->n);
	graph->edge = isl_calloc_array(ctx,
					struct isl_sched_edge, graph->n_edge);

	graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
	graph->intra_hmap_param = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
	graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);

	if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
	    !graph->sorted)
		return isl_stat_error;

	for(i = 0; i < graph->n; ++i)
		graph->sorted[i] = i;

	return isl_stat_ok;
}
/* Free the memory associated to node "node" in "graph".
 * The "coincident" field is shared by nodes in a graph and its subgraph.
 * It therefore only needs to be freed for the original dependence graph,
 * i.e., one that is not the result of splitting.
 */
static void clear_node(struct isl_sched_graph *graph,
	struct isl_sched_node *node)
{
	isl_space_free(node->space);
	isl_set_free(node->hull);
	isl_multi_aff_free(node->compress);
	isl_pw_multi_aff_free(node->decompress);
	isl_mat_free(node->sched);
	isl_map_free(node->sched_map);
	isl_mat_free(node->indep);
	isl_mat_free(node->vmap);
	if (graph->root == graph)
		free(node->coincident);
	isl_multi_val_free(node->sizes);
	isl_basic_set_free(node->bounds);
	isl_vec_free(node->max);
}
static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	isl_map_to_basic_set_free(graph->intra_hmap);
	isl_map_to_basic_set_free(graph->intra_hmap_param);
	isl_map_to_basic_set_free(graph->inter_hmap);

	if (graph->node)
		for (i = 0; i < graph->n; ++i)
			clear_node(graph, &graph->node[i]);
	free(graph->node);
	free(graph->sorted);
	for (i = 0; i < graph->n_edge; ++i) {
		isl_map_free(graph->edge[i].map);
		isl_union_map_free(graph->edge[i].tagged_condition);
		isl_union_map_free(graph->edge[i].tagged_validity);
	}
	free(graph->edge);
	free(graph->region);
	for (i = 0; i <= isl_edge_last; ++i)
		isl_hash_table_free(ctx, graph->edge_table[i]);
	isl_hash_table_free(ctx, graph->node_table);
	isl_basic_set_free(graph->lp);
}
/* For each "set" on which this function is called, increment
 * graph->n by one and update graph->maxvar.
 */
static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
{
	struct isl_sched_graph *graph = user;
	isl_size nvar = isl_set_dim(set, isl_dim_set);

	graph->n++;
	if (nvar > graph->maxvar)
		graph->maxvar = nvar;

	isl_set_free(set);

	if (nvar < 0)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Compute the number of rows that should be allocated for the schedule.
 * In particular, we need one row for each variable or one row
 * for each basic map in the dependences.
 * Note that it is practically impossible to exhaust both
 * the number of dependences and the number of variables.
 */
static isl_stat compute_max_row(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	isl_size n_edge;
	isl_stat r;
	isl_union_set *domain;

	graph->n = 0;
	graph->maxvar = 0;
	domain = isl_schedule_constraints_get_domain(sc);
	r = isl_union_set_foreach_set(domain, &init_n_maxvar, graph);
	isl_union_set_free(domain);
	if (r < 0)
		return isl_stat_error;
	n_edge = isl_schedule_constraints_n_basic_map(sc);
	if (n_edge < 0)
		return isl_stat_error;
	graph->max_row = n_edge + graph->maxvar;

	return isl_stat_ok;
}
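
/* As a small illustration of the bound above: for two statements with
 * 2 and 3 set dimensions (so maxvar = 3) and dependences consisting of
 * 7 basic maps in total, max_row = 7 + 3 = 10.
 */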
/* Does "bset" have any defining equalities for its set variables?
 */
static isl_bool has_any_defining_equality(__isl_keep isl_basic_set *bset)
{
	int i;
	isl_size n;

	n = isl_basic_set_dim(bset, isl_dim_set);
	if (n < 0)
		return isl_bool_error;

	for (i = 0; i < n; ++i) {
		isl_bool has;

		has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
							NULL);
		if (has < 0 || has)
			return has;
	}

	return isl_bool_false;
}
/* Set the entries of node->max to the value of the schedule_max_coefficient
 * option, if set.
 */
static isl_stat set_max_coefficient(isl_ctx *ctx, struct isl_sched_node *node)
{
	int max;

	max = isl_options_get_schedule_max_coefficient(ctx);
	if (max == -1)
		return isl_stat_ok;

	node->max = isl_vec_alloc(ctx, node->nvar);
	node->max = isl_vec_set_si(node->max, max);
	if (!node->max)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Set the entries of node->max to the minimum of the schedule_max_coefficient
 * option (if set) and half of the minimum of the sizes in the other
 * dimensions.  Round up when computing the half such that
 * if the minimum of the sizes is one, half of the size is taken to be one
 * rather than zero.
 * If the global minimum is unbounded (i.e., if both
 * the schedule_max_coefficient is not set and the sizes in the other
 * dimensions are unbounded), then store a negative value.
 * If the schedule coefficient is close to the size of the instance set
 * in another dimension, then the schedule may represent a loop
 * coalescing transformation (especially if the coefficient
 * in that other dimension is one).  Forcing the coefficient to be
 * smaller than or equal to half the minimal size should avoid this
 * situation.
 */
static isl_stat compute_max_coefficient(isl_ctx *ctx,
	struct isl_sched_node *node)
{
	int max;
	int i, j;
	isl_vec *v;

	max = isl_options_get_schedule_max_coefficient(ctx);
	v = isl_vec_alloc(ctx, node->nvar);
	if (!v)
		return isl_stat_error;

	for (i = 0; i < node->nvar; ++i) {
		isl_int_set_si(v->el[i], max);
		isl_int_mul_si(v->el[i], v->el[i], 2);
	}

	for (i = 0; i < node->nvar; ++i) {
		isl_val *size;

		size = isl_multi_val_get_val(node->sizes, i);
		if (!size)
			goto error;
		if (!isl_val_is_int(size)) {
			isl_val_free(size);
			continue;
		}
		for (j = 0; j < node->nvar; ++j) {
			if (j == i)
				continue;
			if (isl_int_is_neg(v->el[j]) ||
			    isl_int_gt(v->el[j], size->n))
				isl_int_set(v->el[j], size->n);
		}
		isl_val_free(size);
	}

	for (i = 0; i < node->nvar; ++i)
		isl_int_cdiv_q_ui(v->el[i], v->el[i], 2);

	node->max = v;
	return isl_stat_ok;
error:
	isl_vec_free(v);
	return isl_stat_error;
}
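
/* As a worked example of the rule above: for a node with sizes
 * (10, 4, infinity) and no schedule_max_coefficient option, the resulting
 * bounds are (2, 5, 2), since each coefficient is limited to half
 * (rounded up) of the smallest size among the *other* dimensions:
 * ceil(min(4, inf)/2) = 2, ceil(min(10, inf)/2) = 5, ceil(min(10, 4)/2) = 2.
 */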
/* Construct an identifier for node "node", which will represent "set".
 * The name of the identifier is either "compressed" or
 * "compressed_<name>", with <name> the name of the space of "set".
 * The user pointer of the identifier points to "node".
 */
static __isl_give isl_id *construct_compressed_id(__isl_keep isl_set *set,
	struct isl_sched_node *node)
{
	isl_bool has_name;
	isl_ctx *ctx;
	isl_id *id;
	isl_printer *p;
	const char *name;
	char *id_name;

	has_name = isl_set_has_tuple_name(set);
	if (has_name < 0)
		return NULL;

	ctx = isl_set_get_ctx(set);
	if (!has_name)
		return isl_id_alloc(ctx, "compressed", node);

	p = isl_printer_to_str(ctx);
	name = isl_set_get_tuple_name(set);
	p = isl_printer_print_str(p, "compressed_");
	p = isl_printer_print_str(p, name);
	id_name = isl_printer_get_str(p);
	isl_printer_free(p);

	id = isl_id_alloc(ctx, id_name, node);
	free(id_name);

	return id;
}
/* Construct a map that isolates the variable in position "pos" in "set".
 *
 * That is, construct
 *
 *	[i_0, ..., i_pos-1, i_pos+1, ...] -> [i_pos]
 */
static __isl_give isl_map *isolate(__isl_take isl_set *set, int pos)
{
	isl_map *map;

	map = isl_set_project_onto_map(set, isl_dim_set, pos, 1);
	map = isl_map_project_out(map, isl_dim_in, pos, 1);
	return map;
}
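
/* For example, for set = { [a, b, c] : ... } and pos = 1, the projection
 * onto a map first produces { [a, b, c] -> [b] : ... }, and projecting out
 * input dimension 1 then yields the isolating map { [a, c] -> [b] : ... }.
 */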
/* Compute and return the size of "set" in dimension "dim".
 * The size is taken to be the difference in values for that variable
 * for fixed values of the other variables.
 * This assumes that "set" is convex.
 * In particular, the variable is first isolated from the other variables
 * in the range of a map
 *
 *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
 *
 * and then duplicated
 *
 *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
 *
 * The shared variables are then projected out and the maximal value
 * of i_dim' - i_dim is computed.
 */
static __isl_give isl_val *compute_size(__isl_take isl_set *set, int dim)
{
	isl_map *map;
	isl_local_space *ls;
	isl_aff *obj;
	isl_val *v;

	map = isolate(set, dim);
	map = isl_map_range_product(map, isl_map_copy(map));
	map = isl_set_unwrap(isl_map_range(map));
	set = isl_map_deltas(map);
	ls = isl_local_space_from_space(isl_set_get_space(set));
	obj = isl_aff_var_on_domain(ls, isl_dim_set, 0);
	v = isl_set_max_val(set, obj);
	isl_aff_free(obj);
	isl_set_free(set);

	return v;
}
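
/* As a small worked example: for set = { [i, j] : 0 <= i < 10 and 0 <= j <= i },
 * the size in dimension 0 is 9 (for fixed j, i ranges over an interval of
 * length at most 9) and the size in dimension 1 is also 9 (for i = 9,
 * j ranges from 0 to 9).  If a direction is unbounded, the computed
 * maximum, and hence the size, is infinity.
 */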
/* Perform a compression on "node" where "hull" represents the constraints
 * that were used to derive the compression, while "compress" and
 * "decompress" map the original space to the compressed space and
 * vice versa.
 *
 * If "node" was not compressed already, then simply store
 * the compression information.
 * Otherwise the "original" space is actually the result
 * of a previous compression, which is then combined
 * with the present compression.
 *
 * The dimensionality of the compressed domain is also adjusted.
 * Other information, such as the sizes and the maximal coefficient values,
 * has not been computed yet and therefore does not need to be adjusted.
 */
static isl_stat compress_node(struct isl_sched_node *node,
	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
	__isl_take isl_pw_multi_aff *decompress)
{
	node->nvar = isl_multi_aff_dim(compress, isl_dim_out);
	if (!node->compressed) {
		node->compressed = 1;
		node->hull = hull;
		node->compress = compress;
		node->decompress = decompress;
	} else {
		hull = isl_set_preimage_multi_aff(hull,
					isl_multi_aff_copy(node->compress));
		node->hull = isl_set_intersect(node->hull, hull);
		node->compress = isl_multi_aff_pullback_multi_aff(
						compress, node->compress);
		node->decompress = isl_pw_multi_aff_pullback_pw_multi_aff(
						node->decompress, decompress);
	}

	if (!node->hull || !node->compress || !node->decompress)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Given that dimension "pos" in "set" has a fixed value
 * in terms of the other dimensions, (further) compress "node"
 * by projecting out this dimension.
 * "set" may be the result of a previous compression.
 * "uncompressed" is the original domain (without compression).
 *
 * The compression function simply projects out the dimension.
 * The decompression function adds back the dimension
 * in the right position as an expression of the other dimensions
 * derived from "set".
 * As in extract_node, the compressed space has an identifier
 * that references "node" such that each compressed space is unique and
 * such that the node can be recovered from the compressed space.
 *
 * The constraint removed through the compression is added to the "hull"
 * such that only edges that relate to the original domains
 * are taken into account.
 * In particular, it is obtained by composing compression and decompression and
 * taking the relation among the variables in the range.
 */
static isl_stat project_out_fixed(struct isl_sched_node *node,
	__isl_keep isl_set *uncompressed, __isl_take isl_set *set, int pos)
{
	isl_id *id;
	isl_space *space;
	isl_set *domain;
	isl_map *map;
	isl_multi_aff *compress;
	isl_pw_multi_aff *decompress, *pma;
	isl_multi_pw_aff *mpa;
	isl_set *hull;

	map = isolate(isl_set_copy(set), pos);
	pma = isl_pw_multi_aff_from_map(map);
	domain = isl_pw_multi_aff_domain(isl_pw_multi_aff_copy(pma));
	pma = isl_pw_multi_aff_gist(pma, domain);
	space = isl_pw_multi_aff_get_domain_space(pma);
	mpa = isl_multi_pw_aff_identity(isl_space_map_from_set(space));
	mpa = isl_multi_pw_aff_range_splice(mpa, pos,
				    isl_multi_pw_aff_from_pw_multi_aff(pma));
	decompress = isl_pw_multi_aff_from_multi_pw_aff(mpa);
	space = isl_set_get_space(set);
	compress = isl_multi_aff_project_out_map(space, isl_dim_set, pos, 1);
	id = construct_compressed_id(uncompressed, node);
	compress = isl_multi_aff_set_tuple_id(compress, isl_dim_out, id);
	space = isl_space_reverse(isl_multi_aff_get_space(compress));
	decompress = isl_pw_multi_aff_reset_space(decompress, space);
	pma = isl_pw_multi_aff_pullback_multi_aff(
	    isl_pw_multi_aff_copy(decompress), isl_multi_aff_copy(compress));
	hull = isl_map_range(isl_map_from_pw_multi_aff(pma));

	isl_set_free(set);

	return compress_node(node, hull, compress, decompress);
}
/* Compute the size of the compressed domain in each dimension and
 * store the results in node->sizes.
 * "uncompressed" is the original domain (without compression).
 *
 * First compress the domain if needed and then compute the size
 * in each direction.
 * If the domain is not convex, then the sizes are computed
 * on a convex superset in order to avoid picking up sizes
 * that are valid for the individual disjuncts, but not for
 * the domain as a whole.
 *
 * If any of the sizes turns out to be zero, then this means
 * that this dimension has a fixed value in terms of
 * the other dimensions.  Perform an (extra) compression
 * to remove this dimension.
 */
static isl_stat compute_sizes(struct isl_sched_node *node,
	__isl_keep isl_set *uncompressed)
{
	int j;
	isl_size n;
	isl_multi_val *mv;
	isl_set *set = isl_set_copy(uncompressed);

	if (node->compressed)
		set = isl_set_preimage_pw_multi_aff(set,
				    isl_pw_multi_aff_copy(node->decompress));
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	mv = isl_multi_val_zero(isl_set_get_space(set));
	n = isl_set_dim(set, isl_dim_set);
	if (n < 0)
		mv = isl_multi_val_free(mv);
	for (j = 0; j < n; ++j) {
		isl_val *v;
		isl_bool is_zero;

		v = compute_size(isl_set_copy(set), j);
		is_zero = isl_val_is_zero(v);
		mv = isl_multi_val_set_val(mv, j, v);
		if (is_zero >= 0 && is_zero) {
			isl_multi_val_free(mv);
			if (project_out_fixed(node, uncompressed, set, j) < 0)
				return isl_stat_error;
			return compute_sizes(node, uncompressed);
		}
	}
	node->sizes = mv;
	isl_set_free(set);
	if (!node->sizes)
		return isl_stat_error;
	return isl_stat_ok;
}
/* Compute the size of the instance set "set" of "node", after compression,
 * as well as bounds on the corresponding coefficients, if needed.
 *
 * The sizes are needed when the schedule_treat_coalescing option is set.
 * The bounds are needed when the schedule_treat_coalescing option or
 * the schedule_max_coefficient option is set.
 *
 * If the schedule_treat_coalescing option is not set, then at most
 * the bounds need to be set and this is done in set_max_coefficient.
 * Otherwise, compute the size of the compressed domain
 * in each direction and store the results in node->size.
 * Finally, set the bounds on the coefficients based on the sizes
 * and the schedule_max_coefficient option in compute_max_coefficient.
 */
static isl_stat compute_sizes_and_max(isl_ctx *ctx, struct isl_sched_node *node,
	__isl_take isl_set *set)
{
	isl_stat r;

	if (!isl_options_get_schedule_treat_coalescing(ctx)) {
		isl_set_free(set);
		return set_max_coefficient(ctx, node);
	}

	r = compute_sizes(node, set);
	isl_set_free(set);
	if (r < 0)
		return isl_stat_error;
	return compute_max_coefficient(ctx, node);
}
/* Add a new node to the graph representing the given instance set.
 * "nvar" is the (possibly compressed) number of variables and
 * may be smaller than the number of set variables in "set"
 * if "compressed" is set.
 * If "compressed" is set, then "hull" represents the constraints
 * that were used to derive the compression, while "compress" and
 * "decompress" map the original space to the compressed space and
 * vice versa.
 * If "compressed" is not set, then "hull", "compress" and "decompress"
 * should be NULL.
 *
 * Compute the size of the instance set and bounds on the coefficients,
 * if needed.
 */
static isl_stat add_node(struct isl_sched_graph *graph,
	__isl_take isl_set *set, int nvar, int compressed,
	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
	__isl_take isl_pw_multi_aff *decompress)
{
	isl_size nparam;
	isl_ctx *ctx;
	isl_mat *sched;
	isl_space *space;
	int *coincident;
	struct isl_sched_node *node;

	nparam = isl_set_dim(set, isl_dim_param);
	if (nparam < 0)
		goto error;

	ctx = isl_set_get_ctx(set);
	if (!ctx->opt->schedule_parametric)
		nparam = 0;
	sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
	node = &graph->node[graph->n];
	graph->n++;
	space = isl_set_get_space(set);
	node->space = space;
	node->nvar = nvar;
	node->nparam = nparam;
	node->sched = sched;
	node->sched_map = NULL;
	coincident = isl_calloc_array(ctx, int, graph->max_row);
	node->coincident = coincident;
	node->compressed = compressed;
	node->hull = hull;
	node->compress = compress;
	node->decompress = decompress;
	if (compute_sizes_and_max(ctx, node, set) < 0)
		return isl_stat_error;

	if (!space || !sched || (graph->max_row && !coincident))
		return isl_stat_error;
	if (compressed && (!hull || !compress || !decompress))
		return isl_stat_error;

	return isl_stat_ok;
error:
	isl_set_free(set);
	isl_set_free(hull);
	isl_multi_aff_free(compress);
	isl_pw_multi_aff_free(decompress);
	return isl_stat_error;
}
/* Add a new node to the graph representing the given set.
 *
 * If any of the set variables is defined by an equality, then
 * we perform variable compression such that we can perform
 * the scheduling on the compressed domain.
 * In this case, an identifier is used that references the new node
 * such that each compressed space is unique and
 * such that the node can be recovered from the compressed space.
 */
static isl_stat extract_node(__isl_take isl_set *set, void *user)
{
	isl_size nvar;
	isl_bool has_equality;
	isl_id *id;
	isl_basic_set *hull;
	isl_set *hull_set;
	isl_morph *morph;
	isl_multi_aff *compress, *decompress_ma;
	isl_pw_multi_aff *decompress;
	struct isl_sched_graph *graph = user;

	hull = isl_set_affine_hull(isl_set_copy(set));
	hull = isl_basic_set_remove_divs(hull);
	nvar = isl_set_dim(set, isl_dim_set);
	has_equality = has_any_defining_equality(hull);

	if (nvar < 0 || has_equality < 0)
		goto error;
	if (!has_equality) {
		isl_basic_set_free(hull);
		return add_node(graph, set, nvar, 0, NULL, NULL, NULL);
	}

	id = construct_compressed_id(set, &graph->node[graph->n]);
	morph = isl_basic_set_variable_compression_with_id(hull, id);
	nvar = isl_morph_ran_dim(morph, isl_dim_set);
	if (nvar < 0)
		set = isl_set_free(set);
	compress = isl_morph_get_var_multi_aff(morph);
	morph = isl_morph_inverse(morph);
	decompress_ma = isl_morph_get_var_multi_aff(morph);
	decompress = isl_pw_multi_aff_from_multi_aff(decompress_ma);
	isl_morph_free(morph);

	hull_set = isl_set_from_basic_set(hull);
	return add_node(graph, set, nvar, 1, hull_set, compress, decompress);
error:
	isl_basic_set_free(hull);
	isl_set_free(set);
	return isl_stat_error;
}
struct isl_extract_edge_data {
	enum isl_edge_type type;
	struct isl_sched_graph *graph;
};
/* Merge edge2 into edge1, freeing the contents of edge2.
 * Return 0 on success and -1 on failure.
 *
 * edge1 and edge2 are assumed to have the same value for the map field.
 */
static int merge_edge(struct isl_sched_edge *edge1,
	struct isl_sched_edge *edge2)
{
	edge1->types |= edge2->types;
	isl_map_free(edge2->map);

	if (is_condition(edge2)) {
		if (!edge1->tagged_condition)
			edge1->tagged_condition = edge2->tagged_condition;
		else
			edge1->tagged_condition =
				isl_union_map_union(edge1->tagged_condition,
						    edge2->tagged_condition);
	}

	if (is_conditional_validity(edge2)) {
		if (!edge1->tagged_validity)
			edge1->tagged_validity = edge2->tagged_validity;
		else
			edge1->tagged_validity =
				isl_union_map_union(edge1->tagged_validity,
						    edge2->tagged_validity);
	}

	if (is_condition(edge2) && !edge1->tagged_condition)
		return -1;
	if (is_conditional_validity(edge2) && !edge1->tagged_validity)
		return -1;

	return 0;
}
/* Insert dummy tags in domain and range of "map".
 *
 * In particular, if "map" is of the form
 *
 *	A -> B
 *
 * then the result is of the form
 *
 *	[A -> dummy_tag] -> [B -> dummy_tag]
 *
 * where the dummy_tags are identical and equal to any dummy tags
 * introduced by any other call to this function.
 */
static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
{
	static char dummy;
	isl_ctx *ctx;
	isl_id *id;
	isl_space *space;
	isl_set *domain, *range;

	ctx = isl_map_get_ctx(map);

	id = isl_id_alloc(ctx, NULL, &dummy);
	space = isl_space_params(isl_map_get_space(map));
	space = isl_space_set_from_params(space);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	space = isl_space_map_from_set(space);

	domain = isl_map_wrap(map);
	range = isl_map_wrap(isl_map_universe(space));
	map = isl_map_from_domain_and_range(domain, range);
	map = isl_map_zip(map);

	return map;
}
/* Given that at least one of "src" or "dst" is compressed, return
 * a map between the spaces of these nodes restricted to the affine
 * hull that was used in the compression.
 */
static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_set *dom, *ran;

	if (src->compressed)
		dom = isl_set_copy(src->hull);
	else
		dom = isl_set_universe(isl_space_copy(src->space));
	if (dst->compressed)
		ran = isl_set_copy(dst->hull);
	else
		ran = isl_set_universe(isl_space_copy(dst->space));

	return isl_map_from_domain_and_range(dom, ran);
}
/* Intersect the domains of the nested relations in domain and range
 * of "tagged" with "map".
 */
static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
	__isl_keep isl_map *map)
{
	isl_set *set;

	tagged = isl_map_zip(tagged);
	set = isl_map_wrap(isl_map_copy(map));
	tagged = isl_map_intersect_domain(tagged, set);
	tagged = isl_map_zip(tagged);
	return tagged;
}
/* Return a pointer to the node that lives in the domain space of "map",
 * an invalid node if there is no such node, or NULL in case of error.
 */
static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_domain(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}
/* Return a pointer to the node that lives in the range space of "map",
 * an invalid node if there is no such node, or NULL in case of error.
 */
static struct isl_sched_node *find_range_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_range(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}
/* Refrain from adding a new edge based on "map".
 * Instead, just free the map.
 * "tagged" is either a copy of "map" with additional tags or NULL.
 */
static isl_stat skip_edge(__isl_take isl_map *map, __isl_take isl_map *tagged)
{
	isl_map_free(map);
	isl_map_free(tagged);

	return isl_stat_ok;
}
/* Add a new edge to the graph based on the given map
 * and add it to data->graph->edge_table[data->type].
 * If a dependence relation of a given type happens to be identical
 * to one of the dependence relations of a type that was added before,
 * then we don't create a new edge, but instead mark the original edge
 * as also representing a dependence of the current type.
 *
 * Edges of type isl_edge_condition or isl_edge_conditional_validity
 * may be specified as "tagged" dependence relations.  That is, "map"
 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
 * the dependence on iterations and a and b are tags.
 * edge->map is set to the relation containing the elements i -> j,
 * while edge->tagged_condition and edge->tagged_validity contain
 * the union of all the "map" relations
 * for which extract_edge is called that result in the same edge->map.
 *
 * If the source or the destination node is compressed, then
 * intersect both "map" and "tagged" with the constraints that
 * were used to construct the compression.
 * This ensures that there are no schedule constraints defined
 * outside of these domains, while the scheduler no longer has
 * any control over those outside parts.
 */
static isl_stat extract_edge(__isl_take isl_map *map, void *user)
{
	isl_bool empty;
	isl_ctx *ctx = isl_map_get_ctx(map);
	struct isl_extract_edge_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	struct isl_sched_node *src, *dst;
	struct isl_sched_edge *edge;
	isl_map *tagged = NULL;

	if (data->type == isl_edge_condition ||
	    data->type == isl_edge_conditional_validity) {
		if (isl_map_can_zip(map)) {
			tagged = isl_map_copy(map);
			map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
		} else {
			tagged = insert_dummy_tags(isl_map_copy(map));
		}
	}

	src = find_domain_node(ctx, graph, map);
	dst = find_range_node(ctx, graph, map);

	if (!src || !dst)
		goto error;
	if (!is_node(graph, src) || !is_node(graph, dst))
		return skip_edge(map, tagged);

	if (src->compressed || dst->compressed) {
		isl_map *hull;
		hull = extract_hull(src, dst);
		if (tagged)
			tagged = map_intersect_domains(tagged, hull);
		map = isl_map_intersect(map, hull);
	}

	empty = isl_map_plain_is_empty(map);
	if (empty < 0)
		goto error;
	if (empty)
		return skip_edge(map, tagged);

	graph->edge[graph->n_edge].src = src;
	graph->edge[graph->n_edge].dst = dst;
	graph->edge[graph->n_edge].map = map;
	graph->edge[graph->n_edge].types = 0;
	graph->edge[graph->n_edge].tagged_condition = NULL;
	graph->edge[graph->n_edge].tagged_validity = NULL;
	set_type(&graph->edge[graph->n_edge], data->type);
	if (data->type == isl_edge_condition)
		graph->edge[graph->n_edge].tagged_condition =
					isl_union_map_from_map(tagged);
	if (data->type == isl_edge_conditional_validity)
		graph->edge[graph->n_edge].tagged_validity =
					isl_union_map_from_map(tagged);

	edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
	if (!edge) {
		graph->n_edge++;
		return isl_stat_error;
	}
	if (edge == &graph->edge[graph->n_edge])
		return graph_edge_table_add(ctx, graph, data->type,
					    &graph->edge[graph->n_edge++]);

	if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
		return isl_stat_error;

	return graph_edge_table_add(ctx, graph, data->type, edge);
error:
	isl_map_free(map);
	isl_map_free(tagged);
	return isl_stat_error;
}
/* Initialize the schedule graph "graph" from the schedule constraints "sc".
 *
 * The context is included in the domain before the nodes of
 * the graphs are extracted in order to be able to exploit
 * any possible additional equalities.
 * Note that this intersection is only performed locally here.
 */
static isl_stat graph_init(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_union_set *domain;
	isl_union_map *c;
	struct isl_extract_edge_data data;
	enum isl_edge_type i;
	isl_stat r;
	isl_size n;

	if (!sc)
		return isl_stat_error;

	ctx = isl_schedule_constraints_get_ctx(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	n = isl_union_set_n_set(domain);
	graph->n = n;
	isl_union_set_free(domain);
	if (n < 0)
		return isl_stat_error;

	n = isl_schedule_constraints_n_map(sc);
	if (n < 0 || graph_alloc(ctx, graph, graph->n, n) < 0)
		return isl_stat_error;

	if (compute_max_row(graph, sc) < 0)
		return isl_stat_error;
	graph->root = graph;
	graph->n = 0;
	domain = isl_schedule_constraints_get_domain(sc);
	domain = isl_union_set_intersect_params(domain,
				    isl_schedule_constraints_get_context(sc));
	r = isl_union_set_foreach_set(domain, &extract_node, graph);
	isl_union_set_free(domain);
	if (r < 0)
		return isl_stat_error;
	if (graph_init_table(ctx, graph) < 0)
		return isl_stat_error;
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		c = isl_schedule_constraints_get(sc, i);
		n = isl_union_map_n_map(c);
		graph->max_edge[i] = n;
		isl_union_map_free(c);
		if (n < 0)
			return isl_stat_error;
	}
	if (graph_init_edge_tables(ctx, graph) < 0)
		return isl_stat_error;
	graph->n_edge = 0;
	data.graph = graph;
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		data.type = i;

		c = isl_schedule_constraints_get(sc, i);
		r = isl_union_map_foreach_map(c, &extract_edge, &data);
		isl_union_map_free(c);
		if (r < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}
/* Check whether there is any dependence from node[j] to node[i]
 * or from node[i] to node[j].
 */
static isl_bool node_follows_weak(int i, int j, void *user)
{
	isl_bool f;
	struct isl_sched_graph *graph = user;

	f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
	if (f < 0 || f)
		return f;
	return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
}
/* Check whether there is a (conditional) validity dependence from node[j]
 * to node[i], forcing node[i] to follow node[j].
 */
static isl_bool node_follows_strong(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Use Tarjan's algorithm for computing the strongly connected components
 * in the dependence graph only considering those edges defined by "follows".
 */
static isl_stat detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	isl_bool (*follows)(int i, int j, void *user))
{
	int i, n;
	struct isl_tarjan_graph *g = NULL;

	g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
	if (!g)
		return isl_stat_error;

	graph->scc = 0;
	i = 0;
	n = graph->n;
	while (n) {
		while (g->order[i] != -1) {
			graph->node[g->order[i]].scc = graph->scc;
			--n;
			++i;
		}
		++i;
		graph->scc++;
	}

	isl_tarjan_graph_free(g);

	return isl_stat_ok;
}
/* Apply Tarjan's algorithm to detect the strongly connected components
 * in the dependence graph.
 * Only consider the (conditional) validity dependences and clear "weak".
 */
static isl_stat detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 0;
	return detect_ccs(ctx, graph, &node_follows_strong);
}

/* Apply Tarjan's algorithm to detect the (weakly) connected components
 * in the dependence graph.
 * Consider all dependences and set "weak".
 */
static isl_stat detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 1;
	return detect_ccs(ctx, graph, &node_follows_weak);
}
static int cmp_scc(const void *a, const void *b, void *data)
{
	struct isl_sched_graph *graph = data;
	const int *i1 = a;
	const int *i2 = b;

	return graph->node[*i1].scc - graph->node[*i2].scc;
}

/* Sort the elements of graph->sorted according to the corresponding SCCs.
 */
static int sort_sccs(struct isl_sched_graph *graph)
{
	return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
}
/* Return a non-parametric set in the compressed space of "node" that is
 * bounded by the size in each direction
 *
 *	{ [x] : -S_i <= x_i <= S_i }
 *
 * If S_i is infinity in direction i, then there are no constraints
 * in that direction.
 *
 * Cache the result in node->bounds.
 */
static __isl_give isl_basic_set *get_size_bounds(struct isl_sched_node *node)
{
	int i;
	isl_space *space;
	isl_basic_set *bounds;

	if (node->bounds)
		return isl_basic_set_copy(node->bounds);

	if (node->compressed)
		space = isl_pw_multi_aff_get_domain_space(node->decompress);
	else
		space = isl_space_copy(node->space);
	space = isl_space_drop_all_params(space);
	bounds = isl_basic_set_universe(space);

	for (i = 0; i < node->nvar; ++i) {
		isl_val *size;

		size = isl_multi_val_get_val(node->sizes, i);
		if (!size)
			return isl_basic_set_free(bounds);
		if (!isl_val_is_int(size)) {
			isl_val_free(size);
			continue;
		}
		bounds = isl_basic_set_upper_bound_val(bounds, isl_dim_set, i,
							isl_val_copy(size));
		bounds = isl_basic_set_lower_bound_val(bounds, isl_dim_set, i,
							isl_val_neg(size));
	}

	node->bounds = isl_basic_set_copy(bounds);
	return bounds;
}
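
/* For example, for a node with sizes (4, infinity), the cached set is
 * { [x0, x1] : -4 <= x0 <= 4 }: the unbounded direction contributes
 * no constraints.
 */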
/* Compress the dependence relation "map", if needed, i.e.,
 * when the source node "src" and/or the destination node "dst"
 * has been compressed.
 */
static __isl_give isl_map *compress(__isl_take isl_map *map,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	if (src->compressed)
		map = isl_map_preimage_domain_pw_multi_aff(map,
					isl_pw_multi_aff_copy(src->decompress));
	if (dst->compressed)
		map = isl_map_preimage_range_pw_multi_aff(map,
					isl_pw_multi_aff_copy(dst->decompress));

	return map;
}
/* Drop some constraints from "delta" that could be exploited
 * to construct loop coalescing schedules.
 * In particular, drop those constraints that bound the difference
 * to the size of the domain.
 * First project out the parameters to improve the effectiveness.
 */
static __isl_give isl_set *drop_coalescing_constraints(
	__isl_take isl_set *delta, struct isl_sched_node *node)
{
	isl_size nparam;
	isl_basic_set *bounds;

	nparam = isl_set_dim(delta, isl_dim_param);
	if (nparam < 0)
		return isl_set_free(delta);

	bounds = get_size_bounds(node);

	delta = isl_set_project_out(delta, isl_dim_param, 0, nparam);
	delta = isl_set_remove_divs(delta);
	delta = isl_set_plain_gist_basic_set(delta, bounds);
	return delta;
}
/* Given a dependence relation R from "node" to itself,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x such that
 *
 *	c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
 *
 * or, equivalently,
 *
 *	c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
 *
 * We choose here to compute the dual of delta R.
 * Alternatively, we could have computed the dual of R, resulting
 * in a set of tuples c_0, c_n, c_x, c_y, and then
 * plugged in (c_0, c_n, c_x, -c_x).
 *
 * If "need_param" is set, then the resulting coefficients effectively
 * include coefficients for the parameters c_n.  Otherwise, they may
 * have been projected out already.
 * Since the constraints may be different for these two cases,
 * they are stored in separate caches.
 * In particular, if no parameter coefficients are required and
 * the schedule_treat_coalescing option is set, then the parameters
 * are projected out and some constraints that could be exploited
 * to construct coalescing schedules are removed before the dual
 * is computed.
 *
 * If "node" has been compressed, then the dependence relation
 * is also compressed before the set of coefficients is computed.
 */
static __isl_give isl_basic_set *intra_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_node *node,
	__isl_take isl_map *map, int need_param)
{
	isl_ctx *ctx;
	isl_set *delta;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;
	isl_map_to_basic_set **hmap = &graph->intra_hmap;
	int treat;

	ctx = isl_map_get_ctx(map);
	treat = !need_param && isl_options_get_schedule_treat_coalescing(ctx);
	if (!treat)
		hmap = &graph->intra_hmap_param;
	m = isl_map_to_basic_set_try_get(*hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	map = compress(map, node, node);
	delta = isl_map_deltas(map);
	if (treat)
		delta = drop_coalescing_constraints(delta, node);
	delta = isl_set_remove_divs(delta);
	coef = isl_set_coefficients(delta);
	*hmap = isl_map_to_basic_set_set(*hmap, key, isl_basic_set_copy(coef));

	return coef;
}
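
/* As a small worked example of the duality above: for a self-dependence
 * with constant distance 1, delta R = { [1] }, and the valid constraints
 * c_0 + c_x d >= 0 over this set reduce to c_0 + c_x >= 0, so (ignoring
 * parameters) the computed set of coefficients is
 *
 *	{ coefficients[[c_0] -> [c_x]] : c_0 + c_x >= 0 }
 */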
/* Given a dependence relation R, construct the set of coefficients
 * of valid constraints for elements in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x, c_y such that
 *
 *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
 *
 * If the source or destination nodes of "edge" have been compressed,
 * then the dependence relation is also compressed before
 * the set of coefficients is computed.
 */
static __isl_give isl_basic_set *inter_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_edge *edge,
	__isl_take isl_map *map)
{
	isl_set *set;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;

	m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	map = compress(map, edge->src, edge->dst);
	set = isl_map_wrap(isl_map_remove_divs(map));
	coef = isl_set_coefficients(set);
	graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
					isl_basic_set_copy(coef));

	return coef;
}
/* Return the position of the coefficients of the variables in
 * the coefficients constraints "coef".
 *
 * The space of "coef" is of the form
 *
 *	{ coefficients[[cst, params] -> S] }
 *
 * Return the position of S.
 */
static isl_size coef_var_offset(__isl_keep isl_basic_set *coef)
{
	isl_size offset;
	isl_space *space;

	space = isl_space_unwrap(isl_basic_set_get_space(coef));
	offset = isl_space_dim(space, isl_dim_in);
	isl_space_free(space);

	return offset;
}
/* Return the offset of the coefficient of the constant term of "node"
 * within the LP.
 *
 * Within each node, the coefficients have the following order:
 *	- positive and negative parts of c_i_x
 *	- c_i_n (if parametric)
 *	- c_i_0
 */
static int node_cst_coef_offset(struct isl_sched_node *node)
{
	return node->start + 2 * node->nvar + node->nparam;
}

/* Return the offset of the coefficients of the parameters of "node"
 * within the LP.
 *
 * Within each node, the coefficients have the following order:
 *	- positive and negative parts of c_i_x
 *	- c_i_n (if parametric)
 *	- c_i_0
 */
static int node_par_coef_offset(struct isl_sched_node *node)
{
	return node->start + 2 * node->nvar;
}

/* Return the offset of the coefficients of the variables of "node"
 * within the LP.
 *
 * Within each node, the coefficients have the following order:
 *	- positive and negative parts of c_i_x
 *	- c_i_n (if parametric)
 *	- c_i_0
 */
static int node_var_coef_offset(struct isl_sched_node *node)
{
	return node->start;
}
/* Return the position of the pair of variables encoding
 * coefficient "i" of "node".
 *
 * The order of these variable pairs is the opposite of
 * that of the coefficients, with 2 variables per coefficient.
 */
static int node_var_coef_pos(struct isl_sched_node *node, int i)
{
	return node_var_coef_offset(node) + 2 * (node->nvar - 1 - i);
}
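
/* As a worked example of this layout (with illustrative values): for a node
 * with start = 10, nvar = 2 and nparam = 3, the variable coefficient pairs
 * occupy positions 10-13, with coefficient 1 at positions 10 (c_x^-) and
 * 11 (c_x^+) and coefficient 0 at positions 12 and 13, the parameter
 * coefficients c_n occupy positions 14-16, and the constant term c_0 sits
 * at position 17.
 */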
/* Construct an isl_dim_map for mapping constraints on coefficients
 * for "node" to the corresponding positions in graph->lp.
 * "offset" is the offset of the coefficients for the variables
 * in the input constraints.
 * "s" is the sign of the mapping.
 *
 * The input constraints are given in terms of the coefficients
 * (c_0, c_x) or (c_0, c_n, c_x).
 * The mapping produced by this function essentially plugs in
 * (0, c_i_x^+ - c_i_x^-) if s = 1 and
 * (0, -c_i_x^+ + c_i_x^-) if s = -1 or
 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
 * Furthermore, the order of these pairs is the opposite of that
 * of the corresponding coefficients.
 *
 * The caller can extend the mapping to also map the other coefficients
 * (and therefore not plug in 0).
 */
2001 static __isl_give isl_dim_map
*intra_dim_map(isl_ctx
*ctx
,
2002 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
,
2007 isl_dim_map
*dim_map
;
2009 total
= isl_basic_set_dim(graph
->lp
, isl_dim_all
);
2010 if (!node
|| total
< 0)
2013 pos
= node_var_coef_pos(node
, 0);
2014 dim_map
= isl_dim_map_alloc(ctx
, total
);
2015 isl_dim_map_range(dim_map
, pos
, -2, offset
, 1, node
->nvar
, -s
);
2016 isl_dim_map_range(dim_map
, pos
+ 1, -2, offset
, 1, node
->nvar
, s
);
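/* Illustrative note (not from the original source): consider a node with
 * nvar = 1 and an input constraint a * c_x + b * c_0 + d >= 0 on the
 * coefficients (c_0, c_x).  With s = 1, the mapping constructed above
 * keeps the constant d, drops the c_0 term (plugging in 0) and replaces
 * c_x by c_i_x^+ - c_i_x^-, yielding
 *
 *	a * (c_i_x^+ - c_i_x^-) + d >= 0
 *
 * as a constraint on the graph->lp variables.
 */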
/* Construct an isl_dim_map for mapping constraints on coefficients
 * for "src" (node i) and "dst" (node j) to the corresponding positions
 * in graph->lp.
 * "offset" is the offset of the coefficients for the variables of "src"
 * in the input constraints.
 * "s" is the sign of the mapping.
 *
 * The input constraints are given in terms of the coefficients
 * (c_0, c_n, c_x, c_y).
 * The mapping produced by this function essentially plugs in
 * (c_j_0 - c_i_0, c_j_n - c_i_n,
 *  -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-) if s = 1 and
 * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
 *  c_i_x^+ - c_i_x^-, -(c_j_x^+ - c_j_x^-)) if s = -1.
 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
 * Furthermore, the order of these pairs is the opposite of that
 * of the corresponding coefficients.
 *
 * The caller can further extend the mapping.
 */
2041 static __isl_give isl_dim_map
*inter_dim_map(isl_ctx
*ctx
,
2042 struct isl_sched_graph
*graph
, struct isl_sched_node
*src
,
2043 struct isl_sched_node
*dst
, int offset
, int s
)
2047 isl_dim_map
*dim_map
;
2049 total
= isl_basic_set_dim(graph
->lp
, isl_dim_all
);
2050 if (!src
|| !dst
|| total
< 0)
2053 dim_map
= isl_dim_map_alloc(ctx
, total
);
2055 pos
= node_cst_coef_offset(dst
);
2056 isl_dim_map_range(dim_map
, pos
, 0, 0, 0, 1, s
);
2057 pos
= node_par_coef_offset(dst
);
2058 isl_dim_map_range(dim_map
, pos
, 1, 1, 1, dst
->nparam
, s
);
2059 pos
= node_var_coef_pos(dst
, 0);
2060 isl_dim_map_range(dim_map
, pos
, -2, offset
+ src
->nvar
, 1,
2062 isl_dim_map_range(dim_map
, pos
+ 1, -2, offset
+ src
->nvar
, 1,
2065 pos
= node_cst_coef_offset(src
);
2066 isl_dim_map_range(dim_map
, pos
, 0, 0, 0, 1, -s
);
2067 pos
= node_par_coef_offset(src
);
2068 isl_dim_map_range(dim_map
, pos
, 1, 1, 1, src
->nparam
, -s
);
2069 pos
= node_var_coef_pos(src
, 0);
2070 isl_dim_map_range(dim_map
, pos
, -2, offset
, 1, src
->nvar
, s
);
2071 isl_dim_map_range(dim_map
, pos
+ 1, -2, offset
, 1, src
->nvar
, -s
);
2076 /* Add the constraints from "src" to "dst" using "dim_map",
2077 * after making sure there is enough room in "dst" for the extra constraints.
2079 static __isl_give isl_basic_set
*add_constraints_dim_map(
2080 __isl_take isl_basic_set
*dst
, __isl_take isl_basic_set
*src
,
2081 __isl_take isl_dim_map
*dim_map
)
2083 isl_size n_eq
, n_ineq
;
2085 n_eq
= isl_basic_set_n_equality(src
);
2086 n_ineq
= isl_basic_set_n_inequality(src
);
2087 if (n_eq
< 0 || n_ineq
< 0)
2088 dst
= isl_basic_set_free(dst
);
2089 dst
= isl_basic_set_extend_constraints(dst
, n_eq
, n_ineq
);
2090 dst
= isl_basic_set_add_constraints_dim_map(dst
, src
, dim_map
);
/* Add constraints to graph->lp that force validity for the given
 * dependence from a node i to itself.
 * That is, add constraints that enforce
 *
 *	(c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *	= c_i_x (y - x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_x)
 * of valid constraints for (y - x) and then plug in (0, c_i_x^+ - c_i_x^-),
 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
 * Note that the result of intra_coefficients may also contain
 * parameter coefficients c_n, in which case 0 is plugged in for them as well.
 */
2109 static isl_stat
add_intra_validity_constraints(struct isl_sched_graph
*graph
,
2110 struct isl_sched_edge
*edge
)
2113 isl_map
*map
= isl_map_copy(edge
->map
);
2114 isl_ctx
*ctx
= isl_map_get_ctx(map
);
2115 isl_dim_map
*dim_map
;
2116 isl_basic_set
*coef
;
2117 struct isl_sched_node
*node
= edge
->src
;
2119 coef
= intra_coefficients(graph
, node
, map
, 0);
2121 offset
= coef_var_offset(coef
);
2123 coef
= isl_basic_set_free(coef
);
2125 return isl_stat_error
;
2127 dim_map
= intra_dim_map(ctx
, graph
, node
, offset
, 1);
2128 graph
->lp
= add_constraints_dim_map(graph
->lp
, coef
, dim_map
);
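/* Illustrative note (not from the original source): for a self-dependence
 * R = { A[i] -> A[i + 1] } without parameters, the difference set (y - x)
 * is { [1] }, so the set of coefficients of valid constraints is defined by
 *
 *	c_0 + c_x >= 0
 *
 * Mapping this constraint through intra_dim_map plugs in 0 for c_0 and
 * c_i_x^+ - c_i_x^- for c_x, adding
 *
 *	c_i_x^+ - c_i_x^- >= 0
 *
 * to graph->lp, i.e., the schedule coefficient c_i_x must be non-negative
 * so that the dependence distance c_i_x * 1 is non-negative.
 */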
2133 /* Add constraints to graph->lp that force validity for the given
2134 * dependence from node i to node j.
2135 * That is, add constraints that enforce
2137 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
2139 * for each (x,y) in R.
2140 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2141 * of valid constraints for R and then plug in
2142 * (c_j_0 - c_i_0, c_j_n - c_i_n, -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-),
2143 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
2144 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
2146 static isl_stat
add_inter_validity_constraints(struct isl_sched_graph
*graph
,
2147 struct isl_sched_edge
*edge
)
2152 isl_dim_map
*dim_map
;
2153 isl_basic_set
*coef
;
2154 struct isl_sched_node
*src
= edge
->src
;
2155 struct isl_sched_node
*dst
= edge
->dst
;
2158 return isl_stat_error
;
2160 map
= isl_map_copy(edge
->map
);
2161 ctx
= isl_map_get_ctx(map
);
2162 coef
= inter_coefficients(graph
, edge
, map
);
2164 offset
= coef_var_offset(coef
);
2166 coef
= isl_basic_set_free(coef
);
2168 return isl_stat_error
;
2170 dim_map
= inter_dim_map(ctx
, graph
, src
, dst
, offset
, 1);
2172 edge
->start
= graph
->lp
->n_ineq
;
2173 graph
->lp
= add_constraints_dim_map(graph
->lp
, coef
, dim_map
);
2175 return isl_stat_error
;
2176 edge
->end
= graph
->lp
->n_ineq
;
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from a node i to itself.
 * If s = 1, we add the constraint
 *
 *	c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	-c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
 * with each coefficient (except m_0) represented as a pair of non-negative
 * coefficients.
 *
 * If "local" is set, then we add constraints
 *
 *	c_i_x (y - x) <= 0
 *
 * or
 *
 *	-c_i_x (y - x) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in (0, 0, -s * c_i_x).
 * intra_coefficients is not required to include c_n in its result when
 * "local" is set.  If the c_n are missing, then (0, -s * c_i_x) is plugged in.
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
2223 static isl_stat
add_intra_proximity_constraints(struct isl_sched_graph
*graph
,
2224 struct isl_sched_edge
*edge
, int s
, int local
)
2228 isl_map
*map
= isl_map_copy(edge
->map
);
2229 isl_ctx
*ctx
= isl_map_get_ctx(map
);
2230 isl_dim_map
*dim_map
;
2231 isl_basic_set
*coef
;
2232 struct isl_sched_node
*node
= edge
->src
;
2234 coef
= intra_coefficients(graph
, node
, map
, !local
);
2235 nparam
= isl_space_dim(node
->space
, isl_dim_param
);
2237 offset
= coef_var_offset(coef
);
2238 if (nparam
< 0 || offset
< 0)
2239 coef
= isl_basic_set_free(coef
);
2241 return isl_stat_error
;
2243 dim_map
= intra_dim_map(ctx
, graph
, node
, offset
, -s
);
2246 isl_dim_map_range(dim_map
, 1, 0, 0, 0, 1, 1);
2247 isl_dim_map_range(dim_map
, 4, 2, 1, 1, nparam
, -1);
2248 isl_dim_map_range(dim_map
, 5, 2, 1, 1, nparam
, 1);
2250 graph
->lp
= add_constraints_dim_map(graph
->lp
, coef
, dim_map
);
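/* Illustrative note (not from the original source): continuing the example
 * of a self-dependence R = { A[i] -> A[i + 1] } without parameters, the
 * distance set (y - x) is { [1] }, so the coefficient set is defined by
 *
 *	c_0 + c_x >= 0
 *
 * With s = 1, the mapping above sends c_0 to m_0 and c_x to
 * -(c_i_x^+ - c_i_x^-), producing the LP constraint
 *
 *	m_0 - c_i_x^+ + c_i_x^- >= 0
 *
 * that is, c_i_x <= m_0, bounding the dependence distance c_i_x * 1
 * by the objective term m_0 (plus m_n n in the parametric case).
 */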
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from node i to node j.
 * If s = 1, we add the constraint
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *		<= m_0 + m_n n
 *
 * or
 *
 *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
 *		<= m_0 + m_n n
 *
 * or
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
 *  s*c_i_x, -s*c_j_x)
 * with each coefficient (except m_0, c_*_0 and c_*_n)
 * represented as a pair of non-negative coefficients.
 *
 * If "local" is set (and s = 1), then we add constraints
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
 *
 * or
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) >= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in
 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, s*c_i_x, -s*c_j_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
2302 static isl_stat
add_inter_proximity_constraints(struct isl_sched_graph
*graph
,
2303 struct isl_sched_edge
*edge
, int s
, int local
)
2307 isl_map
*map
= isl_map_copy(edge
->map
);
2308 isl_ctx
*ctx
= isl_map_get_ctx(map
);
2309 isl_dim_map
*dim_map
;
2310 isl_basic_set
*coef
;
2311 struct isl_sched_node
*src
= edge
->src
;
2312 struct isl_sched_node
*dst
= edge
->dst
;
2314 coef
= inter_coefficients(graph
, edge
, map
);
2315 nparam
= isl_space_dim(src
->space
, isl_dim_param
);
2317 offset
= coef_var_offset(coef
);
2318 if (nparam
< 0 || offset
< 0)
2319 coef
= isl_basic_set_free(coef
);
2321 return isl_stat_error
;
2323 dim_map
= inter_dim_map(ctx
, graph
, src
, dst
, offset
, -s
);
2326 isl_dim_map_range(dim_map
, 1, 0, 0, 0, 1, 1);
2327 isl_dim_map_range(dim_map
, 4, 2, 1, 1, nparam
, -1);
2328 isl_dim_map_range(dim_map
, 5, 2, 1, 1, nparam
, 1);
2331 graph
->lp
= add_constraints_dim_map(graph
->lp
, coef
, dim_map
);
/* Should the distance over "edge" be forced to zero?
 * That is, is it marked as a local edge?
 * If "use_coincidence" is set, then coincidence edges are treated
 * as local edges.
 */
static int force_zero(struct isl_sched_edge *edge, int use_coincidence)
{
	return is_local(edge) || (use_coincidence && is_coincidence(edge));
}
/* Add all validity constraints to graph->lp.
 *
 * An edge that is forced to be local needs to have its dependence
 * distances equal to zero.  We take care of bounding them by 0 from below
 * here.  add_all_proximity_constraints takes care of bounding them by 0
 * from above.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
2356 static int add_all_validity_constraints(struct isl_sched_graph
*graph
,
2357 int use_coincidence
)
2361 for (i
= 0; i
< graph
->n_edge
; ++i
) {
2362 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
2365 zero
= force_zero(edge
, use_coincidence
);
2366 if (!is_validity(edge
) && !zero
)
2368 if (edge
->src
!= edge
->dst
)
2370 if (add_intra_validity_constraints(graph
, edge
) < 0)
2374 for (i
= 0; i
< graph
->n_edge
; ++i
) {
2375 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
2378 zero
= force_zero(edge
, use_coincidence
);
2379 if (!is_validity(edge
) && !zero
)
2381 if (edge
->src
== edge
->dst
)
2383 if (add_inter_validity_constraints(graph
, edge
) < 0)
2390 /* Add constraints to graph->lp that bound the dependence distance
2391 * for all dependence relations.
2392 * If a given proximity dependence is identical to a validity
2393 * dependence, then the dependence distance is already bounded
2394 * from below (by zero), so we only need to bound the distance
2395 * from above. (This includes the case of "local" dependences
2396 * which are treated as validity dependence by add_all_validity_constraints.)
2397 * Otherwise, we need to bound the distance both from above and from below.
2399 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2400 * Otherwise, we ignore them.
2402 static int add_all_proximity_constraints(struct isl_sched_graph
*graph
,
2403 int use_coincidence
)
2407 for (i
= 0; i
< graph
->n_edge
; ++i
) {
2408 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
2411 zero
= force_zero(edge
, use_coincidence
);
2412 if (!is_proximity(edge
) && !zero
)
2414 if (edge
->src
== edge
->dst
&&
2415 add_intra_proximity_constraints(graph
, edge
, 1, zero
) < 0)
2417 if (edge
->src
!= edge
->dst
&&
2418 add_inter_proximity_constraints(graph
, edge
, 1, zero
) < 0)
2420 if (is_validity(edge
) || zero
)
2422 if (edge
->src
== edge
->dst
&&
2423 add_intra_proximity_constraints(graph
, edge
, -1, 0) < 0)
2425 if (edge
->src
!= edge
->dst
&&
2426 add_inter_proximity_constraints(graph
, edge
, -1, 0) < 0)
/* Normalize the rows of "indep" such that all rows are lexicographically
 * positive and such that each row contains as many final zeros as possible,
 * given the choice for the previous rows.
 * Do this by performing elementary row operations.
 */
static __isl_give isl_mat *normalize_independent(__isl_take isl_mat *indep)
{
	indep = isl_mat_reverse_gauss(indep);
	indep = isl_mat_lexnonneg_rows(indep);
	return indep;
}

/* Extract the linear part of the current schedule for node "node".
 */
static __isl_give isl_mat *extract_linear_schedule(struct isl_sched_node *node)
{
	isl_size n_row = isl_mat_rows(node->sched);

	if (n_row < 0)
		return NULL;
	return isl_mat_sub_alloc(node->sched, 0, n_row,
				1 + node->nparam, node->nvar);
}
/* Compute a basis for the rows in the linear part of the schedule
 * and extend this basis to a full basis.  The remaining rows
 * can then be used to force linear independence from the rows
 * of the schedule.
 *
 * In particular, given the schedule rows S, we compute
 *
 *	S = H Q
 *	S U = H
 *
 * with H the Hermite normal form of S.  That is, all but the
 * first rank columns of H are zero and so each row in S is
 * a linear combination of the first rank rows of Q.
 * The matrix Q can be used as a variable transformation
 * that isolates the directions of S in the first rank rows.
 * Transposing S U = H yields
 *
 *	U^T S^T = H^T
 *
 * with all but the first rank rows of H^T zero.
 * The last rows of U^T are therefore linear combinations
 * of schedule coefficients that are all zero on schedule
 * coefficients that are linearly dependent on the rows of S.
 * At least one of these combinations is non-zero on
 * linearly independent schedule coefficients.
 * The rows are normalized to involve as few of the last
 * coefficients as possible and to have a positive initial value.
 */
2485 static int node_update_vmap(struct isl_sched_node
*node
)
2489 H
= extract_linear_schedule(node
);
2491 H
= isl_mat_left_hermite(H
, 0, &U
, &Q
);
2492 isl_mat_free(node
->indep
);
2493 isl_mat_free(node
->vmap
);
2495 node
->indep
= isl_mat_transpose(U
);
2496 node
->rank
= isl_mat_initial_non_zero_cols(H
);
2497 node
->indep
= isl_mat_drop_rows(node
->indep
, 0, node
->rank
);
2498 node
->indep
= normalize_independent(node
->indep
);
2501 if (!node
->indep
|| !node
->vmap
|| node
->rank
< 0)
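/* Illustrative note (not from the original source): for a single schedule
 * row S = [1 1 0], one possible left Hermite decomposition S U = H has
 *
 *	H = [1 0 0]		U = [ 1 -1  0 ]
 *				    [ 0  1  0 ]
 *				    [ 0  0  1 ]
 *
 * so rank = 1 and, before normalization, "indep" consists of the last two
 * rows of U^T, namely [-1 1 0] and [0 0 1].  Both are orthogonal to S, and
 * any schedule row that is linearly independent of S takes a non-zero
 * value on at least one of them.
 */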
2506 /* Is "edge" marked as a validity or a conditional validity edge?
2508 static int is_any_validity(struct isl_sched_edge
*edge
)
2510 return is_validity(edge
) || is_conditional_validity(edge
);
2513 /* How many times should we count the constraints in "edge"?
2515 * We count as follows
2516 * validity -> 1 (>= 0)
2517 * validity+proximity -> 2 (>= 0 and upper bound)
2518 * proximity -> 2 (lower and upper bound)
2519 * local(+any) -> 2 (>= 0 and <= 0)
2521 * If an edge is only marked conditional_validity then it counts
2522 * as zero since it is only checked afterwards.
2524 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2525 * Otherwise, we ignore them.
2527 static int edge_multiplicity(struct isl_sched_edge
*edge
, int use_coincidence
)
2529 if (is_proximity(edge
) || force_zero(edge
, use_coincidence
))
2531 if (is_validity(edge
))
2536 /* How many times should the constraints in "edge" be counted
2537 * as a parametric intra-node constraint?
2539 * Only proximity edges that are not forced zero need
2540 * coefficient constraints that include coefficients for parameters.
2541 * If the edge is also a validity edge, then only
2542 * an upper bound is introduced. Otherwise, both lower and upper bounds
2545 static int parametric_intra_edge_multiplicity(struct isl_sched_edge
*edge
,
2546 int use_coincidence
)
2548 if (edge
->src
!= edge
->dst
)
2550 if (!is_proximity(edge
))
2552 if (force_zero(edge
, use_coincidence
))
2554 if (is_validity(edge
))
2560 /* Add "f" times the number of equality and inequality constraints of "bset"
2561 * to "n_eq" and "n_ineq" and free "bset".
2563 static isl_stat
update_count(__isl_take isl_basic_set
*bset
,
2564 int f
, int *n_eq
, int *n_ineq
)
2568 eq
= isl_basic_set_n_equality(bset
);
2569 ineq
= isl_basic_set_n_inequality(bset
);
2570 isl_basic_set_free(bset
);
2572 if (eq
< 0 || ineq
< 0)
2573 return isl_stat_error
;
2581 /* Count the number of equality and inequality constraints
2582 * that will be added for the given map.
2584 * The edges that require parameter coefficients are counted separately.
2586 * "use_coincidence" is set if we should take into account coincidence edges.
2588 static isl_stat
count_map_constraints(struct isl_sched_graph
*graph
,
2589 struct isl_sched_edge
*edge
, __isl_take isl_map
*map
,
2590 int *n_eq
, int *n_ineq
, int use_coincidence
)
2593 isl_basic_set
*coef
;
2594 int f
= edge_multiplicity(edge
, use_coincidence
);
2595 int fp
= parametric_intra_edge_multiplicity(edge
, use_coincidence
);
2602 if (edge
->src
!= edge
->dst
) {
2603 coef
= inter_coefficients(graph
, edge
, map
);
2604 return update_count(coef
, f
, n_eq
, n_ineq
);
2608 copy
= isl_map_copy(map
);
2609 coef
= intra_coefficients(graph
, edge
->src
, copy
, 1);
2610 if (update_count(coef
, fp
, n_eq
, n_ineq
) < 0)
2615 copy
= isl_map_copy(map
);
2616 coef
= intra_coefficients(graph
, edge
->src
, copy
, 0);
2617 if (update_count(coef
, f
- fp
, n_eq
, n_ineq
) < 0)
2625 return isl_stat_error
;
2628 /* Count the number of equality and inequality constraints
2629 * that will be added to the main lp problem.
2630 * We count as follows
2631 * validity -> 1 (>= 0)
2632 * validity+proximity -> 2 (>= 0 and upper bound)
2633 * proximity -> 2 (lower and upper bound)
2634 * local(+any) -> 2 (>= 0 and <= 0)
2636 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2637 * Otherwise, we ignore them.
2639 static int count_constraints(struct isl_sched_graph
*graph
,
2640 int *n_eq
, int *n_ineq
, int use_coincidence
)
2644 *n_eq
= *n_ineq
= 0;
2645 for (i
= 0; i
< graph
->n_edge
; ++i
) {
2646 struct isl_sched_edge
*edge
= &graph
->edge
[i
];
2647 isl_map
*map
= isl_map_copy(edge
->map
);
2649 if (count_map_constraints(graph
, edge
, map
, n_eq
, n_ineq
,
2650 use_coincidence
) < 0)
2657 /* Count the number of constraints that will be added by
2658 * add_bound_constant_constraints to bound the values of the constant terms
2659 * and increment *n_eq and *n_ineq accordingly.
2661 * In practice, add_bound_constant_constraints only adds inequalities.
2663 static isl_stat
count_bound_constant_constraints(isl_ctx
*ctx
,
2664 struct isl_sched_graph
*graph
, int *n_eq
, int *n_ineq
)
2666 if (isl_options_get_schedule_max_constant_term(ctx
) == -1)
2669 *n_ineq
+= graph
->n
;
2674 /* Add constraints to bound the values of the constant terms in the schedule,
2675 * if requested by the user.
2677 * The maximal value of the constant terms is defined by the option
2678 * "schedule_max_constant_term".
2680 static isl_stat
add_bound_constant_constraints(isl_ctx
*ctx
,
2681 struct isl_sched_graph
*graph
)
2687 max
= isl_options_get_schedule_max_constant_term(ctx
);
2691 total
= isl_basic_set_dim(graph
->lp
, isl_dim_set
);
2693 return isl_stat_error
;
2695 for (i
= 0; i
< graph
->n
; ++i
) {
2696 struct isl_sched_node
*node
= &graph
->node
[i
];
2699 k
= isl_basic_set_alloc_inequality(graph
->lp
);
2701 return isl_stat_error
;
2702 isl_seq_clr(graph
->lp
->ineq
[k
], 1 + total
);
2703 pos
= node_cst_coef_offset(node
);
2704 isl_int_set_si(graph
->lp
->ineq
[k
][1 + pos
], -1);
2705 isl_int_set_si(graph
->lp
->ineq
[k
][0], max
);
2711 /* Count the number of constraints that will be added by
2712 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
2715 * In practice, add_bound_coefficient_constraints only adds inequalities.
2717 static int count_bound_coefficient_constraints(isl_ctx
*ctx
,
2718 struct isl_sched_graph
*graph
, int *n_eq
, int *n_ineq
)
2722 if (isl_options_get_schedule_max_coefficient(ctx
) == -1 &&
2723 !isl_options_get_schedule_treat_coalescing(ctx
))
2726 for (i
= 0; i
< graph
->n
; ++i
)
2727 *n_ineq
+= graph
->node
[i
].nparam
+ 2 * graph
->node
[i
].nvar
;
2732 /* Add constraints to graph->lp that bound the values of
2733 * the parameter schedule coefficients of "node" to "max" and
2734 * the variable schedule coefficients to the corresponding entry
2736 * In either case, a negative value means that no bound needs to be imposed.
2738 * For parameter coefficients, this amounts to adding a constraint
2746 * The variables coefficients are, however, not represented directly.
2747 * Instead, the variable coefficients c_x are written as differences
2748 * c_x = c_x^+ - c_x^-.
2751 * -max_i <= c_x_i <= max_i
2755 * -max_i <= c_x_i^+ - c_x_i^- <= max_i
2759 * -(c_x_i^+ - c_x_i^-) + max_i >= 0
2760 * c_x_i^+ - c_x_i^- + max_i >= 0
2762 static isl_stat
node_add_coefficient_constraints(isl_ctx
*ctx
,
2763 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
, int max
)
2769 total
= isl_basic_set_dim(graph
->lp
, isl_dim_set
);
2771 return isl_stat_error
;
2773 for (j
= 0; j
< node
->nparam
; ++j
) {
2779 k
= isl_basic_set_alloc_inequality(graph
->lp
);
2781 return isl_stat_error
;
2782 dim
= 1 + node_par_coef_offset(node
) + j
;
2783 isl_seq_clr(graph
->lp
->ineq
[k
], 1 + total
);
2784 isl_int_set_si(graph
->lp
->ineq
[k
][dim
], -1);
2785 isl_int_set_si(graph
->lp
->ineq
[k
][0], max
);
2788 ineq
= isl_vec_alloc(ctx
, 1 + total
);
2789 ineq
= isl_vec_clr(ineq
);
2791 return isl_stat_error
;
2792 for (i
= 0; i
< node
->nvar
; ++i
) {
2793 int pos
= 1 + node_var_coef_pos(node
, i
);
2795 if (isl_int_is_neg(node
->max
->el
[i
]))
2798 isl_int_set_si(ineq
->el
[pos
], 1);
2799 isl_int_set_si(ineq
->el
[pos
+ 1], -1);
2800 isl_int_set(ineq
->el
[0], node
->max
->el
[i
]);
2802 k
= isl_basic_set_alloc_inequality(graph
->lp
);
2805 isl_seq_cpy(graph
->lp
->ineq
[k
], ineq
->el
, 1 + total
);
2807 isl_seq_neg(ineq
->el
+ pos
, ineq
->el
+ pos
, 2);
2808 k
= isl_basic_set_alloc_inequality(graph
->lp
);
2811 isl_seq_cpy(graph
->lp
->ineq
[k
], ineq
->el
, 1 + total
);
2813 isl_seq_clr(ineq
->el
+ pos
, 2);
2820 return isl_stat_error
;
2823 /* Add constraints that bound the values of the variable and parameter
2824 * coefficients of the schedule.
2826 * The maximal value of the coefficients is defined by the option
2827 * 'schedule_max_coefficient' and the entries in node->max.
2828 * These latter entries are only set if either the schedule_max_coefficient
2829 * option or the schedule_treat_coalescing option is set.
2831 static isl_stat
add_bound_coefficient_constraints(isl_ctx
*ctx
,
2832 struct isl_sched_graph
*graph
)
2837 max
= isl_options_get_schedule_max_coefficient(ctx
);
2839 if (max
== -1 && !isl_options_get_schedule_treat_coalescing(ctx
))
2842 for (i
= 0; i
< graph
->n
; ++i
) {
2843 struct isl_sched_node
*node
= &graph
->node
[i
];
2845 if (node_add_coefficient_constraints(ctx
, graph
, node
, max
) < 0)
2846 return isl_stat_error
;
2852 /* Add a constraint to graph->lp that equates the value at position
2853 * "sum_pos" to the sum of the "n" values starting at "first".
2855 static isl_stat
add_sum_constraint(struct isl_sched_graph
*graph
,
2856 int sum_pos
, int first
, int n
)
2861 total
= isl_basic_set_dim(graph
->lp
, isl_dim_set
);
2863 return isl_stat_error
;
2865 k
= isl_basic_set_alloc_equality(graph
->lp
);
2867 return isl_stat_error
;
2868 isl_seq_clr(graph
->lp
->eq
[k
], 1 + total
);
2869 isl_int_set_si(graph
->lp
->eq
[k
][1 + sum_pos
], -1);
2870 for (i
= 0; i
< n
; ++i
)
2871 isl_int_set_si(graph
->lp
->eq
[k
][1 + first
+ i
], 1);
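/* Illustrative note (not from the original source): a call such as
 * add_sum_constraint(graph, 0, 4, 2) adds the equality
 *
 *	-x[0] + x[4] + x[5] = 0
 *
 * to graph->lp (with a zero constant term), i.e., the variable at
 * position 0 is forced to equal the sum of the variables at positions
 * 4 and 5.
 */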
2876 /* Add a constraint to graph->lp that equates the value at position
2877 * "sum_pos" to the sum of the parameter coefficients of all nodes.
2879 static isl_stat
add_param_sum_constraint(struct isl_sched_graph
*graph
,
2885 total
= isl_basic_set_dim(graph
->lp
, isl_dim_set
);
2887 return isl_stat_error
;
2889 k
= isl_basic_set_alloc_equality(graph
->lp
);
2891 return isl_stat_error
;
2892 isl_seq_clr(graph
->lp
->eq
[k
], 1 + total
);
2893 isl_int_set_si(graph
->lp
->eq
[k
][1 + sum_pos
], -1);
2894 for (i
= 0; i
< graph
->n
; ++i
) {
2895 int pos
= 1 + node_par_coef_offset(&graph
->node
[i
]);
2897 for (j
= 0; j
< graph
->node
[i
].nparam
; ++j
)
2898 isl_int_set_si(graph
->lp
->eq
[k
][pos
+ j
], 1);
2904 /* Add a constraint to graph->lp that equates the value at position
2905 * "sum_pos" to the sum of the variable coefficients of all nodes.
2907 static isl_stat
add_var_sum_constraint(struct isl_sched_graph
*graph
,
2913 total
= isl_basic_set_dim(graph
->lp
, isl_dim_set
);
2915 return isl_stat_error
;
2917 k
= isl_basic_set_alloc_equality(graph
->lp
);
2919 return isl_stat_error
;
2920 isl_seq_clr(graph
->lp
->eq
[k
], 1 + total
);
2921 isl_int_set_si(graph
->lp
->eq
[k
][1 + sum_pos
], -1);
2922 for (i
= 0; i
< graph
->n
; ++i
) {
2923 struct isl_sched_node
*node
= &graph
->node
[i
];
2924 int pos
= 1 + node_var_coef_offset(node
);
2926 for (j
= 0; j
< 2 * node
->nvar
; ++j
)
2927 isl_int_set_si(graph
->lp
->eq
[k
][pos
+ j
], 1);
/* Construct an ILP problem for finding schedule coefficients
 * that result in non-negative, but small dependence distances
 * over all dependences.
 * In particular, the dependence distances over proximity edges
 * are bounded by m_0 + m_n n and we compute schedule coefficients
 * with small values (preferably zero) of m_n and m_0.
 *
 * All variables of the ILP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of positive and negative parts of m_n coefficients
 *	- m_0
 *	- sum of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- positive and negative parts of m_n coefficients
 * and for each node
 *	- positive and negative parts of c_i_x, in opposite order
 *	- c_i_n (if parametric)
 *	- c_i_0
 *
 * The constraints are those from the edges plus two or three equalities
 * to express the sums.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
2963 static isl_stat
setup_lp(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
2964 int use_coincidence
)
2974 parametric
= ctx
->opt
->schedule_parametric
;
2975 nparam
= isl_space_dim(graph
->node
[0].space
, isl_dim_param
);
2977 return isl_stat_error
;
2979 total
= param_pos
+ 2 * nparam
;
2980 for (i
= 0; i
< graph
->n
; ++i
) {
2981 struct isl_sched_node
*node
= &graph
->node
[graph
->sorted
[i
]];
2982 if (node_update_vmap(node
) < 0)
2983 return isl_stat_error
;
2984 node
->start
= total
;
2985 total
+= 1 + node
->nparam
+ 2 * node
->nvar
;
2988 if (count_constraints(graph
, &n_eq
, &n_ineq
, use_coincidence
) < 0)
2989 return isl_stat_error
;
2990 if (count_bound_constant_constraints(ctx
, graph
, &n_eq
, &n_ineq
) < 0)
2991 return isl_stat_error
;
2992 if (count_bound_coefficient_constraints(ctx
, graph
, &n_eq
, &n_ineq
) < 0)
2993 return isl_stat_error
;
2995 space
= isl_space_set_alloc(ctx
, 0, total
);
2996 isl_basic_set_free(graph
->lp
);
2997 n_eq
+= 2 + parametric
;
2999 graph
->lp
= isl_basic_set_alloc_space(space
, 0, n_eq
, n_ineq
);
3001 if (add_sum_constraint(graph
, 0, param_pos
, 2 * nparam
) < 0)
3002 return isl_stat_error
;
3003 if (parametric
&& add_param_sum_constraint(graph
, 2) < 0)
3004 return isl_stat_error
;
3005 if (add_var_sum_constraint(graph
, 3) < 0)
3006 return isl_stat_error
;
3007 if (add_bound_constant_constraints(ctx
, graph
) < 0)
3008 return isl_stat_error
;
3009 if (add_bound_coefficient_constraints(ctx
, graph
) < 0)
3010 return isl_stat_error
;
3011 if (add_all_validity_constraints(graph
, use_coincidence
) < 0)
3012 return isl_stat_error
;
3013 if (add_all_proximity_constraints(graph
, use_coincidence
) < 0)
3014 return isl_stat_error
;
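/* Illustrative note (not from the original source): assuming a single
 * global parameter and one node with nvar = 1 and nparam = 1, the LP
 * variables constructed by setup_lp would be laid out as
 *
 *	0:	sum of the m_n^- and m_n^+ parts
 *	1:	m_0
 *	2:	sum of all c_n coefficients
 *	3:	sum of all c_x^- and c_x^+ parts
 *	4, 5:	m_n^-, m_n^+
 *	6, 7:	c_i_x^-, c_i_x^+	(node->start == 6)
 *	8:	c_i_n
 *	9:	c_i_0
 *
 * The lexicographic minimization performed by solve_lp then drives the
 * leading positions, in particular the m_n sum and m_0, towards zero.
 */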
3019 /* Analyze the conflicting constraint found by
3020 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
3021 * constraint of one of the edges between distinct nodes, living, moreover
3022 * in distinct SCCs, then record the source and sink SCC as this may
3023 * be a good place to cut between SCCs.
3025 static int check_conflict(int con
, void *user
)
3028 struct isl_sched_graph
*graph
= user
;
3030 if (graph
->src_scc
>= 0)
3033 con
-= graph
->lp
->n_eq
;
3035 if (con
>= graph
->lp
->n_ineq
)
3038 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3039 if (!is_validity(&graph
->edge
[i
]))
3041 if (graph
->edge
[i
].src
== graph
->edge
[i
].dst
)
3043 if (graph
->edge
[i
].src
->scc
== graph
->edge
[i
].dst
->scc
)
3045 if (graph
->edge
[i
].start
> con
)
3047 if (graph
->edge
[i
].end
<= con
)
3049 graph
->src_scc
= graph
->edge
[i
].src
->scc
;
3050 graph
->dst_scc
= graph
->edge
[i
].dst
->scc
;
/* Check whether the next schedule row of the given node needs to be
 * non-trivial.  Lower-dimensional domains may have some trivial rows,
 * but as soon as the number of remaining required non-trivial rows
 * is as large as the number of remaining rows to be computed,
 * all remaining rows need to be non-trivial.
 */
static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
{
	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
}
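/* Illustrative note (not from the original source): if a node has nvar = 3
 * and the rows computed for it so far only span a space of rank = 1, while
 * graph->maxvar = 3 and graph->n_row = 1, then 3 - 1 >= 3 - 1 holds and
 * needs_row returns true: each of the remaining rows must be linearly
 * independent of the earlier ones, or the node would not reach nvar
 * linearly independent schedule dimensions.
 */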
/* Construct a non-triviality region with triviality directions
 * corresponding to the rows of "indep".
 * The rows of "indep" are expressed in terms of the schedule coefficients c_i,
 * while the triviality directions are expressed in terms of
 * pairs of non-negative variables c^+_i - c^-_i, with c^-_i appearing
 * before c^+_i.  Furthermore,
 * the pairs of non-negative variables representing the coefficients
 * are stored in the opposite order.
 */
3076 static __isl_give isl_mat
*construct_trivial(__isl_keep isl_mat
*indep
)
3083 n
= isl_mat_rows(indep
);
3084 n_var
= isl_mat_cols(indep
);
3085 if (n
< 0 || n_var
< 0)
3088 ctx
= isl_mat_get_ctx(indep
);
3089 mat
= isl_mat_alloc(ctx
, n
, 2 * n_var
);
3092 for (i
= 0; i
< n
; ++i
) {
3093 for (j
= 0; j
< n_var
; ++j
) {
3094 int nj
= n_var
- 1 - j
;
3095 isl_int_neg(mat
->row
[i
][2 * nj
], indep
->row
[i
][j
]);
3096 isl_int_set(mat
->row
[i
][2 * nj
+ 1], indep
->row
[i
][j
]);
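/* Illustrative note (not from the original source): a row [a b] of "indep"
 * (with n_var = 2) is turned into the triviality direction
 *
 *	[-b  b  -a  a]
 *
 * over the LP variables (c^-_1, c^+_1, c^-_0, c^+_0), whose value on a
 * solution is a * c_0 + b * c_1, i.e., the value of the original row on
 * the schedule coefficients.  A solution is considered trivial for this
 * node when all such directions evaluate to zero.
 */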
3103 /* Solve the ILP problem constructed in setup_lp.
3104 * For each node such that all the remaining rows of its schedule
3105 * need to be non-trivial, we construct a non-triviality region.
3106 * This region imposes that the next row is independent of previous rows.
3107 * In particular, the non-triviality region enforces that at least
3108 * one of the linear combinations in the rows of node->indep is non-zero.
3110 static __isl_give isl_vec
*solve_lp(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
3116 for (i
= 0; i
< graph
->n
; ++i
) {
3117 struct isl_sched_node
*node
= &graph
->node
[i
];
3120 graph
->region
[i
].pos
= node_var_coef_offset(node
);
3121 if (needs_row(graph
, node
))
3122 trivial
= construct_trivial(node
->indep
);
3124 trivial
= isl_mat_zero(ctx
, 0, 0);
3125 graph
->region
[i
].trivial
= trivial
;
3127 lp
= isl_basic_set_copy(graph
->lp
);
3128 sol
= isl_tab_basic_set_non_trivial_lexmin(lp
, 2, graph
->n
,
3129 graph
->region
, &check_conflict
, graph
);
3130 for (i
= 0; i
< graph
->n
; ++i
)
3131 isl_mat_free(graph
->region
[i
].trivial
);
3135 /* Extract the coefficients for the variables of "node" from "sol".
3137 * Each schedule coefficient c_i_x is represented as the difference
3138 * between two non-negative variables c_i_x^+ - c_i_x^-.
3139 * The c_i_x^- appear before their c_i_x^+ counterpart.
3140 * Furthermore, the order of these pairs is the opposite of that
3141 * of the corresponding coefficients.
3143 * Return c_i_x = c_i_x^+ - c_i_x^-
3145 static __isl_give isl_vec
*extract_var_coef(struct isl_sched_node
*node
,
3146 __isl_keep isl_vec
*sol
)
3154 csol
= isl_vec_alloc(isl_vec_get_ctx(sol
), node
->nvar
);
3158 pos
= 1 + node_var_coef_offset(node
);
3159 for (i
= 0; i
< node
->nvar
; ++i
)
3160 isl_int_sub(csol
->el
[node
->nvar
- 1 - i
],
3161 sol
->el
[pos
+ 2 * i
+ 1], sol
->el
[pos
+ 2 * i
]);
3166 /* Update the schedules of all nodes based on the given solution
3167 * of the LP problem.
3168 * The new row is added to the current band.
3169 * All possibly negative coefficients are encoded as a difference
3170 * of two non-negative variables, so we need to perform the subtraction
3173 * If coincident is set, then the caller guarantees that the new
3174 * row satisfies the coincidence constraints.
3176 static int update_schedule(struct isl_sched_graph
*graph
,
3177 __isl_take isl_vec
*sol
, int coincident
)
3180 isl_vec
*csol
= NULL
;
3185 isl_die(sol
->ctx
, isl_error_internal
,
3186 "no solution found", goto error
);
3187 if (graph
->n_total_row
>= graph
->max_row
)
3188 isl_die(sol
->ctx
, isl_error_internal
,
3189 "too many schedule rows", goto error
);
3191 for (i
= 0; i
< graph
->n
; ++i
) {
3192 struct isl_sched_node
*node
= &graph
->node
[i
];
3194 isl_size row
= isl_mat_rows(node
->sched
);
3197 csol
= extract_var_coef(node
, sol
);
3198 if (row
< 0 || !csol
)
3201 isl_map_free(node
->sched_map
);
3202 node
->sched_map
= NULL
;
3203 node
->sched
= isl_mat_add_rows(node
->sched
, 1);
3206 pos
= node_cst_coef_offset(node
);
3207 node
->sched
= isl_mat_set_element(node
->sched
,
3208 row
, 0, sol
->el
[1 + pos
]);
3209 pos
= node_par_coef_offset(node
);
3210 for (j
= 0; j
< node
->nparam
; ++j
)
3211 node
->sched
= isl_mat_set_element(node
->sched
,
3212 row
, 1 + j
, sol
->el
[1 + pos
+ j
]);
3213 for (j
= 0; j
< node
->nvar
; ++j
)
3214 node
->sched
= isl_mat_set_element(node
->sched
,
3215 row
, 1 + node
->nparam
+ j
, csol
->el
[j
]);
3216 node
->coincident
[graph
->n_total_row
] = coincident
;
3222 graph
->n_total_row
++;
3231 /* Convert row "row" of node->sched into an isl_aff living in "ls"
3232 * and return this isl_aff.
3234 static __isl_give isl_aff
*extract_schedule_row(__isl_take isl_local_space
*ls
,
3235 struct isl_sched_node
*node
, int row
)
3243 aff
= isl_aff_zero_on_domain(ls
);
3244 if (isl_mat_get_element(node
->sched
, row
, 0, &v
) < 0)
3246 aff
= isl_aff_set_constant(aff
, v
);
3247 for (j
= 0; j
< node
->nparam
; ++j
) {
3248 if (isl_mat_get_element(node
->sched
, row
, 1 + j
, &v
) < 0)
3250 aff
= isl_aff_set_coefficient(aff
, isl_dim_param
, j
, v
);
3252 for (j
= 0; j
< node
->nvar
; ++j
) {
3253 if (isl_mat_get_element(node
->sched
, row
,
3254 1 + node
->nparam
+ j
, &v
) < 0)
3256 aff
= isl_aff_set_coefficient(aff
, isl_dim_in
, j
, v
);
3268 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
3269 * and return this multi_aff.
3271 * The result is defined over the uncompressed node domain.
3273 static __isl_give isl_multi_aff
*node_extract_partial_schedule_multi_aff(
3274 struct isl_sched_node
*node
, int first
, int n
)
3278 isl_local_space
*ls
;
3285 nrow
= isl_mat_rows(node
->sched
);
3288 if (node
->compressed
)
3289 space
= isl_pw_multi_aff_get_domain_space(node
->decompress
);
3291 space
= isl_space_copy(node
->space
);
3292 ls
= isl_local_space_from_space(isl_space_copy(space
));
3293 space
= isl_space_from_domain(space
);
3294 space
= isl_space_add_dims(space
, isl_dim_out
, n
);
3295 ma
= isl_multi_aff_zero(space
);
3297 for (i
= first
; i
< first
+ n
; ++i
) {
3298 aff
= extract_schedule_row(isl_local_space_copy(ls
), node
, i
);
3299 ma
= isl_multi_aff_set_aff(ma
, i
- first
, aff
);
3302 isl_local_space_free(ls
);
3304 if (node
->compressed
)
3305 ma
= isl_multi_aff_pullback_multi_aff(ma
,
3306 isl_multi_aff_copy(node
->compress
));
3311 /* Convert node->sched into a multi_aff and return this multi_aff.
3313 * The result is defined over the uncompressed node domain.
3315 static __isl_give isl_multi_aff
*node_extract_schedule_multi_aff(
3316 struct isl_sched_node
*node
)
3320 nrow
= isl_mat_rows(node
->sched
);
3323 return node_extract_partial_schedule_multi_aff(node
, 0, nrow
);
3326 /* Convert node->sched into a map and return this map.
3328 * The result is cached in node->sched_map, which needs to be released
3329 * whenever node->sched is updated.
3330 * It is defined over the uncompressed node domain.
3332 static __isl_give isl_map
*node_extract_schedule(struct isl_sched_node
*node
)
3334 if (!node
->sched_map
) {
3337 ma
= node_extract_schedule_multi_aff(node
);
3338 node
->sched_map
= isl_map_from_multi_aff(ma
);
3341 return isl_map_copy(node
->sched_map
);
/* Construct a map that can be used to update a dependence relation
 * based on the current schedule.
 * That is, construct a map expressing that source and sink
 * are executed within the same iteration of the current schedule.
 * This map can then be intersected with the dependence relation.
 * This is not the most efficient way, but this shouldn't be a critical
 * operation.
 */
static __isl_give isl_map *specializer(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_map *src_sched, *dst_sched;

	src_sched = node_extract_schedule(src);
	dst_sched = node_extract_schedule(dst);
	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
}
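/* Illustrative note (not from the original source): if the current schedule
 * maps S[i] to [i] and T[j] to [j + 1], then the specializer is
 *
 *	{ S[i] -> T[j] : i = j + 1 }
 *
 * i.e., it relates exactly those instances that are executed in the same
 * iteration of the schedule computed so far.  Intersecting a dependence
 * relation from S to T with this map removes the dependences that are
 * already carried by the outer schedule rows.
 */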
3362 /* Intersect the domains of the nested relations in domain and range
3363 * of "umap" with "map".
3365 static __isl_give isl_union_map
*intersect_domains(
3366 __isl_take isl_union_map
*umap
, __isl_keep isl_map
*map
)
3368 isl_union_set
*uset
;
3370 umap
= isl_union_map_zip(umap
);
3371 uset
= isl_union_set_from_set(isl_map_wrap(isl_map_copy(map
)));
3372 umap
= isl_union_map_intersect_domain(umap
, uset
);
3373 umap
= isl_union_map_zip(umap
);
3377 /* Update the dependence relation of the given edge based
3378 * on the current schedule.
3379 * If the dependence is carried completely by the current schedule, then
3380 * it is removed from the edge_tables. It is kept in the list of edges
3381 * as otherwise all edge_tables would have to be recomputed.
3383 * If the edge is of a type that can appear multiple times
3384 * between the same pair of nodes, then it is added to
3385 * the edge table (again). This prevents the situation
3386 * where none of these edges is referenced from the edge table
3387 * because the one that was referenced turned out to be empty and
3388 * was therefore removed from the table.
3390 static isl_stat
update_edge(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
3391 struct isl_sched_edge
*edge
)
3396 id
= specializer(edge
->src
, edge
->dst
);
3397 edge
->map
= isl_map_intersect(edge
->map
, isl_map_copy(id
));
3401 if (edge
->tagged_condition
) {
3402 edge
->tagged_condition
=
3403 intersect_domains(edge
->tagged_condition
, id
);
3404 if (!edge
->tagged_condition
)
3407 if (edge
->tagged_validity
) {
3408 edge
->tagged_validity
=
3409 intersect_domains(edge
->tagged_validity
, id
);
3410 if (!edge
->tagged_validity
)
3414 empty
= isl_map_plain_is_empty(edge
->map
);
3418 if (graph_remove_edge(graph
, edge
) < 0)
3420 } else if (is_multi_edge_type(edge
)) {
3421 if (graph_edge_tables_add(ctx
, graph
, edge
) < 0)
3429 return isl_stat_error
;
3432 /* Does the domain of "umap" intersect "uset"?
3434 static int domain_intersects(__isl_keep isl_union_map
*umap
,
3435 __isl_keep isl_union_set
*uset
)
3439 umap
= isl_union_map_copy(umap
);
3440 umap
= isl_union_map_intersect_domain(umap
, isl_union_set_copy(uset
));
3441 empty
= isl_union_map_is_empty(umap
);
3442 isl_union_map_free(umap
);
3444 return empty
< 0 ? -1 : !empty
;
3447 /* Does the range of "umap" intersect "uset"?
3449 static int range_intersects(__isl_keep isl_union_map
*umap
,
3450 __isl_keep isl_union_set
*uset
)
3454 umap
= isl_union_map_copy(umap
);
3455 umap
= isl_union_map_intersect_range(umap
, isl_union_set_copy(uset
));
3456 empty
= isl_union_map_is_empty(umap
);
3457 isl_union_map_free(umap
);
3459 return empty
< 0 ? -1 : !empty
;
3462 /* Are the condition dependences of "edge" local with respect to
3463 * the current schedule?
3465 * That is, are domain and range of the condition dependences mapped
3466 * to the same point?
3468 * In other words, is the condition false?
3470 static int is_condition_false(struct isl_sched_edge
*edge
)
3472 isl_union_map
*umap
;
3473 isl_map
*map
, *sched
, *test
;
3476 empty
= isl_union_map_is_empty(edge
->tagged_condition
);
3477 if (empty
< 0 || empty
)
3480 umap
= isl_union_map_copy(edge
->tagged_condition
);
3481 umap
= isl_union_map_zip(umap
);
3482 umap
= isl_union_set_unwrap(isl_union_map_domain(umap
));
3483 map
= isl_map_from_union_map(umap
);
3485 sched
= node_extract_schedule(edge
->src
);
3486 map
= isl_map_apply_domain(map
, sched
);
3487 sched
= node_extract_schedule(edge
->dst
);
3488 map
= isl_map_apply_range(map
, sched
);
3490 test
= isl_map_identity(isl_map_get_space(map
));
3491 local
= isl_map_is_subset(map
, test
);
3498 /* For each conditional validity constraint that is adjacent
3499 * to a condition with domain in condition_source or range in condition_sink,
3500 * turn it into an unconditional validity constraint.
3502 static int unconditionalize_adjacent_validity(struct isl_sched_graph
*graph
,
3503 __isl_take isl_union_set
*condition_source
,
3504 __isl_take isl_union_set
*condition_sink
)
3508 condition_source
= isl_union_set_coalesce(condition_source
);
3509 condition_sink
= isl_union_set_coalesce(condition_sink
);
3511 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3513 isl_union_map
*validity
;
3515 if (!is_conditional_validity(&graph
->edge
[i
]))
3517 if (is_validity(&graph
->edge
[i
]))
3520 validity
= graph
->edge
[i
].tagged_validity
;
3521 adjacent
= domain_intersects(validity
, condition_sink
);
3522 if (adjacent
>= 0 && !adjacent
)
3523 adjacent
= range_intersects(validity
, condition_source
);
3529 set_validity(&graph
->edge
[i
]);
3532 isl_union_set_free(condition_source
);
3533 isl_union_set_free(condition_sink
);
3536 isl_union_set_free(condition_source
);
3537 isl_union_set_free(condition_sink
);
3541 /* Update the dependence relations of all edges based on the current schedule
3542 * and enforce conditional validity constraints that are adjacent
3543 * to satisfied condition constraints.
3545 * First check if any of the condition constraints are satisfied
3546 * (i.e., not local to the outer schedule) and keep track of
3547 * their domain and range.
3548 * Then update all dependence relations (which removes the non-local
3550 * Finally, if any condition constraints turned out to be satisfied,
3551 * then turn all adjacent conditional validity constraints into
3552 * unconditional validity constraints.
3554 static int update_edges(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
3558 isl_union_set
*source
, *sink
;
3560 source
= isl_union_set_empty(isl_space_params_alloc(ctx
, 0));
3561 sink
= isl_union_set_empty(isl_space_params_alloc(ctx
, 0));
3562 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3564 isl_union_set
*uset
;
3565 isl_union_map
*umap
;
3567 if (!is_condition(&graph
->edge
[i
]))
3569 if (is_local(&graph
->edge
[i
]))
3571 local
= is_condition_false(&graph
->edge
[i
]);
3579 umap
= isl_union_map_copy(graph
->edge
[i
].tagged_condition
);
3580 uset
= isl_union_map_domain(umap
);
3581 source
= isl_union_set_union(source
, uset
);
3583 umap
= isl_union_map_copy(graph
->edge
[i
].tagged_condition
);
3584 uset
= isl_union_map_range(umap
);
3585 sink
= isl_union_set_union(sink
, uset
);
3588 for (i
= 0; i
< graph
->n_edge
; ++i
) {
3589 if (update_edge(ctx
, graph
, &graph
->edge
[i
]) < 0)
3594 return unconditionalize_adjacent_validity(graph
, source
, sink
);
3596 isl_union_set_free(source
);
3597 isl_union_set_free(sink
);
3600 isl_union_set_free(source
);
3601 isl_union_set_free(sink
);
3605 static void next_band(struct isl_sched_graph
*graph
)
3607 graph
->band_start
= graph
->n_total_row
;
3610 /* Return the union of the universe domains of the nodes in "graph"
3611 * that satisfy "pred".
3613 static __isl_give isl_union_set
*isl_sched_graph_domain(isl_ctx
*ctx
,
3614 struct isl_sched_graph
*graph
,
3615 int (*pred
)(struct isl_sched_node
*node
, int data
), int data
)
3621 for (i
= 0; i
< graph
->n
; ++i
)
3622 if (pred(&graph
->node
[i
], data
))
3626 isl_die(ctx
, isl_error_internal
,
3627 "empty component", return NULL
);
3629 set
= isl_set_universe(isl_space_copy(graph
->node
[i
].space
));
3630 dom
= isl_union_set_from_set(set
);
3632 for (i
= i
+ 1; i
< graph
->n
; ++i
) {
3633 if (!pred(&graph
->node
[i
], data
))
3635 set
= isl_set_universe(isl_space_copy(graph
->node
[i
].space
));
3636 dom
= isl_union_set_union(dom
, isl_union_set_from_set(set
));
3642 /* Return a list of unions of universe domains, where each element
3643 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
3645 static __isl_give isl_union_set_list
*extract_sccs(isl_ctx
*ctx
,
3646 struct isl_sched_graph
*graph
)
3649 isl_union_set_list
*filters
;
3651 filters
= isl_union_set_list_alloc(ctx
, graph
->scc
);
3652 for (i
= 0; i
< graph
->scc
; ++i
) {
3655 dom
= isl_sched_graph_domain(ctx
, graph
, &node_scc_exactly
, i
);
3656 filters
= isl_union_set_list_add(filters
, dom
);
3662 /* Return a list of two unions of universe domains, one for the SCCs up
3663 * to and including graph->src_scc and another for the other SCCs.
3665 static __isl_give isl_union_set_list
*extract_split(isl_ctx
*ctx
,
3666 struct isl_sched_graph
*graph
)
3669 isl_union_set_list
*filters
;
3671 filters
= isl_union_set_list_alloc(ctx
, 2);
3672 dom
= isl_sched_graph_domain(ctx
, graph
,
3673 &node_scc_at_most
, graph
->src_scc
);
3674 filters
= isl_union_set_list_add(filters
, dom
);
3675 dom
= isl_sched_graph_domain(ctx
, graph
,
3676 &node_scc_at_least
, graph
->src_scc
+ 1);
3677 filters
= isl_union_set_list_add(filters
, dom
);
3682 /* Copy nodes that satisfy node_pred from the src dependence graph
3683 * to the dst dependence graph.
3685 static isl_stat
copy_nodes(struct isl_sched_graph
*dst
,
3686 struct isl_sched_graph
*src
,
3687 int (*node_pred
)(struct isl_sched_node
*node
, int data
), int data
)
3692 for (i
= 0; i
< src
->n
; ++i
) {
3695 if (!node_pred(&src
->node
[i
], data
))
3699 dst
->node
[j
].space
= isl_space_copy(src
->node
[i
].space
);
3700 dst
->node
[j
].compressed
= src
->node
[i
].compressed
;
3701 dst
->node
[j
].hull
= isl_set_copy(src
->node
[i
].hull
);
3702 dst
->node
[j
].compress
=
3703 isl_multi_aff_copy(src
->node
[i
].compress
);
3704 dst
->node
[j
].decompress
=
3705 isl_pw_multi_aff_copy(src
->node
[i
].decompress
);
3706 dst
->node
[j
].nvar
= src
->node
[i
].nvar
;
3707 dst
->node
[j
].nparam
= src
->node
[i
].nparam
;
3708 dst
->node
[j
].sched
= isl_mat_copy(src
->node
[i
].sched
);
3709 dst
->node
[j
].sched_map
= isl_map_copy(src
->node
[i
].sched_map
);
3710 dst
->node
[j
].coincident
= src
->node
[i
].coincident
;
3711 dst
->node
[j
].sizes
= isl_multi_val_copy(src
->node
[i
].sizes
);
3712 dst
->node
[j
].bounds
= isl_basic_set_copy(src
->node
[i
].bounds
);
3713 dst
->node
[j
].max
= isl_vec_copy(src
->node
[i
].max
);
3716 if (!dst
->node
[j
].space
|| !dst
->node
[j
].sched
)
3717 return isl_stat_error
;
3718 if (dst
->node
[j
].compressed
&&
3719 (!dst
->node
[j
].hull
|| !dst
->node
[j
].compress
||
3720 !dst
->node
[j
].decompress
))
3721 return isl_stat_error
;
3727 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3728 * to the dst dependence graph.
3729 * If the source or destination node of the edge is not in the destination
3730 * graph, then it must be a backward proximity edge and it should simply
3733 static isl_stat
copy_edges(isl_ctx
*ctx
, struct isl_sched_graph
*dst
,
3734 struct isl_sched_graph
*src
,
3735 int (*edge_pred
)(struct isl_sched_edge
*edge
, int data
), int data
)
3740 for (i
= 0; i
< src
->n_edge
; ++i
) {
3741 struct isl_sched_edge
*edge
= &src
->edge
[i
];
3743 isl_union_map
*tagged_condition
;
3744 isl_union_map
*tagged_validity
;
3745 struct isl_sched_node
*dst_src
, *dst_dst
;
3747 if (!edge_pred(edge
, data
))
3750 if (isl_map_plain_is_empty(edge
->map
))
3753 dst_src
= graph_find_node(ctx
, dst
, edge
->src
->space
);
3754 dst_dst
= graph_find_node(ctx
, dst
, edge
->dst
->space
);
3755 if (!dst_src
|| !dst_dst
)
3756 return isl_stat_error
;
3757 if (!is_node(dst
, dst_src
) || !is_node(dst
, dst_dst
)) {
3758 if (is_validity(edge
) || is_conditional_validity(edge
))
3759 isl_die(ctx
, isl_error_internal
,
3760 "backward (conditional) validity edge",
3761 return isl_stat_error
);
3765 map
= isl_map_copy(edge
->map
);
3766 tagged_condition
= isl_union_map_copy(edge
->tagged_condition
);
3767 tagged_validity
= isl_union_map_copy(edge
->tagged_validity
);
3769 dst
->edge
[dst
->n_edge
].src
= dst_src
;
3770 dst
->edge
[dst
->n_edge
].dst
= dst_dst
;
3771 dst
->edge
[dst
->n_edge
].map
= map
;
3772 dst
->edge
[dst
->n_edge
].tagged_condition
= tagged_condition
;
3773 dst
->edge
[dst
->n_edge
].tagged_validity
= tagged_validity
;
3774 dst
->edge
[dst
->n_edge
].types
= edge
->types
;
3777 if (edge
->tagged_condition
&& !tagged_condition
)
3778 return isl_stat_error
;
3779 if (edge
->tagged_validity
&& !tagged_validity
)
3780 return isl_stat_error
;
3782 if (graph_edge_tables_add(ctx
, dst
,
3783 &dst
->edge
[dst
->n_edge
- 1]) < 0)
3784 return isl_stat_error
;
3790 /* Compute the maximal number of variables over all nodes.
3791 * This is the maximal number of linearly independent schedule
3792 * rows that we need to compute.
3793 * Just in case we end up in a part of the dependence graph
3794 * with only lower-dimensional domains, we make sure we will
3795 * compute the required amount of extra linearly independent rows.
3797 static int compute_maxvar(struct isl_sched_graph
*graph
)
3802 for (i
= 0; i
< graph
->n
; ++i
) {
3803 struct isl_sched_node
*node
= &graph
->node
[i
];
3806 if (node_update_vmap(node
) < 0)
3808 nvar
= node
->nvar
+ graph
->n_row
- node
->rank
;
3809 if (nvar
> graph
->maxvar
)
3810 graph
->maxvar
= nvar
;
3816 /* Extract the subgraph of "graph" that consists of the nodes satisfying
3817 * "node_pred" and the edges satisfying "edge_pred" and store
3818 * the result in "sub".
3820 static isl_stat
extract_sub_graph(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
3821 int (*node_pred
)(struct isl_sched_node
*node
, int data
),
3822 int (*edge_pred
)(struct isl_sched_edge
*edge
, int data
),
3823 int data
, struct isl_sched_graph
*sub
)
3825 int i
, n
= 0, n_edge
= 0;
3828 for (i
= 0; i
< graph
->n
; ++i
)
3829 if (node_pred(&graph
->node
[i
], data
))
3831 for (i
= 0; i
< graph
->n_edge
; ++i
)
3832 if (edge_pred(&graph
->edge
[i
], data
))
3834 if (graph_alloc(ctx
, sub
, n
, n_edge
) < 0)
3835 return isl_stat_error
;
3836 sub
->root
= graph
->root
;
3837 if (copy_nodes(sub
, graph
, node_pred
, data
) < 0)
3838 return isl_stat_error
;
3839 if (graph_init_table(ctx
, sub
) < 0)
3840 return isl_stat_error
;
3841 for (t
= 0; t
<= isl_edge_last
; ++t
)
3842 sub
->max_edge
[t
] = graph
->max_edge
[t
];
3843 if (graph_init_edge_tables(ctx
, sub
) < 0)
3844 return isl_stat_error
;
3845 if (copy_edges(ctx
, sub
, graph
, edge_pred
, data
) < 0)
3846 return isl_stat_error
;
3847 sub
->n_row
= graph
->n_row
;
3848 sub
->max_row
= graph
->max_row
;
3849 sub
->n_total_row
= graph
->n_total_row
;
3850 sub
->band_start
= graph
->band_start
;
3855 static __isl_give isl_schedule_node
*compute_schedule(isl_schedule_node
*node
,
3856 struct isl_sched_graph
*graph
);
3857 static __isl_give isl_schedule_node
*compute_schedule_wcc(
3858 isl_schedule_node
*node
, struct isl_sched_graph
*graph
);
/* Compute a schedule for a subgraph of "graph".  In particular, for
 * the graph composed of nodes that satisfy node_pred and edges
 * that satisfy edge_pred.
 * If the subgraph is known to consist of a single component, then wcc should
 * be set and then we call compute_schedule_wcc on the constructed subgraph.
 * Otherwise, we call compute_schedule, which will check whether the subgraph
 * is connected.
 *
 * The schedule is inserted at "node" and the updated schedule node
 * is returned.
 */
3871 static __isl_give isl_schedule_node
*compute_sub_schedule(
3872 __isl_take isl_schedule_node
*node
, isl_ctx
*ctx
,
3873 struct isl_sched_graph
*graph
,
3874 int (*node_pred
)(struct isl_sched_node
*node
, int data
),
3875 int (*edge_pred
)(struct isl_sched_edge
*edge
, int data
),
3878 struct isl_sched_graph split
= { 0 };
3880 if (extract_sub_graph(ctx
, graph
, node_pred
, edge_pred
, data
,
3885 node
= compute_schedule_wcc(node
, &split
);
3887 node
= compute_schedule(node
, &split
);
3889 graph_free(ctx
, &split
);
3892 graph_free(ctx
, &split
);
3893 return isl_schedule_node_free(node
);
3896 static int edge_scc_exactly(struct isl_sched_edge
*edge
, int scc
)
3898 return edge
->src
->scc
== scc
&& edge
->dst
->scc
== scc
;
3901 static int edge_dst_scc_at_most(struct isl_sched_edge
*edge
, int scc
)
3903 return edge
->dst
->scc
<= scc
;
3906 static int edge_src_scc_at_least(struct isl_sched_edge
*edge
, int scc
)
3908 return edge
->src
->scc
>= scc
;
3911 /* Reset the current band by dropping all its schedule rows.
3913 static isl_stat
reset_band(struct isl_sched_graph
*graph
)
3918 drop
= graph
->n_total_row
- graph
->band_start
;
3919 graph
->n_total_row
-= drop
;
3920 graph
->n_row
-= drop
;
3922 for (i
= 0; i
< graph
->n
; ++i
) {
3923 struct isl_sched_node
*node
= &graph
->node
[i
];
3925 isl_map_free(node
->sched_map
);
3926 node
->sched_map
= NULL
;
3928 node
->sched
= isl_mat_drop_rows(node
->sched
,
3929 graph
->band_start
, drop
);
3932 return isl_stat_error
;
/* Split the current graph into two parts and compute a schedule for each
 * part individually.  In particular, one part consists of all SCCs up
 * to and including graph->src_scc, while the other part contains the other
 * SCCs.  The split is enforced by a sequence node inserted at position "node"
 * in the schedule tree.  Return the updated schedule node.
 * If either of these two parts consists of a sequence, then it is spliced
 * into the sequence containing the two parts.
 *
 * The current band is reset.  It would be possible to reuse
 * the previously computed rows as the first rows in the next
 * band, but recomputing them may result in better rows as we are looking
 * at a smaller part of the dependence graph.
 */
static __isl_give isl_schedule_node *compute_split_schedule(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
        int is_seq;
        isl_ctx *ctx;
        isl_union_set_list *filters;

        if (reset_band(graph) < 0)
                return isl_schedule_node_free(node);

        ctx = isl_schedule_node_get_ctx(node);
        filters = extract_split(ctx, graph);
        node = isl_schedule_node_insert_sequence(node, filters);
        node = isl_schedule_node_child(node, 1);
        node = isl_schedule_node_child(node, 0);

        node = compute_sub_schedule(node, ctx, graph,
                                &node_scc_at_least, &edge_src_scc_at_least,
                                graph->src_scc + 1, 0);
        is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
        node = isl_schedule_node_parent(node);
        node = isl_schedule_node_parent(node);
        if (is_seq)
                node = isl_schedule_node_sequence_splice_child(node, 1);
        node = isl_schedule_node_child(node, 0);
        node = isl_schedule_node_child(node, 0);
        node = compute_sub_schedule(node, ctx, graph,
                                &node_scc_at_most, &edge_dst_scc_at_most,
                                graph->src_scc, 0);
        is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
        node = isl_schedule_node_parent(node);
        node = isl_schedule_node_parent(node);
        if (is_seq)
                node = isl_schedule_node_sequence_splice_child(node, 0);

        return node;
}
/* Insert a band node at position "node" in the schedule tree corresponding
 * to the current band in "graph".  Mark the band node permutable
 * if "permutable" is set.
 * The partial schedules and the coincidence property are extracted
 * from the graph nodes.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *insert_current_band(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
        int permutable)
{
        int i;
        int start, end, n;
        isl_multi_aff *ma;
        isl_multi_pw_aff *mpa;
        isl_multi_union_pw_aff *mupa;

        if (graph->n < 1)
                isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
                        "graph should have at least one node",
                        return isl_schedule_node_free(node));

        start = graph->band_start;
        end = graph->n_total_row;
        n = end - start;

        ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
        mpa = isl_multi_pw_aff_from_multi_aff(ma);
        mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);

        for (i = 1; i < graph->n; ++i) {
                isl_multi_union_pw_aff *mupa_i;

                ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
                                                                start, n);
                mpa = isl_multi_pw_aff_from_multi_aff(ma);
                mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
                mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
        }
        node = isl_schedule_node_insert_partial_schedule(node, mupa);

        for (i = 0; i < n; ++i)
                node = isl_schedule_node_band_member_set_coincident(node, i,
                                        graph->node[0].coincident[start + i]);
        node = isl_schedule_node_band_set_permutable(node, permutable);

        return node;
}
/* Update the dependence relations based on the current schedule,
 * add the current band to "node" and then continue with the computation
 * of the next band.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *compute_next_band(
        __isl_take isl_schedule_node *node,
        struct isl_sched_graph *graph, int permutable)
{
        isl_ctx *ctx;

        ctx = isl_schedule_node_get_ctx(node);
        if (update_edges(ctx, graph) < 0)
                return isl_schedule_node_free(node);
        node = insert_current_band(node, graph, permutable);

        node = isl_schedule_node_child(node, 0);
        node = compute_schedule(node, graph);
        node = isl_schedule_node_parent(node);

        return node;
}
/* Add the constraints "coef" derived from an edge from "node" to itself
 * to graph->lp in order to respect the dependences and to try and carry them.
 * "pos" is the sequence number of the edge that needs to be carried.
 * "coef" represents general constraints on coefficients (c_0, c_x)
 * of valid constraints for (y - x) with x and y instances of the node.
 *
 * The constraints added to graph->lp need to enforce
 *
 *	(c_j_0 + c_j_x y) - (c_j_0 + c_j_x x)
 *	= c_j_x (y - x) >= e_i
 *
 * for each (x,y) in the dependence relation of the edge.
 * That is, (-e_i, c_j_x) needs to be plugged in for (c_0, c_x),
 * taking into account that each coefficient in c_j_x is represented
 * as a pair of non-negative coefficients.
 */
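/* As a purely illustrative example (the numbers below are made up): if the
 * node has two variables and "coef" contains the constraint
 *
 *	2 c_0 + c_x1 + 3 c_x2 >= 0,
 *
 * then plugging in (-e_i, c_j_x) for (c_0, c_x) turns it into
 *
 *	-2 e_i + (c_x1^+ - c_x1^-) + 3 (c_x2^+ - c_x2^-) >= 0,
 *
 * where each c_xk has been split into its non-negative parts c_xk^+ and c_xk^-.
 */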
static isl_stat add_intra_constraints(struct isl_sched_graph *graph,
        struct isl_sched_node *node, __isl_take isl_basic_set *coef, int pos)
{
        isl_size offset;
        isl_ctx *ctx;
        isl_dim_map *dim_map;

        offset = coef_var_offset(coef);
        if (offset < 0)
                coef = isl_basic_set_free(coef);
        if (!coef)
                return isl_stat_error;

        ctx = isl_basic_set_get_ctx(coef);
        dim_map = intra_dim_map(ctx, graph, node, offset, 1);
        isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
        graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);

        return isl_stat_ok;
}
/* Add the constraints "coef" derived from an edge from "src" to "dst"
 * to graph->lp in order to respect the dependences and to try and carry them.
 * "pos" is the sequence number of the edge that needs to be carried or
 * -1 if no attempt should be made to carry the dependences.
 * "coef" represents general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for (x, y) with x and y instances of "src" and "dst".
 *
 * The constraints added to graph->lp need to enforce
 *
 *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
 *
 * for each (x,y) in the dependence relation of the edge or
 *
 *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= 0
 *
 * if "pos" is -1.  That is,
 *
 *	(-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
 * or
 *	(c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
 * needs to be plugged in for (c_0, c_n, c_x, c_y),
 * taking into account that each coefficient in c_j_x and c_k_x is represented
 * as a pair of non-negative coefficients.
 */
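/* Again purely as an illustration with made-up numbers: a constraint in
 * "coef" of the form
 *
 *	c_0 + 2 c_n + c_x1 + c_y1 >= 0
 *
 * becomes, after plugging in the first vector above,
 *
 *	-e_i + (c_k_0 - c_j_0) + 2 (c_k_n - c_j_n) - c_j_x1 + c_k_x1 >= 0,
 *
 * with c_j_x1 and c_k_x1 in turn each written as the difference of
 * two non-negative LP variables.
 */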
static isl_stat add_inter_constraints(struct isl_sched_graph *graph,
        struct isl_sched_node *src, struct isl_sched_node *dst,
        __isl_take isl_basic_set *coef, int pos)
{
        isl_size offset;
        isl_ctx *ctx;
        isl_dim_map *dim_map;

        offset = coef_var_offset(coef);
        if (offset < 0)
                coef = isl_basic_set_free(coef);
        if (!coef)
                return isl_stat_error;

        ctx = isl_basic_set_get_ctx(coef);
        dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
        if (pos >= 0)
                isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
        graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);

        return isl_stat_ok;
}
/* Data structure for keeping track of the data needed
 * to exploit non-trivial lineality spaces.
 *
 * "any_non_trivial" is true if there are any non-trivial lineality spaces.
 * If "any_non_trivial" is not true, then "equivalent" and "mask" may be NULL.
 * "equivalent" connects instances to other instances on the same line(s).
 * "mask" contains the domain spaces of "equivalent".
 * Any instance set not in "mask" does not have a non-trivial lineality space.
 */
struct isl_exploit_lineality_data {
        isl_bool any_non_trivial;
        isl_union_map *equivalent;
        isl_union_set *mask;
};

/* Data structure collecting information used during the construction
 * of an LP for carrying dependences.
 *
 * "intra" is a sequence of coefficient constraints for intra-node edges.
 * "inter" is a sequence of coefficient constraints for inter-node edges.
 * "lineality" contains data used to exploit non-trivial lineality spaces.
 */
struct isl_carry {
        isl_basic_set_list *intra;
        isl_basic_set_list *inter;
        struct isl_exploit_lineality_data lineality;
};

/* Free all the data stored in "carry".
 */
static void isl_carry_clear(struct isl_carry *carry)
{
        isl_basic_set_list_free(carry->intra);
        isl_basic_set_list_free(carry->inter);
        isl_union_map_free(carry->lineality.equivalent);
        isl_union_set_free(carry->lineality.mask);
}
/* Return a pointer to the node in "graph" that lives in "space".
 * If the requested node has been compressed, then "space"
 * corresponds to the compressed space.
 * The graph is assumed to have such a node.
 * Return NULL in case of error.
 *
 * First try and see if "space" is the space of an uncompressed node.
 * If so, return that node.
 * Otherwise, "space" was constructed by construct_compressed_id and
 * contains a user pointer pointing to the node in the tuple id.
 * However, this node belongs to the original dependence graph.
 * If "graph" is a subgraph of this original dependence graph,
 * then the node with the same space still needs to be looked up
 * in the current graph.
 */
static struct isl_sched_node *graph_find_compressed_node(isl_ctx *ctx,
        struct isl_sched_graph *graph, __isl_keep isl_space *space)
{
        isl_id *id;
        struct isl_sched_node *node;

        node = graph_find_node(ctx, graph, space);
        if (is_node(graph, node))
                return node;

        id = isl_space_get_tuple_id(space, isl_dim_set);
        node = isl_id_get_user(id);
        isl_id_free(id);

        if (!is_node(graph->root, node))
                isl_die(ctx, isl_error_internal,
                        "space points to invalid node", return NULL);
        if (graph != graph->root)
                node = graph_find_node(ctx, graph, node->space);
        if (!is_node(graph, node))
                isl_die(ctx, isl_error_internal,
                        "unable to find node", return NULL);

        return node;
}
/* Internal data structure for add_all_constraints.
 *
 * "graph" is the schedule constraint graph for which an LP problem
 * is being constructed.
 * "carry_inter" indicates whether inter-node edges should be carried.
 * "pos" is the position of the next edge that needs to be carried.
 */
struct isl_add_all_constraints_data {
        isl_ctx *ctx;
        struct isl_sched_graph *graph;
        int carry_inter;
        int pos;
};

/* Add the constraints "coef" derived from an edge from a node to itself
 * to data->graph->lp in order to respect the dependences and
 * to try and carry them.
 *
 * The space of "coef" is of the form
 *
 *	coefficients[[c_cst] -> S[c_x]]
 *
 * with S[c_x] the (compressed) space of the node.
 * Extract the node from the space and call add_intra_constraints.
 */
static isl_stat lp_add_intra(__isl_take isl_basic_set *coef, void *user)
{
        struct isl_add_all_constraints_data *data = user;
        isl_space *space;
        struct isl_sched_node *node;

        space = isl_basic_set_get_space(coef);
        space = isl_space_range(isl_space_unwrap(space));
        node = graph_find_compressed_node(data->ctx, data->graph, space);
        isl_space_free(space);
        return add_intra_constraints(data->graph, node, coef, data->pos++);
}
/* Add the constraints "coef" derived from an edge from a node j
 * to a node k to data->graph->lp in order to respect the dependences and
 * to try and carry them (provided data->carry_inter is set).
 *
 * The space of "coef" is of the form
 *
 *	coefficients[[c_cst, c_n] -> [S_j[c_x] -> S_k[c_y]]]
 *
 * with S_j[c_x] and S_k[c_y] the (compressed) spaces of the nodes.
 * Extract the nodes from the space and call add_inter_constraints.
 */
static isl_stat lp_add_inter(__isl_take isl_basic_set *coef, void *user)
{
        struct isl_add_all_constraints_data *data = user;
        isl_space *space, *dom;
        struct isl_sched_node *src, *dst;
        int pos;

        space = isl_basic_set_get_space(coef);
        space = isl_space_unwrap(isl_space_range(isl_space_unwrap(space)));
        dom = isl_space_domain(isl_space_copy(space));
        src = graph_find_compressed_node(data->ctx, data->graph, dom);
        isl_space_free(dom);
        space = isl_space_range(space);
        dst = graph_find_compressed_node(data->ctx, data->graph, space);
        isl_space_free(space);

        pos = data->carry_inter ? data->pos++ : -1;
        return add_inter_constraints(data->graph, src, dst, coef, pos);
}
/* Add constraints to graph->lp that force all (conditional) validity
 * dependences to be respected and attempt to carry them.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 * "carry_inter" indicates whether inter-node edges should be carried or
 * only respected.
 */
static isl_stat add_all_constraints(isl_ctx *ctx, struct isl_sched_graph *graph,
        __isl_keep isl_basic_set_list *intra,
        __isl_keep isl_basic_set_list *inter, int carry_inter)
{
        struct isl_add_all_constraints_data data = { ctx, graph, carry_inter };

        data.pos = 0;
        if (isl_basic_set_list_foreach(intra, &lp_add_intra, &data) < 0)
                return isl_stat_error;
        if (isl_basic_set_list_foreach(inter, &lp_add_inter, &data) < 0)
                return isl_stat_error;
        return isl_stat_ok;
}
/* Internal data structure for count_all_constraints
 * for keeping track of the number of equality and inequality constraints.
 */
struct isl_sched_count {
        int n_eq;
        int n_ineq;
};

/* Add the number of equality and inequality constraints of "bset"
 * to data->n_eq and data->n_ineq.
 */
static isl_stat bset_update_count(__isl_take isl_basic_set *bset, void *user)
{
        struct isl_sched_count *data = user;

        return update_count(bset, 1, &data->n_eq, &data->n_ineq);
}

/* Count the number of equality and inequality constraints
 * that will be added to the carry_lp problem.
 * We count each edge exactly once.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 */
static isl_stat count_all_constraints(__isl_keep isl_basic_set_list *intra,
        __isl_keep isl_basic_set_list *inter, int *n_eq, int *n_ineq)
{
        struct isl_sched_count data;

        data.n_eq = data.n_ineq = 0;
        if (isl_basic_set_list_foreach(inter, &bset_update_count, &data) < 0)
                return isl_stat_error;
        if (isl_basic_set_list_foreach(intra, &bset_update_count, &data) < 0)
                return isl_stat_error;

        *n_eq = data.n_eq;
        *n_ineq = data.n_ineq;

        return isl_stat_ok;
}
/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many validity dependences as possible.
 * In particular, for each dependence i, we bound the dependence distance
 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
 * of all e_i's.  Dependences with e_i = 0 in the solution are simply
 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 * "n_edge" is the total number of edges.
 * "carry_inter" indicates whether inter-node edges should be carried or
 * only respected.  That is, if "carry_inter" is not set, then
 * no e_i variables are introduced for the inter-node edges.
 *
 * All variables of the LP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of (1 - e_i) over all edges
 *	- sum of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- for each edge
 *		- e_i
 *	- for each node
 *		- positive and negative parts of c_i_x, in opposite order
 *		- c_i_n (if parametric)
 *		- c_i_0
 *
 * The constraints are those from the (validity) edges plus three equalities
 * to express the sums and n_edge inequalities to express e_i <= 1.
 */
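/* A hypothetical illustration of this layout (not taken from any particular
 * run): with n_edge = 2 and a single node with nparam = 1 and nvar = 2,
 * the LP variables would be, in order,
 *
 *	x_0		sum of (1 - e_0) and (1 - e_1)
 *	x_1		sum of the c_n coefficients
 *	x_2		sum of the parts of the c_x coefficients
 *	x_3, x_4	e_0 and e_1
 *	x_5 .. x_8	negative and positive parts of the two c_x coefficients
 *	x_9		c_n
 *	x_10		c_0
 *
 * so that the node starts at position 3 + n_edge = 5 and occupies
 * 1 + nparam + 2 * nvar = 6 variables, matching the loop below.
 */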
static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
        int n_edge, __isl_keep isl_basic_set_list *intra,
        __isl_keep isl_basic_set_list *inter, int carry_inter)
{
        int i;
        int k;
        isl_space *space;
        unsigned total;
        int n_eq, n_ineq;

        total = 3 + n_edge;
        for (i = 0; i < graph->n; ++i) {
                struct isl_sched_node *node = &graph->node[graph->sorted[i]];
                node->start = total;
                total += 1 + node->nparam + 2 * node->nvar;
        }

        if (count_all_constraints(intra, inter, &n_eq, &n_ineq) < 0)
                return isl_stat_error;

        space = isl_space_set_alloc(ctx, 0, total);
        isl_basic_set_free(graph->lp);
        n_eq += 3;
        n_ineq += n_edge;
        graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
        graph->lp = isl_basic_set_set_rational(graph->lp);

        k = isl_basic_set_alloc_equality(graph->lp);
        if (k < 0)
                return isl_stat_error;
        isl_seq_clr(graph->lp->eq[k], 1 + total);
        isl_int_set_si(graph->lp->eq[k][0], -n_edge);
        isl_int_set_si(graph->lp->eq[k][1], 1);
        for (i = 0; i < n_edge; ++i)
                isl_int_set_si(graph->lp->eq[k][4 + i], 1);

        if (add_param_sum_constraint(graph, 1) < 0)
                return isl_stat_error;
        if (add_var_sum_constraint(graph, 2) < 0)
                return isl_stat_error;

        for (i = 0; i < n_edge; ++i) {
                k = isl_basic_set_alloc_inequality(graph->lp);
                if (k < 0)
                        return isl_stat_error;
                isl_seq_clr(graph->lp->ineq[k], 1 + total);
                isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
                isl_int_set_si(graph->lp->ineq[k][0], 1);
        }

        if (add_all_constraints(ctx, graph, intra, inter, carry_inter) < 0)
                return isl_stat_error;

        return isl_stat_ok;
}
static __isl_give isl_schedule_node *compute_component_schedule(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
        int wcc);
/* If the schedule_split_scaled option is set and if the linear
 * parts of the scheduling rows for all nodes in the graph have
 * a non-trivial common divisor, then remove this
 * common divisor from the linear part.
 * Otherwise, insert a band node directly and continue with
 * the construction of the schedule.
 *
 * If a non-trivial common divisor is found, then
 * the linear part is reduced and the remainder is ignored.
 * The pieces of the graph that are assigned different remainders
 * form (groups of) strongly connected components within
 * the scaled down band.  If needed, they can therefore
 * be ordered along this remainder in a sequence node.
 * However, this ordering is not enforced here in order to allow
 * the scheduler to combine some of the strongly connected components.
 */
static __isl_give isl_schedule_node *split_scaled(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
        int i;
        int row;
        isl_ctx *ctx;
        isl_int gcd, gcd_i;
        isl_size n_row;

        if (!node)
                return NULL;

        ctx = isl_schedule_node_get_ctx(node);
        if (!ctx->opt->schedule_split_scaled)
                return compute_next_band(node, graph, 0);
        if (graph->n <= 1)
                return compute_next_band(node, graph, 0);
        n_row = isl_mat_rows(graph->node[0].sched);
        if (n_row < 0)
                return isl_schedule_node_free(node);

        isl_int_init(gcd);
        isl_int_init(gcd_i);

        isl_int_set_si(gcd, 0);

        row = n_row - 1;

        for (i = 0; i < graph->n; ++i) {
                struct isl_sched_node *node = &graph->node[i];
                isl_size cols = isl_mat_cols(node->sched);

                if (cols < 0)
                        break;
                isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
                isl_int_gcd(gcd, gcd, gcd_i);
        }

        isl_int_clear(gcd_i);
        if (i < graph->n)
                goto error;

        if (isl_int_cmp_si(gcd, 1) <= 0) {
                isl_int_clear(gcd);
                return compute_next_band(node, graph, 0);
        }

        for (i = 0; i < graph->n; ++i) {
                struct isl_sched_node *node = &graph->node[i];

                isl_int_fdiv_q(node->sched->row[row][0],
                                node->sched->row[row][0], gcd);
                isl_int_mul(node->sched->row[row][0],
                                node->sched->row[row][0], gcd);
                node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
                if (!node->sched)
                        goto error;
        }

        isl_int_clear(gcd);

        return compute_next_band(node, graph, 0);
error:
        isl_int_clear(gcd);
        return isl_schedule_node_free(node);
}
/* Is the schedule row "sol" trivial on node "node"?
 * That is, is the solution zero on the dimensions linearly independent of
 * the previously found solutions?
 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
 *
 * Each coefficient is represented as the difference between
 * two non-negative values in "sol".
 * We construct the schedule row s and check if it is linearly
 * independent of previously computed schedule rows
 * by computing T s, with T the linear combinations that are zero
 * on linearly dependent schedule rows.
 * If the result consists of all zeros, then the solution is trivial.
 */
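/* A small made-up example: for a node with nvar = 3 and rank = 2,
 * "indep" has nvar - rank = 1 row, say T = (0, 1, -1).
 * For a candidate schedule row s = (5, 2, 2), the product T s is 0,
 * so this row contributes nothing in the remaining independent direction
 * and would be considered trivial.
 */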
static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
{
        int trivial;
        isl_vec *node_sol;

        if (node->nvar == node->rank)
                return 0;

        node_sol = extract_var_coef(node, sol);
        node_sol = isl_mat_vec_product(isl_mat_copy(node->indep), node_sol);
        if (!node_sol)
                return -1;

        trivial = isl_seq_first_non_zero(node_sol->el,
                                node->nvar - node->rank) == -1;

        isl_vec_free(node_sol);

        return trivial;
}

/* Is the schedule row "sol" trivial on any node where it should
 * not be trivial?
 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
 */
static int is_any_trivial(struct isl_sched_graph *graph,
        __isl_keep isl_vec *sol)
{
        int i;

        for (i = 0; i < graph->n; ++i) {
                struct isl_sched_node *node = &graph->node[i];
                int trivial;

                if (!needs_row(graph, node))
                        continue;
                trivial = is_trivial(node, sol);
                if (trivial < 0 || trivial)
                        return trivial;
        }

        return 0;
}
/* Does the schedule represented by "sol" perform loop coalescing on "node"?
 * If so, return the position of the coalesced dimension.
 * Otherwise, return node->nvar or -1 on error.
 *
 * In particular, look for pairs of coefficients c_i and c_j such that
 * |c_j/c_i| > ceil(size_i/2), i.e., |c_j| > |c_i * ceil(size_i/2)|.
 * If any such pair is found, then return i.
 * If size_i is infinity, then no check on c_i needs to be performed.
 */
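/* For instance (made-up numbers): with sizes (8, infinity) and coefficients
 * (c_0, c_1) = (1, 5), we have ceil(size_0/2) = 4 and
 * |c_1| = 5 > |c_0 * 4| = 4, so position 0 would be reported as a coalesced
 * dimension.  No check is performed against the second size since it is
 * unbounded.
 */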
static int find_node_coalescing(struct isl_sched_node *node,
        __isl_keep isl_vec *sol)
{
        int i, j;
        isl_int max;
        isl_vec *csol;

        if (node->nvar <= 1)
                return node->nvar;

        csol = extract_var_coef(node, sol);
        if (!csol)
                return -1;

        isl_int_init(max);
        for (i = 0; i < node->nvar; ++i) {
                isl_val *v;

                if (isl_int_is_zero(csol->el[i]))
                        continue;
                v = isl_multi_val_get_val(node->sizes, i);
                if (!v)
                        goto error;
                if (!isl_val_is_int(v)) {
                        isl_val_free(v);
                        continue;
                }
                v = isl_val_div_ui(v, 2);
                v = isl_val_ceil(v);
                if (!v)
                        goto error;
                isl_int_mul(max, v->n, csol->el[i]);
                isl_val_free(v);

                for (j = 0; j < node->nvar; ++j) {
                        if (j == i)
                                continue;
                        if (isl_int_abs_gt(csol->el[j], max))
                                break;
                }
                if (j < node->nvar)
                        break;
        }

        isl_int_clear(max);
        isl_vec_free(csol);
        return i;
error:
        isl_int_clear(max);
        isl_vec_free(csol);
        return -1;
}
/* Force the schedule coefficient at position "pos" of "node" to be zero
 * in "tl".
 * The coefficient is encoded as the difference between two non-negative
 * variables.  Force these two variables to have the same value.
 */
static __isl_give isl_tab_lexmin *zero_out_node_coef(
        __isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
{
        int dim;
        isl_ctx *ctx;
        isl_vec *eq;

        ctx = isl_space_get_ctx(node->space);
        dim = isl_tab_lexmin_dim(tl);
        if (dim < 0)
                return isl_tab_lexmin_free(tl);
        eq = isl_vec_alloc(ctx, 1 + dim);
        eq = isl_vec_clr(eq);
        if (!eq)
                return isl_tab_lexmin_free(tl);

        pos = 1 + node_var_coef_pos(node, pos);
        isl_int_set_si(eq->el[pos], 1);
        isl_int_set_si(eq->el[pos + 1], -1);
        tl = isl_tab_lexmin_add_eq(tl, eq->el);
        isl_vec_free(eq);

        return tl;
}
/* Return the lexicographically smallest rational point in the basic set
 * from which "tl" was constructed, double checking that this input set
 * was not empty.
 */
static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
{
        isl_vec *sol;

        sol = isl_tab_lexmin_get_solution(tl);
        if (!sol)
                return NULL;
        if (sol->size == 0)
                isl_die(isl_vec_get_ctx(sol), isl_error_internal,
                        "error in schedule construction",
                        return isl_vec_free(sol));
        return sol;
}
/* Does the solution "sol" of the LP problem constructed by setup_carry_lp
 * carry any of the "n_edge" groups of dependences?
 * The value in the first position is the sum of (1 - e_i) over all "n_edge"
 * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
 * by the edge are carried by the solution.
 * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
 * one of those is carried.
 *
 * Note that despite the fact that the problem is solved using a rational
 * solver, the solution is guaranteed to be integral.
 * Specifically, the dependence distance lower bounds e_i (and therefore
 * also their sum) are integers.  See Lemma 5 of [1].
 *
 * Any potential denominator of the sum is cleared by this function.
 * The denominator is not relevant for any of the other elements
 * in the solution.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 * Problem, Part II: Multi-Dimensional Time.
 * In Intl. Journal of Parallel Programming, 1992.
 */
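/* For example (made-up values): with n_edge = 3 and a solution in which
 * the denominator sol->el[0] is 1 and the sum sol->el[1] is 2, we have
 * 2 < 3, so at least one e_i equals 1 and the corresponding group of
 * dependences is carried.  A sum of 3 would mean that no group is carried.
 */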
static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
{
        isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
        isl_int_set_si(sol->el[0], 1);
        return isl_int_cmp_si(sol->el[1], n_edge) < 0;
}
/* Return the lexicographically smallest rational point in "lp",
 * assuming that all variables are non-negative and performing some
 * additional sanity checks.
 * If "want_integral" is set, then compute the lexicographically smallest
 * integer point instead.
 * In particular, "lp" should not be empty by construction.
 * Double check that this is the case.
 * If dependences are not carried for any of the "n_edge" edges,
 * then return an empty vector.
 *
 * If the schedule_treat_coalescing option is set and
 * if the computed schedule performs loop coalescing on a given node,
 * i.e., if it is of the form
 *
 *	c_i i + c_j j + ...
 *
 * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
 * to cut out this solution.  Repeat this process until no more loop
 * coalescing occurs or until no more dependences can be carried.
 * In the latter case, revert to the previously computed solution.
 *
 * If the caller requests an integral solution and if coalescing should
 * be treated, then perform the coalescing treatment first as
 * an integral solution computed before coalescing treatment
 * would carry the same number of edges and would therefore probably
 * also be coalescing.
 *
 * To allow the coalescing treatment to be performed first,
 * the initial solution is allowed to be rational and it is only
 * cut out (if needed) in the next iteration, if no coalescing measures
 * need to be taken.
 */
static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
        __isl_take isl_basic_set *lp, int n_edge, int want_integral)
{
        int i, pos, cut;
        isl_ctx *ctx;
        isl_tab_lexmin *tl;
        isl_vec *sol = NULL, *prev = NULL;
        int treat_coalescing;
        int try_again;

        ctx = isl_basic_set_get_ctx(lp);
        treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
        tl = isl_tab_lexmin_from_basic_set(lp);

        cut = 0;
        do {
                int integral;

                try_again = 0;
                if (cut)
                        tl = isl_tab_lexmin_cut_to_integer(tl);

                sol = non_empty_solution(tl);
                if (!sol)
                        goto error;

                integral = isl_int_is_one(sol->el[0]);
                if (!carries_dependences(sol, n_edge)) {
                        if (!prev)
                                prev = isl_vec_alloc(ctx, 0);
                        isl_vec_free(sol);
                        sol = prev;
                        break;
                }
                prev = isl_vec_free(prev);
                cut = want_integral && !integral;
                if (cut) {
                        prev = sol;
                        try_again = 1;
                }
                if (!treat_coalescing)
                        continue;
                for (i = 0; i < graph->n; ++i) {
                        struct isl_sched_node *node = &graph->node[i];

                        pos = find_node_coalescing(node, sol);
                        if (pos < 0)
                                goto error;
                        if (pos < node->nvar)
                                break;
                }
                if (i < graph->n) {
                        prev = sol;
                        try_again = 1;
                        tl = zero_out_node_coef(tl, &graph->node[i], pos);
                        cut = 0;
                }
        } while (try_again);

        isl_tab_lexmin_free(tl);

        return sol;
error:
        isl_tab_lexmin_free(tl);
        isl_vec_free(prev);
        isl_vec_free(sol);
        return NULL;
}
4847 /* If "edge" is an edge from a node to itself, then add the corresponding
4848 * dependence relation to "umap".
4849 * If "node" has been compressed, then the dependence relation
4850 * is also compressed first.
4852 static __isl_give isl_union_map
*add_intra(__isl_take isl_union_map
*umap
,
4853 struct isl_sched_edge
*edge
)
4856 struct isl_sched_node
*node
= edge
->src
;
4858 if (edge
->src
!= edge
->dst
)
4861 map
= isl_map_copy(edge
->map
);
4862 map
= compress(map
, node
, node
);
4863 umap
= isl_union_map_add_map(umap
, map
);
4867 /* If "edge" is an edge from a node to another node, then add the corresponding
4868 * dependence relation to "umap".
4869 * If the source or destination nodes of "edge" have been compressed,
4870 * then the dependence relation is also compressed first.
4872 static __isl_give isl_union_map
*add_inter(__isl_take isl_union_map
*umap
,
4873 struct isl_sched_edge
*edge
)
4877 if (edge
->src
== edge
->dst
)
4880 map
= isl_map_copy(edge
->map
);
4881 map
= compress(map
, edge
->src
, edge
->dst
);
4882 umap
= isl_union_map_add_map(umap
, map
);
/* Internal data structure used by union_drop_coalescing_constraints
 * to collect bounds on all relevant statements.
 *
 * "graph" is the schedule constraint graph for which an LP problem
 * is being constructed.
 * "bounds" collects the bounds.
 */
struct isl_collect_bounds_data {
        isl_ctx *ctx;
        struct isl_sched_graph *graph;
        isl_union_set *bounds;
};

/* Add the size bounds for the node with instance deltas in "set"
 * to data->bounds.
 */
static isl_stat collect_bounds(__isl_take isl_set *set, void *user)
{
        struct isl_collect_bounds_data *data = user;
        struct isl_sched_node *node;
        isl_set *bounds;
        isl_space *space;

        space = isl_set_get_space(set);
        isl_set_free(set);

        node = graph_find_compressed_node(data->ctx, data->graph, space);
        isl_space_free(space);

        bounds = isl_set_from_basic_set(get_size_bounds(node));
        data->bounds = isl_union_set_add_set(data->bounds, bounds);

        return isl_stat_ok;
}

/* Drop some constraints from "delta" that could be exploited
 * to construct loop coalescing schedules.
 * In particular, drop those constraints that bound the difference
 * to the size of the domain.
 * Do this for each set/node in "delta" separately.
 * The parameters are assumed to have been projected out by the caller.
 */
static __isl_give isl_union_set *union_drop_coalescing_constraints(isl_ctx *ctx,
        struct isl_sched_graph *graph, __isl_take isl_union_set *delta)
{
        struct isl_collect_bounds_data data = { ctx, graph };

        data.bounds = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
        if (isl_union_set_foreach_set(delta, &collect_bounds, &data) < 0)
                data.bounds = isl_union_set_free(data.bounds);
        delta = isl_union_set_plain_gist(delta, data.bounds);

        return delta;
}
/* Given a non-trivial lineality space "lineality", add the corresponding
 * universe set to data->mask and add a map from elements to
 * other elements along the lines in "lineality" to data->equivalent.
 * If this is the first time this function gets called
 * (data->any_non_trivial is still false), then set data->any_non_trivial and
 * initialize data->mask and data->equivalent.
 *
 * In particular, if the lineality space is defined by equality constraints
 *
 *	E x = 0
 *
 * then construct an affine mapping
 *
 *	f : x -> E x
 *
 * and compute the equivalence relation of having the same image under f:
 *
 *	{ x -> x' : E x = E x' }
 */
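/* As a sketch of what this produces: for a two-dimensional instance set
 * whose lineality space is defined by the single equality x_0 - x_1 = 0,
 * i.e., E = [ 1 -1 ], the mapping is f : (x_0, x_1) -> x_0 - x_1 and the
 * equivalence relation becomes
 *
 *	{ (x_0, x_1) -> (x_0', x_1') : x_0 - x_1 = x_0' - x_1' },
 *
 * connecting all instances that lie on the same line x_0 - x_1 = constant.
 */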
static isl_stat add_non_trivial_lineality(__isl_take isl_basic_set *lineality,
        struct isl_exploit_lineality_data *data)
{
        isl_mat *eq;
        isl_space *space;
        isl_set *univ;
        isl_multi_aff *ma;
        isl_multi_pw_aff *mpa;
        isl_map *map;
        isl_size n;

        if (isl_basic_set_check_no_locals(lineality) < 0)
                goto error;

        space = isl_basic_set_get_space(lineality);
        if (!data->any_non_trivial) {
                data->equivalent = isl_union_map_empty(isl_space_copy(space));
                data->mask = isl_union_set_empty(isl_space_copy(space));
        }
        data->any_non_trivial = isl_bool_true;

        univ = isl_set_universe(isl_space_copy(space));
        data->mask = isl_union_set_add_set(data->mask, univ);

        eq = isl_basic_set_extract_equalities(lineality);
        n = isl_mat_rows(eq);
        if (n < 0)
                space = isl_space_free(space);
        eq = isl_mat_insert_zero_rows(eq, 0, 1);
        eq = isl_mat_set_element_si(eq, 0, 0, 1);
        space = isl_space_from_domain(space);
        space = isl_space_add_dims(space, isl_dim_out, n);
        ma = isl_multi_aff_from_aff_mat(space, eq);
        mpa = isl_multi_pw_aff_from_multi_aff(ma);
        map = isl_multi_pw_aff_eq_map(mpa, isl_multi_pw_aff_copy(mpa));
        data->equivalent = isl_union_map_add_map(data->equivalent, map);

        isl_basic_set_free(lineality);
        return isl_stat_ok;
error:
        isl_basic_set_free(lineality);
        return isl_stat_error;
}
/* Check if the lineality space "set" is non-trivial (i.e., is not just
 * the origin or, in other words, satisfies a number of equality constraints
 * that is smaller than the dimension of the set).
 * If so, extend data->mask and data->equivalent accordingly.
 *
 * The input should not have any local variables already, but
 * isl_set_remove_divs is called to make sure it does not.
 */
static isl_stat add_lineality(__isl_take isl_set *set, void *user)
{
        struct isl_exploit_lineality_data *data = user;
        isl_basic_set *hull;
        isl_size dim;
        isl_size n_eq;

        set = isl_set_remove_divs(set);
        hull = isl_set_unshifted_simple_hull(set);
        dim = isl_basic_set_dim(hull, isl_dim_set);
        n_eq = isl_basic_set_n_equality(hull);
        if (dim < 0 || n_eq < 0)
                goto error;
        if (n_eq < dim)
                return add_non_trivial_lineality(hull, data);

        isl_basic_set_free(hull);
        return isl_stat_ok;
error:
        isl_basic_set_free(hull);
        return isl_stat_error;
}
/* Check if the difference set on intra-node schedule constraints "intra"
 * has any non-trivial lineality space.
 * If so, then extend the difference set to a difference set
 * on equivalent elements.  That is, if "intra" is
 *
 *	{ y - x : (x,y) \in V }
 *
 * and elements are equivalent if they have the same image under f,
 * then return
 *
 *	{ y' - x' : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
 *
 * or, since f is linear,
 *
 *	{ y' - x' : (x,y) \in V and f(y - x) = f(y' - x') }
 *
 * The results of the search for non-trivial lineality spaces are stored
 * in "data".
 */
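/* Continuing the sketch above: if the difference set contains the element
 * y - x = (1, 1) and f(d) = d_0 - d_1, then every difference d' with
 * d'_0 - d'_1 = 0 is equivalent to (1, 1) and is added to the returned set,
 * so that the validity constraints are also imposed on these equivalent
 * differences.
 */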
static __isl_give isl_union_set *exploit_intra_lineality(
        __isl_take isl_union_set *intra,
        struct isl_exploit_lineality_data *data)
{
        isl_union_set *lineality;
        isl_union_set *uset;

        data->any_non_trivial = isl_bool_false;
        lineality = isl_union_set_copy(intra);
        lineality = isl_union_set_combined_lineality_space(lineality);
        if (isl_union_set_foreach_set(lineality, &add_lineality, data) < 0)
                data->any_non_trivial = isl_bool_error;
        isl_union_set_free(lineality);

        if (data->any_non_trivial < 0)
                return isl_union_set_free(intra);
        if (!data->any_non_trivial)
                return intra;

        uset = isl_union_set_copy(intra);
        intra = isl_union_set_subtract(intra, isl_union_set_copy(data->mask));
        uset = isl_union_set_apply(uset, isl_union_map_copy(data->equivalent));
        intra = isl_union_set_union(intra, uset);

        intra = isl_union_set_remove_divs(intra);

        return intra;
}
/* If the difference set on intra-node schedule constraints was found to have
 * any non-trivial lineality space by exploit_intra_lineality,
 * as recorded in "data", then extend the inter-node
 * schedule constraints "inter" to schedule constraints on equivalent elements.
 * That is, if "inter" is V and
 * elements are equivalent if they have the same image under f, then return
 *
 *	{ (x', y') : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
 */
static __isl_give isl_union_map *exploit_inter_lineality(
        __isl_take isl_union_map *inter,
        struct isl_exploit_lineality_data *data)
{
        isl_union_map *umap;

        if (data->any_non_trivial < 0)
                return isl_union_map_free(inter);
        if (!data->any_non_trivial)
                return inter;

        umap = isl_union_map_copy(inter);
        inter = isl_union_map_subtract_range(inter,
                        isl_union_set_copy(data->mask));
        umap = isl_union_map_apply_range(umap,
                        isl_union_map_copy(data->equivalent));
        inter = isl_union_map_union(inter, umap);
        umap = isl_union_map_copy(inter);
        inter = isl_union_map_subtract_domain(inter,
                        isl_union_set_copy(data->mask));
        umap = isl_union_map_apply_range(isl_union_map_copy(data->equivalent),
                        umap);
        inter = isl_union_map_union(inter, umap);

        inter = isl_union_map_remove_divs(inter);

        return inter;
}
/* For each (conditional) validity edge in "graph",
 * add the corresponding dependence relation using "add"
 * to a collection of dependence relations and return the result.
 * If "coincidence" is set, then coincidence edges are considered as well.
 */
static __isl_give isl_union_map *collect_validity(struct isl_sched_graph *graph,
        __isl_give isl_union_map *(*add)(__isl_take isl_union_map *umap,
                struct isl_sched_edge *edge), int coincidence)
{
        int i;
        isl_space *space;
        isl_union_map *umap;

        space = isl_space_copy(graph->node[0].space);
        umap = isl_union_map_empty(space);

        for (i = 0; i < graph->n_edge; ++i) {
                struct isl_sched_edge *edge = &graph->edge[i];

                if (!is_any_validity(edge) &&
                    (!coincidence || !is_coincidence(edge)))
                        continue;

                umap = add(umap, edge);
        }

        return umap;
}
/* For each dependence relation on a (conditional) validity edge
 * from a node to itself,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation and collect the results.
 * If "coincidence" is set, then coincidence edges are considered as well.
 *
 * In particular, for each dependence relation R, constraints
 * on coefficients (c_0, c_x) are constructed such that
 *
 *	c_0 + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
 *
 * If the schedule_treat_coalescing option is set, then some constraints
 * that could be exploited to construct coalescing schedules
 * are removed before the dual is computed, but after the parameters
 * have been projected out.
 * The entire computation is essentially the same as that performed
 * by intra_coefficients, except that it operates on multiple
 * edges together and that the parameters are always projected out.
 *
 * Additionally, exploit any non-trivial lineality space
 * in the difference set after removing coalescing constraints and
 * store the results of the non-trivial lineality space detection in "data".
 * The procedure is currently run unconditionally, but it is unlikely
 * to find any non-trivial lineality spaces if no coalescing constraints
 * have been removed.
 *
 * Note that if a dependence relation is a union of basic maps,
 * then each basic map needs to be treated individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * The collected validity constraints are therefore not coalesced and
 * it is assumed that they are not coalesced automatically.
 * Duplicate basic maps can be removed, however.
 * In particular, if the same basic map appears as a disjunct
 * in multiple edges, then it only needs to be carried once.
 */
static __isl_give isl_basic_set_list *collect_intra_validity(isl_ctx *ctx,
        struct isl_sched_graph *graph, int coincidence,
        struct isl_exploit_lineality_data *data)
{
        isl_union_map *intra;
        isl_union_set *delta;
        isl_basic_set_list *list;

        intra = collect_validity(graph, &add_intra, coincidence);
        delta = isl_union_map_deltas(intra);
        delta = isl_union_set_project_out_all_params(delta);
        delta = isl_union_set_remove_divs(delta);
        if (isl_options_get_schedule_treat_coalescing(ctx))
                delta = union_drop_coalescing_constraints(ctx, graph, delta);
        delta = exploit_intra_lineality(delta, data);
        list = isl_union_set_get_basic_set_list(delta);
        isl_union_set_free(delta);

        return isl_basic_set_list_coefficients(list);
}
/* For each dependence relation on a (conditional) validity edge
 * from a node to some other node,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation and collect the results.
 * If "coincidence" is set, then coincidence edges are considered as well.
 *
 * In particular, for each dependence relation R, constraints
 * on coefficients (c_0, c_n, c_x, c_y) are constructed such that
 *
 *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
 *
 * This computation is essentially the same as that performed
 * by inter_coefficients, except that it operates on multiple
 * edges together.
 *
 * Additionally, exploit any non-trivial lineality space
 * that may have been discovered by collect_intra_validity
 * (as stored in "data").
 *
 * Note that if a dependence relation is a union of basic maps,
 * then each basic map needs to be treated individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * The collected validity constraints are therefore not coalesced and
 * it is assumed that they are not coalesced automatically.
 * Duplicate basic maps can be removed, however.
 * In particular, if the same basic map appears as a disjunct
 * in multiple edges, then it only needs to be carried once.
 */
static __isl_give isl_basic_set_list *collect_inter_validity(
        struct isl_sched_graph *graph, int coincidence,
        struct isl_exploit_lineality_data *data)
{
        isl_union_map *inter;
        isl_union_set *wrap;
        isl_basic_set_list *list;

        inter = collect_validity(graph, &add_inter, coincidence);
        inter = exploit_inter_lineality(inter, data);
        inter = isl_union_map_remove_divs(inter);
        wrap = isl_union_map_wrap(inter);
        list = isl_union_set_get_basic_set_list(wrap);
        isl_union_set_free(wrap);
        return isl_basic_set_list_coefficients(list);
}
/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many of the "n_edge" groups of
 * dependences as possible based on the corresponding coefficient
 * constraints and return the lexicographically smallest non-trivial solution.
 * "intra" is the sequence of coefficient constraints for intra-node edges.
 * "inter" is the sequence of coefficient constraints for inter-node edges.
 * If "want_integral" is set, then compute an integral solution
 * for the coefficients rather than using the numerators
 * of a rational solution.
 * "carry_inter" indicates whether inter-node edges should be carried or
 * only respected.
 *
 * If none of the "n_edge" groups can be carried
 * then return an empty vector.
 */
static __isl_give isl_vec *compute_carrying_sol_coef(isl_ctx *ctx,
        struct isl_sched_graph *graph, int n_edge,
        __isl_keep isl_basic_set_list *intra,
        __isl_keep isl_basic_set_list *inter, int want_integral,
        int carry_inter)
{
        isl_basic_set *lp;

        if (setup_carry_lp(ctx, graph, n_edge, intra, inter, carry_inter) < 0)
                return NULL;

        lp = isl_basic_set_copy(graph->lp);
        return non_neg_lexmin(graph, lp, n_edge, want_integral);
}
/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many of the validity dependences
 * as possible and
 * return the lexicographically smallest non-trivial solution.
 * If "fallback" is set, then the carrying is performed as a fallback
 * for the Pluto-like scheduler.
 * If "coincidence" is set, then try and carry coincidence edges as well.
 *
 * The variable "n_edge" stores the number of groups that should be carried.
 * If none of the "n_edge" groups can be carried
 * then return an empty vector.
 * If, moreover, "n_edge" is zero, then the LP problem does not even
 * need to be constructed.
 *
 * If a fallback solution is being computed, then compute an integral solution
 * for the coefficients rather than using the numerators
 * of a rational solution.
 *
 * If a fallback solution is being computed, if there are any intra-node
 * dependences, and if requested by the user, then first try
 * to only carry those intra-node dependences.
 * If this fails to carry any dependences, then try again
 * with the inter-node dependences included.
 */
static __isl_give isl_vec *compute_carrying_sol(isl_ctx *ctx,
        struct isl_sched_graph *graph, int fallback, int coincidence)
{
        isl_size n_intra, n_inter;
        int n_edge;
        struct isl_carry carry = { 0 };
        isl_vec *sol;

        carry.intra = collect_intra_validity(ctx, graph, coincidence,
                                                &carry.lineality);
        carry.inter = collect_inter_validity(graph, coincidence,
                                                &carry.lineality);
        n_intra = isl_basic_set_list_n_basic_set(carry.intra);
        n_inter = isl_basic_set_list_n_basic_set(carry.inter);
        if (n_intra < 0 || n_inter < 0)
                goto error;

        if (fallback && n_intra > 0 &&
            isl_options_get_schedule_carry_self_first(ctx)) {
                sol = compute_carrying_sol_coef(ctx, graph, n_intra,
                                carry.intra, carry.inter, fallback, 0);
                if (!sol || sol->size != 0 || n_inter == 0) {
                        isl_carry_clear(&carry);
                        return sol;
                }
                isl_vec_free(sol);
        }

        n_edge = n_intra + n_inter;
        if (n_edge == 0) {
                isl_carry_clear(&carry);
                return isl_vec_alloc(ctx, 0);
        }

        sol = compute_carrying_sol_coef(ctx, graph, n_edge,
                                carry.intra, carry.inter, fallback, 1);
        isl_carry_clear(&carry);
        return sol;
error:
        isl_carry_clear(&carry);
        return NULL;
}
/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * If "fallback" is set, then the carrying is performed as a fallback
 * for the Pluto-like scheduler.
 * If "coincidence" is set, then try and carry coincidence edges as well.
 *
 * If there are no validity dependences, then no dependence can be carried and
 * the procedure is guaranteed to fail.  If there is more than one component,
 * then try computing a schedule on each component separately
 * to prevent or at least postpone this failure.
 *
 * If a schedule row is computed, then check that dependences are carried
 * for at least one of the edges.
 *
 * If the computed schedule row turns out to be trivial on one or
 * more nodes where it should not be trivial, then we throw it away
 * and try again on each component separately.
 *
 * If there is only one component, then we accept the schedule row anyway,
 * but we do not consider it as a complete row and therefore do not
 * increment graph->n_row.  Note that the ranks of the nodes that
 * do get a non-trivial schedule part will get updated regardless and
 * graph->maxvar is computed based on these ranks.  The test for
 * whether more schedule rows are required in compute_schedule_wcc
 * is therefore not affected.
 *
 * Insert a band corresponding to the schedule row at position "node"
 * of the schedule tree and continue with the construction of the schedule.
 * This insertion and the continued construction is performed by split_scaled
 * after optionally checking for non-trivial common divisors.
 */
static __isl_give isl_schedule_node *carry(__isl_take isl_schedule_node *node,
        struct isl_sched_graph *graph, int fallback, int coincidence)
{
        int trivial;
        isl_ctx *ctx;
        isl_vec *sol;

        ctx = isl_schedule_node_get_ctx(node);
        sol = compute_carrying_sol(ctx, graph, fallback, coincidence);
        if (!sol)
                return isl_schedule_node_free(node);
        if (sol->size == 0) {
                isl_vec_free(sol);
                if (graph->scc > 1)
                        return compute_component_schedule(node, graph, 1);
                isl_die(ctx, isl_error_unknown, "unable to carry dependences",
                        return isl_schedule_node_free(node));
        }

        trivial = is_any_trivial(graph, sol);
        if (trivial < 0) {
                sol = isl_vec_free(sol);
        } else if (trivial && graph->scc > 1) {
                isl_vec_free(sol);
                return compute_component_schedule(node, graph, 1);
        }

        if (update_schedule(graph, sol, 0) < 0)
                return isl_schedule_node_free(node);
        if (trivial)
                graph->n_row--;

        return split_scaled(node, graph);
}
/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * Do so as a fallback for the Pluto-like scheduler.
 * If "coincidence" is set, then try and carry coincidence edges as well.
 */
static __isl_give isl_schedule_node *carry_fallback(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
        int coincidence)
{
        return carry(node, graph, 1, coincidence);
}

/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * Do so for the case where the Feautrier scheduler was selected
 * by the user.
 */
static __isl_give isl_schedule_node *carry_feautrier(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
        return carry(node, graph, 0, 0);
}

/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 * Do so as a fallback for the Pluto-like scheduler.
 */
static __isl_give isl_schedule_node *carry_dependences(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
        return carry_fallback(node, graph, 0);
}

/* Construct a schedule row for each node such that as many validity or
 * coincidence dependences as possible are carried and
 * then continue with the next band.
 * Do so as a fallback for the Pluto-like scheduler.
 */
static __isl_give isl_schedule_node *carry_coincidence(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
        return carry_fallback(node, graph, 1);
}
/* Topologically sort statements mapped to the same schedule iteration
 * and insert a sequence node in front of "node"
 * corresponding to this order.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * If it turns out to be impossible to sort the statements apart,
 * because different dependences impose different orderings
 * on the statements, then we extend the schedule such that
 * it carries at least one more dependence.
 */
static __isl_give isl_schedule_node *sort_statements(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
        int initialized)
{
        isl_ctx *ctx;
        isl_union_set_list *filters;

        ctx = isl_schedule_node_get_ctx(node);
        if (graph->n < 1)
                isl_die(ctx, isl_error_internal,
                        "graph should have at least one node",
                        return isl_schedule_node_free(node));

        if (graph->n == 1)
                return node;

        if (update_edges(ctx, graph) < 0)
                return isl_schedule_node_free(node);

        if (graph->n_edge == 0)
                return node;

        if (detect_sccs(ctx, graph) < 0)
                return isl_schedule_node_free(node);

        if (graph->scc < graph->n) {
                if (!initialized && compute_maxvar(graph) < 0)
                        return isl_schedule_node_free(node);
                return carry_dependences(node, graph);
        }

        filters = extract_sccs(ctx, graph);
        node = isl_schedule_node_insert_sequence(node, filters);

        return node;
}
/* Are there any (non-empty) (conditional) validity edges in the graph?
 */
static int has_validity_edges(struct isl_sched_graph *graph)
{
        int i;

        for (i = 0; i < graph->n_edge; ++i) {
                int empty;

                empty = isl_map_plain_is_empty(graph->edge[i].map);
                if (empty < 0)
                        return -1;
                if (empty)
                        continue;
                if (is_any_validity(&graph->edge[i]))
                        return 1;
        }

        return 0;
}
/* Should we apply a Feautrier step?
 * That is, did the user request the Feautrier algorithm and are
 * there any validity dependences (left)?
 */
static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
{
        if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
                return 0;

        return has_validity_edges(graph);
}
/* Compute a schedule for a connected dependence graph using Feautrier's
 * multi-dimensional scheduling algorithm and return the updated schedule node.
 *
 * The original algorithm is described in [1].
 * The main idea is to minimize the number of scheduling dimensions, by
 * trying to satisfy as many dependences as possible per scheduling dimension.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 * Problem, Part II: Multi-Dimensional Time.
 * In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
        isl_schedule_node *node, struct isl_sched_graph *graph)
{
        return carry_feautrier(node, graph);
}
/* Turn off the "local" bit on all (condition) edges.
 */
static void clear_local_edges(struct isl_sched_graph *graph)
{
        int i;

        for (i = 0; i < graph->n_edge; ++i)
                if (is_condition(&graph->edge[i]))
                        clear_local(&graph->edge[i]);
}
5576 /* Does "graph" have both condition and conditional validity edges?
5578 static int need_condition_check(struct isl_sched_graph
*graph
)
5581 int any_condition
= 0;
5582 int any_conditional_validity
= 0;
5584 for (i
= 0; i
< graph
->n_edge
; ++i
) {
5585 if (is_condition(&graph
->edge
[i
]))
5587 if (is_conditional_validity(&graph
->edge
[i
]))
5588 any_conditional_validity
= 1;
5591 return any_condition
&& any_conditional_validity
;
5594 /* Does "graph" contain any coincidence edge?
5596 static int has_any_coincidence(struct isl_sched_graph
*graph
)
5600 for (i
= 0; i
< graph
->n_edge
; ++i
)
5601 if (is_coincidence(&graph
->edge
[i
]))
/* Extract the final schedule row as a map with the iteration domain
 * of "node" as domain.
 */
static __isl_give isl_map *final_row(struct isl_sched_node *node)
{
        isl_multi_aff *ma;
        isl_size n_row;

        n_row = isl_mat_rows(node->sched);
        if (n_row < 0)
                return NULL;
        ma = node_extract_partial_schedule_multi_aff(node, n_row - 1, 1);
        return isl_map_from_multi_aff(ma);
}
/* Is the conditional validity dependence in the edge with index "edge_index"
 * violated by the latest (i.e., final) row of the schedule?
 * That is, is i scheduled after j
 * for any conditional validity dependence i -> j?
 */
static int is_violated(struct isl_sched_graph *graph, int edge_index)
{
        isl_map *src_sched, *dst_sched, *map;
        struct isl_sched_edge *edge = &graph->edge[edge_index];
        int empty;

        src_sched = final_row(edge->src);
        dst_sched = final_row(edge->dst);
        map = isl_map_copy(edge->map);
        map = isl_map_apply_domain(map, src_sched);
        map = isl_map_apply_range(map, dst_sched);
        map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
        empty = isl_map_is_empty(map);
        isl_map_free(map);
        if (empty < 0)
                return -1;

        return !empty;
}
5648 /* Does "graph" have any satisfied condition edges that
5649 * are adjacent to the conditional validity constraint with
5650 * domain "conditional_source" and range "conditional_sink"?
5652 * A satisfied condition is one that is not local.
5653 * If a condition was forced to be local already (i.e., marked as local)
5654 * then there is no need to check if it is in fact local.
5656 * Additionally, mark all adjacent condition edges found as local.
5658 static int has_adjacent_true_conditions(struct isl_sched_graph
*graph
,
5659 __isl_keep isl_union_set
*conditional_source
,
5660 __isl_keep isl_union_set
*conditional_sink
)
5665 for (i
= 0; i
< graph
->n_edge
; ++i
) {
5666 int adjacent
, local
;
5667 isl_union_map
*condition
;
5669 if (!is_condition(&graph
->edge
[i
]))
5671 if (is_local(&graph
->edge
[i
]))
5674 condition
= graph
->edge
[i
].tagged_condition
;
5675 adjacent
= domain_intersects(condition
, conditional_sink
);
5676 if (adjacent
>= 0 && !adjacent
)
5677 adjacent
= range_intersects(condition
,
5678 conditional_source
);
5684 set_local(&graph
->edge
[i
]);
5686 local
= is_condition_false(&graph
->edge
[i
]);
/* Are there any violated conditional validity dependences with
 * adjacent condition dependences that are not local with respect
 * to the current schedule?
 * That is, is the conditional validity constraint violated?
 *
 * Additionally, mark all those adjacent condition dependences as local.
 * We also mark those adjacent condition dependences that were not marked
 * as local before, but just happened to be local already.  This ensures
 * that they remain local if the schedule is recomputed.
 *
 * We first collect domain and range of all violated conditional validity
 * dependences and then check if there are any adjacent non-local
 * condition dependences.
 */
static int has_violated_conditional_constraint(isl_ctx *ctx,
        struct isl_sched_graph *graph)
{
        int i;
        int any = 0;
        isl_union_set *source, *sink;

        source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
        sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
        for (i = 0; i < graph->n_edge; ++i) {
                isl_union_set *uset;
                isl_union_map *umap;
                int violated;

                if (!is_conditional_validity(&graph->edge[i]))
                        continue;

                violated = is_violated(graph, i);
                if (violated < 0)
                        goto error;
                if (!violated)
                        continue;

                any = 1;

                umap = isl_union_map_copy(graph->edge[i].tagged_validity);
                uset = isl_union_map_domain(umap);
                source = isl_union_set_union(source, uset);
                source = isl_union_set_coalesce(source);

                umap = isl_union_map_copy(graph->edge[i].tagged_validity);
                uset = isl_union_map_range(umap);
                sink = isl_union_set_union(sink, uset);
                sink = isl_union_set_coalesce(sink);
        }

        if (any)
                any = has_adjacent_true_conditions(graph, source, sink);

        isl_union_set_free(source);
        isl_union_set_free(sink);
        return any;
error:
        isl_union_set_free(source);
        isl_union_set_free(sink);
        return -1;
}
/* Examine the current band (the rows between graph->band_start and
 * graph->n_total_row), deciding whether to drop it or add it to "node"
 * and then continue with the computation of the next band, if any.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * The caller keeps looking for a new row as long as
 * graph->n_row < graph->maxvar.  If the latest attempt to find
 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
 * then we either
 *  - split between SCCs and start over (assuming we found an interesting
 *	pair of SCCs between which to split)
 *  - continue with the next band (assuming the current band has at least
 *	one row)
 *  - if there is more than one SCC left, then split along all SCCs
 *  - if outer coincidence needs to be enforced, then try to carry as many
 *	validity or coincidence dependences as possible and
 *	continue with the next band
 *  - try to carry as many validity dependences as possible and
 *	continue with the next band
 * In each case, we first insert a band node in the schedule tree
 * if any rows have been computed.
 *
 * If the caller managed to complete the schedule and the current band
 * is empty, then finish off by topologically
 * sorting the statements based on the remaining dependences.
 * If, on the other hand, the current band has at least one row,
 * then continue with the next band.  Note that this next band
 * will necessarily be empty, but the graph may still be split up
 * into weakly connected components before arriving back here.
 */
static __isl_give isl_schedule_node *compute_schedule_finish_band(
        __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
        int initialized)
{
        int empty;

        if (!node)
                return NULL;

        empty = graph->n_total_row == graph->band_start;
        if (graph->n_row < graph->maxvar) {
                isl_ctx *ctx;

                ctx = isl_schedule_node_get_ctx(node);
                if (!ctx->opt->schedule_maximize_band_depth && !empty)
                        return compute_next_band(node, graph, 1);
                if (graph->src_scc >= 0)
                        return compute_split_schedule(node, graph);
                if (!empty)
                        return compute_next_band(node, graph, 1);
                if (graph->scc > 1)
                        return compute_component_schedule(node, graph, 1);
                if (!initialized && compute_maxvar(graph) < 0)
                        return isl_schedule_node_free(node);
                if (isl_options_get_schedule_outer_coincidence(ctx))
                        return carry_coincidence(node, graph);
                return carry_dependences(node, graph);
        }

        if (!empty)
                return compute_next_band(node, graph, 1);
        return sort_statements(node, graph, initialized);
}
/* Construct a band of schedule rows for a connected dependence graph.
 * The caller is responsible for determining the strongly connected
 * components and calling compute_maxvar first.
 *
 * We try to find a sequence of as many schedule rows as possible that result
 * in non-negative dependence distances (independent of the previous rows
 * in the sequence, i.e., such that the sequence is tilable), with as
 * many of the initial rows as possible satisfying the coincidence constraints.
 * The computation stops if we can't find any more rows or if we have found
 * all the rows we wanted to find.
 *
 * If ctx->opt->schedule_outer_coincidence is set, then we force the
 * outermost dimension to satisfy the coincidence constraints.  If this
 * turns out to be impossible, we fall back on the general scheme above
 * and try to carry as many dependences as possible.
 *
 * If "graph" contains both condition and conditional validity dependences,
 * then we need to check that the conditional schedule constraint
 * is satisfied, i.e., there are no violated conditional validity dependences
 * that are adjacent to any non-local condition dependences.
 * If there are, then we mark all those adjacent condition dependences
 * as local and recompute the current band.  Those dependences that
 * are marked local will then be forced to be local.
 * The initial computation is performed with no dependences marked as local.
 * If we are lucky, then there will be no violated conditional validity
 * dependences adjacent to any non-local condition dependences.
 * Otherwise, we mark some additional condition dependences as local and
 * recompute.  We continue this process until there are no violations left or
 * until we are no longer able to compute a schedule.
 * Since there are only a finite number of dependences,
 * there will only be a finite number of iterations.
 */
static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
	struct isl_sched_graph *graph)
	int has_coincidence;
	int use_coincidence;
	int force_coincidence = 0;
	int check_conditional;

	if (sort_sccs(graph) < 0)
		return isl_stat_error;

	clear_local_edges(graph);
	check_conditional = need_condition_check(graph);
	has_coincidence = has_any_coincidence(graph);

	if (ctx->opt->schedule_outer_coincidence)
		force_coincidence = 1;

	use_coincidence = has_coincidence;
	while (graph->n_row < graph->maxvar) {
		graph->src_scc = -1;
		graph->dst_scc = -1;

		if (setup_lp(ctx, graph, use_coincidence) < 0)
			return isl_stat_error;
		sol = solve_lp(ctx, graph);
			return isl_stat_error;
		if (sol->size == 0) {
			int empty = graph->n_total_row == graph->band_start;

			if (use_coincidence && (!force_coincidence || !empty)) {
				use_coincidence = 0;

		coincident = !has_coincidence || use_coincidence;
		if (update_schedule(graph, sol, coincident) < 0)
			return isl_stat_error;

		if (!check_conditional)
		violated = has_violated_conditional_constraint(ctx, graph);
			return isl_stat_error;
		if (reset_band(graph) < 0)
			return isl_stat_error;
		use_coincidence = has_coincidence;
/* Compute a schedule for a connected dependence graph by considering
 * the graph as a whole and return the updated schedule node.
 *
 * The actual schedule rows of the current band are computed by
 * compute_schedule_wcc_band.  compute_schedule_finish_band takes
 * care of integrating the band into "node" and continuing
 * the computation.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
	ctx = isl_schedule_node_get_ctx(node);
	if (compute_schedule_wcc_band(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	return compute_schedule_finish_band(node, graph, 1);
/* Clustering information used by compute_schedule_wcc_clustering.
 *
 * "n" is the number of SCCs in the original dependence graph
 * "scc" is an array of "n" elements, each representing an SCC
 * of the original dependence graph.  All entries in the same cluster
 * have the same number of schedule rows.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
 * where each cluster is represented by the index of the first SCC
 * in the cluster.  Initially, each SCC belongs to a cluster containing
 * only that single SCC.
 *
 * "scc_in_merge" is used by merge_clusters_along_edge to keep
 * track of which SCCs need to be merged.
 *
 * "cluster" contains the merged clusters of SCCs after the clustering
 * has completed.
 *
 * "scc_node" is a temporary data structure used inside copy_partial.
 * For each SCC, it keeps track of the number of nodes in the SCC
 * that have already been copied.
 */
struct isl_clustering {
	struct isl_sched_graph *scc;
	struct isl_sched_graph *cluster;
/* Initialize the clustering data structure "c" from "graph".
 *
 * In particular, allocate memory, extract the SCCs from "graph"
 * into c->scc, initialize scc_cluster and construct
 * a band of schedule rows for each SCC.
 * Within each SCC, there is only one SCC by definition.
 * Each SCC initially belongs to a cluster containing only that SCC.
 */
static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *graph)
	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
	c->scc_node = isl_calloc_array(ctx, int, c->n);
	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
	if (!c->scc || !c->cluster ||
	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
		return isl_stat_error;

	for (i = 0; i < c->n; ++i) {
		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
					&edge_scc_exactly, i, &c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_maxvar(&c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = i;
/* Free all memory allocated for "c".
 */
static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
	for (i = 0; i < c->n; ++i)
		graph_free(ctx, &c->scc[i]);
	for (i = 0; i < c->n; ++i)
		graph_free(ctx, &c->cluster[i]);
	free(c->scc_cluster);
	free(c->scc_in_merge);
/* Should we refrain from merging the cluster in "graph" with
 * any other cluster?
 * In particular, is its current schedule band empty and incomplete.
 */
static int bad_cluster(struct isl_sched_graph *graph)
	return graph->n_row < graph->maxvar &&
		graph->n_total_row == graph->band_start;
/* Is "edge" a proximity edge with a non-empty dependence relation?
 */
static isl_bool is_non_empty_proximity(struct isl_sched_edge *edge)
	if (!is_proximity(edge))
		return isl_bool_false;
	return isl_bool_not(isl_map_plain_is_empty(edge->map));
/* Return the index of an edge in "graph" that can be used to merge
 * two clusters in "c".
 * Return graph->n_edge if no such edge can be found.
 * Return -1 on error.
 *
 * In particular, return a proximity edge between two clusters
 * that is not marked "no_merge" and such that neither of the
 * two clusters has an incomplete, empty band.
 *
 * If there are multiple such edges, then try and find the most
 * appropriate edge to use for merging.  In particular, pick the edge
 * with the greatest weight.  If there are multiple of those,
 * then pick one with the shortest distance between
 * the two cluster representatives.
 */
static int find_proximity(struct isl_sched_graph *graph,
	struct isl_clustering *c)
	int i, best = graph->n_edge, best_dist, best_weight;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		prox = is_non_empty_proximity(edge);
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
		dist = c->scc_cluster[edge->dst->scc] -
			c->scc_cluster[edge->src->scc];
		weight = edge->weight;
		if (best < graph->n_edge) {
			if (best_weight > weight)
			if (best_weight == weight && best_dist <= dist)
		best_weight = weight;
/* Internal data structure used in mark_merge_sccs.
 *
 * "graph" is the dependence graph in which a strongly connected
 * component is constructed.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
 * "src" and "dst" are the indices of the nodes that are being merged.
 */
struct isl_mark_merge_sccs_data {
	struct isl_sched_graph *graph;

/* Check whether the cluster containing node "i" depends on the cluster
 * containing node "j".  If "i" and "j" belong to the same cluster,
 * then they are taken to depend on each other to ensure that
 * the resulting strongly connected component consists of complete
 * clusters.  Furthermore, if "i" and "j" are the two nodes that
 * are being merged, then they are taken to depend on each other as well.
 * Otherwise, check if there is a (conditional) validity dependence
 * from node[j] to node[i], forcing node[i] to follow node[j].
 */
static isl_bool cluster_follows(int i, int j, void *user)
	struct isl_mark_merge_sccs_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	int *scc_cluster = data->scc_cluster;

	if (data->src == i && data->dst == j)
		return isl_bool_true;
	if (data->src == j && data->dst == i)
		return isl_bool_true;
	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
		return isl_bool_true;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
/* Mark all SCCs that belong to either of the two clusters in "c"
 * connected by the edge in "graph" with index "edge", or to any
 * of the intermediate clusters.
 * The marking is recorded in c->scc_in_merge.
 *
 * The given edge has been selected for merging two clusters,
 * meaning that there is at least a proximity edge between the two nodes.
 * However, there may also be (indirect) validity dependences
 * between the two nodes.  When merging the two clusters, all clusters
 * containing one or more of the intermediate nodes along the
 * indirect validity dependences need to be merged in as well.
 *
 * First collect all such nodes by computing the strongly connected
 * component (SCC) containing the two nodes connected by the edge, where
 * the two nodes are considered to depend on each other to make
 * sure they end up in the same SCC.  Similarly, each node is considered
 * to depend on every other node in the same cluster to ensure
 * that the SCC consists of complete clusters.
 *
 * Then the original SCCs that contain any of these nodes are marked
 * in c->scc_in_merge.
 */
static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	int edge, struct isl_clustering *c)
	struct isl_mark_merge_sccs_data data;
	struct isl_tarjan_graph *g;

	for (i = 0; i < c->n; ++i)
		c->scc_in_merge[i] = 0;

	data.scc_cluster = c->scc_cluster;
	data.src = graph->edge[edge].src - graph->node;
	data.dst = graph->edge[edge].dst - graph->node;

	g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
					&cluster_follows, &data);
		isl_die(ctx, isl_error_internal,
			"expecting at least two nodes in component",
	if (g->order[--i] != -1)
		isl_die(ctx, isl_error_internal,
			"expecting end of component marker", goto error);

	for (--i; i >= 0 && g->order[i] != -1; --i) {
		int scc = graph->node[g->order[i]].scc;
		c->scc_in_merge[scc] = 1;

	isl_tarjan_graph_free(g);
	isl_tarjan_graph_free(g);
	return isl_stat_error;
/* Construct the identifier "cluster_i".
 */
static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
	snprintf(name, sizeof(name), "cluster_%d", i);
	return isl_id_alloc(ctx, name, NULL);
/* Construct the space of the cluster with index "i" containing
 * the strongly connected component "scc".
 *
 * In particular, construct a space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of "scc".
 */
static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
	nvar = scc->n_total_row - scc->band_start;
	space = isl_space_copy(scc->node[0].space);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, nvar);
	id = cluster_id(isl_space_get_ctx(space), i);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
/* Collect the domain of the graph for merging clusters.
 *
 * In particular, for each cluster with first SCC "i", construct
 * a set in the space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of the cluster.
 */
static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
	isl_union_set *domain;

	space = isl_space_params_alloc(ctx, 0);
	domain = isl_union_set_empty(space);

	for (i = 0; i < graph->scc; ++i) {
		if (!c->scc_in_merge[i])
		if (c->scc_cluster[i] != i)
		space = cluster_space(&c->scc[i], i);
		domain = isl_union_set_add_set(domain, isl_set_universe(space));
/* Construct a map from the original instances to the corresponding
 * cluster instance in the current bands of the clusters in "c".
 */
static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
	isl_union_map *cluster_map;

	space = isl_space_params_alloc(ctx, 0);
	cluster_map = isl_union_map_empty(space);
	for (i = 0; i < graph->scc; ++i) {
		if (!c->scc_in_merge[i])

		id = cluster_id(ctx, c->scc_cluster[i]);
		start = c->scc[i].band_start;
		n = c->scc[i].n_total_row - start;
		for (j = 0; j < c->scc[i].n; ++j) {
			struct isl_sched_node *node = &c->scc[i].node[j];

			ma = node_extract_partial_schedule_multi_aff(node,
			ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
			map = isl_map_from_multi_aff(ma);
			cluster_map = isl_union_map_add_map(cluster_map, map);
/* Add "umap" to the schedule constraints "sc" of all types of "edge"
 * that are not isl_edge_condition or isl_edge_conditional_validity.
 */
static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
	enum isl_edge_type t;

	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
		if (t == isl_edge_condition ||
		    t == isl_edge_conditional_validity)
		if (!is_type(edge, t))
		sc = isl_schedule_constraints_add(sc, t,
					isl_union_map_copy(umap));
/* Add schedule constraints of types isl_edge_condition and
 * isl_edge_conditional_validity to "sc" by applying "umap" to
 * the domains of the wrapped relations in domain and range
 * of the corresponding tagged constraints of "edge".
 */
static __isl_give isl_schedule_constraints *add_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
	enum isl_edge_type t;
	isl_union_map *tagged;

	for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
		if (!is_type(edge, t))
		if (t == isl_edge_condition)
			tagged = isl_union_map_copy(edge->tagged_condition);
			tagged = isl_union_map_copy(edge->tagged_validity);
		tagged = isl_union_map_zip(tagged);
		tagged = isl_union_map_apply_domain(tagged,
					isl_union_map_copy(umap));
		tagged = isl_union_map_zip(tagged);
		sc = isl_schedule_constraints_add(sc, t, tagged);
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to the original constraints represented by "edge".
 *
 * For non-tagged dependence constraints, the cluster constraints
 * are obtained by applying "cluster_map" to the edge->map.
 *
 * For tagged dependence constraints, "cluster_map" needs to be applied
 * to the domains of the wrapped relations in domain and range
 * of the tagged dependence constraints.  Pick out the mappings
 * from these domains from "cluster_map" and construct their product.
 * This mapping can then be applied to the pair of domains.
 */
static __isl_give isl_schedule_constraints *collect_edge_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
	isl_union_map *umap;
	isl_union_set *uset;
	isl_union_map *umap1, *umap2;

	umap = isl_union_map_from_map(isl_map_copy(edge->map));
	umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(cluster_map));
	umap = isl_union_map_apply_range(umap,
				isl_union_map_copy(cluster_map));
	sc = add_non_conditional_constraints(edge, umap, sc);
	isl_union_map_free(umap);

	if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))

	space = isl_space_domain(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap1 = isl_union_map_copy(cluster_map);
	umap1 = isl_union_map_intersect_domain(umap1, uset);
	space = isl_space_range(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap2 = isl_union_map_copy(cluster_map);
	umap2 = isl_union_map_intersect_domain(umap2, uset);
	umap = isl_union_map_product(umap1, umap2);

	sc = add_conditional_constraints(edge, umap, sc);

	isl_union_map_free(umap);
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to all edges in "graph" between nodes that
 * belong to SCCs that are marked for merging in "scc_in_merge".
 */
static __isl_give isl_schedule_constraints *collect_constraints(
	struct isl_sched_graph *graph, int *scc_in_merge,
	__isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
		if (!scc_in_merge[edge->dst->scc])
		sc = collect_edge_constraints(edge, cluster_map, sc);
/* Construct a dependence graph for scheduling clusters with respect
 * to each other and store the result in "merge_graph".
 * In particular, the nodes of the graph correspond to the schedule
 * dimensions of the current bands of those clusters that have been
 * marked for merging in "c".
 *
 * First construct an isl_schedule_constraints object for this domain
 * by transforming the edges in "graph" to the domain.
 * Then initialize a dependence graph for scheduling from these
 * constraints.
 */
static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
	isl_union_set *domain;
	isl_union_map *cluster_map;
	isl_schedule_constraints *sc;

	domain = collect_domain(ctx, graph, c);
	sc = isl_schedule_constraints_on_domain(domain);
		return isl_stat_error;
	cluster_map = collect_cluster_map(ctx, graph, c);
	sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
	isl_union_map_free(cluster_map);

	r = graph_init(merge_graph, sc);

	isl_schedule_constraints_free(sc);
/* Compute the maximal number of remaining schedule rows that still need
 * to be computed for the nodes that belong to clusters with the maximal
 * dimension for the current band (i.e., the band that is to be merged).
 * Only clusters that are about to be merged are considered.
 * "maxvar" is the maximal dimension for the current band.
 * "c" contains information about the clusters.
 *
 * Return the maximal number of remaining schedule rows or -1 on error.
 */
static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
	for (i = 0; i < c->n; ++i) {
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
		nvar = scc->n_total_row - scc->band_start;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];

			if (node_update_vmap(node) < 0)
			slack = node->nvar - node->rank;
			if (slack > max_slack)
/* If there are any clusters where the dimension of the current band
 * (i.e., the band that is to be merged) is smaller than "maxvar" and
 * if there are any nodes in such a cluster where the number
 * of remaining schedule rows that still need to be computed
 * is greater than "max_slack", then return the smallest current band
 * dimension of all these clusters.  Otherwise return the original value
 * of "maxvar".  Return -1 in case of any error.
 * Only clusters that are about to be merged are considered.
 * "c" contains information about the clusters.
 */
static int limit_maxvar_to_slack(int maxvar, int max_slack,
	struct isl_clustering *c)
	for (i = 0; i < c->n; ++i) {
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
		nvar = scc->n_total_row - scc->band_start;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];

			if (node_update_vmap(node) < 0)
			slack = node->nvar - node->rank;
			if (slack > max_slack) {
/* Adjust merge_graph->maxvar based on the number of remaining schedule rows
 * that still need to be computed.  In particular, if there is a node
 * in a cluster where the dimension of the current band is smaller
 * than merge_graph->maxvar, but the number of remaining schedule rows
 * is greater than that of any node in a cluster with the maximal
 * dimension for the current band (i.e., merge_graph->maxvar),
 * then adjust merge_graph->maxvar to the (smallest) current band dimension
 * of those clusters.  Without this adjustment, the total number of
 * schedule dimensions would be increased, resulting in a skewed view
 * of the number of coincident dimensions.
 * "c" contains information about the clusters.
 *
 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
 * then there is no point in attempting any merge since it will be rejected
 * anyway.  Set merge_graph->maxvar to zero in such cases.
 */
static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
	struct isl_sched_graph *merge_graph, struct isl_clustering *c)
	int max_slack, maxvar;

	max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
		return isl_stat_error;
	maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
		return isl_stat_error;

	if (maxvar < merge_graph->maxvar) {
		if (isl_options_get_schedule_maximize_band_depth(ctx))
			merge_graph->maxvar = 0;
			merge_graph->maxvar = maxvar;
/* Return the number of coincident dimensions in the current band of "graph",
 * where the nodes of "graph" are assumed to be scheduled by a single band.
 */
static int get_n_coincident(struct isl_sched_graph *graph)
	for (i = graph->band_start; i < graph->n_total_row; ++i)
		if (!graph->node[0].coincident[i])

	return i - graph->band_start;
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph", given that
 * coincidence should be maximized?
 *
 * If the number of coincident schedule dimensions in the merged band
 * would be less than the maximal number of coincident schedule dimensions
 * in any of the merged clusters, then the clusters should not be merged.
 */
static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	for (i = 0; i < c->n; ++i) {
		if (!c->scc_in_merge[i])
		n_coincident = get_n_coincident(&c->scc[i]);
		if (n_coincident > max_coincident)
			max_coincident = n_coincident;

	n_coincident = get_n_coincident(merge_graph);

	return isl_bool_ok(n_coincident >= max_coincident);
/* Return the transformation on "node" expressed by the current (and only)
 * band of "merge_graph" applied to the clusters in "c".
 *
 * First find the representation of "node" in its SCC in "c" and
 * extract the transformation expressed by the current band.
 * Then extract the transformation applied by "merge_graph"
 * to the cluster to which this SCC belongs.
 * Combine the two to obtain the complete transformation on the node.
 *
 * Note that the range of the first transformation is an anonymous space,
 * while the domain of the second is named "cluster_X".  The range
 * of the former therefore needs to be adjusted before the two
 */
static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
	struct isl_sched_node *node, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	struct isl_sched_node *scc_node, *cluster_node;
	isl_multi_aff *ma, *ma2;

	scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
	if (scc_node && !is_node(&c->scc[node->scc], scc_node))
		isl_die(ctx, isl_error_internal, "unable to find node",
	start = c->scc[node->scc].band_start;
	n = c->scc[node->scc].n_total_row - start;
	ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
	space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
	cluster_node = graph_find_node(ctx, merge_graph, space);
	if (cluster_node && !is_node(merge_graph, cluster_node))
		isl_die(ctx, isl_error_internal, "unable to find cluster",
			space = isl_space_free(space));
	id = isl_space_get_tuple_id(space, isl_dim_set);
	ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
	isl_space_free(space);
	n = merge_graph->n_total_row;
	ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
	ma = isl_multi_aff_pullback_multi_aff(ma2, ma);

	return isl_map_from_multi_aff(ma);
/* Given a set of distances "set", are they bounded by a small constant
 * in direction "pos"?
 * In practice, check if they are bounded by 2 by checking that there
 * are no elements with a value greater than or equal to 3 or
 * smaller than or equal to -3.
 */
static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
		return isl_bool_error;

	test = isl_set_copy(set);
	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
	bounded = isl_set_is_empty(test);
	if (bounded < 0 || !bounded)

	test = isl_set_copy(set);
	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
	bounded = isl_set_is_empty(test);
/* Does the set "set" have a fixed (but possibly parametric) value
 * at dimension "pos"?
 */
static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
	n = isl_set_dim(set, isl_dim_set);
		return isl_bool_error;
	set = isl_set_copy(set);
	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
	set = isl_set_project_out(set, isl_dim_set, 0, pos);
	single = isl_set_is_singleton(set);
/* Does "map" have a fixed (but possibly parametric) value
 * at dimension "pos" of either its domain or its range?
 */
static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
	set = isl_map_domain(isl_map_copy(map));
	single = has_single_value(set, pos);
	if (single < 0 || single)

	set = isl_map_range(isl_map_copy(map));
	single = has_single_value(set, pos);
/* Does the edge "edge" from "graph" have bounded dependence distances
 * in the merged graph "merge_graph" of a selection of clusters in "c"?
 *
 * Extract the complete transformations of the source and destination
 * nodes of the edge, apply them to the edge constraints and
 * compute the differences.  Finally, check if these differences are bounded
 * in each direction.
 *
 * If the dimension of the band is greater than the number of
 * dimensions that can be expected to be optimized by the edge
 * (based on its weight), then also allow the differences to be unbounded
 * in the remaining dimensions, but only if either the source or
 * the destination has a fixed value in that direction.
 * This allows a statement that produces values that are used by
 * several instances of another statement to be merged with that
 * other statement.
 * However, merging such clusters will introduce an inherently
 * large proximity distance inside the merged cluster, meaning
 * that proximity distances will no longer be optimized in
 * subsequent merges.  These merges are therefore only allowed
 * after all other possible merges have been tried.
 * The first time such a merge is encountered, the weight of the edge
 * is replaced by a negative weight.  The second time (i.e., after
 * all merges over edges with a non-negative weight have been tried),
 * the merge is allowed.
 */
static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	map = isl_map_copy(edge->map);
	t = extract_node_transformation(ctx, edge->src, c, merge_graph);
	map = isl_map_apply_domain(map, t);
	t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
	map = isl_map_apply_range(map, t);
	dist = isl_map_deltas(isl_map_copy(map));

	bounded = isl_bool_true;
	n = isl_set_dim(dist, isl_dim_set);
	n_slack = n - edge->weight;
	if (edge->weight < 0)
		n_slack -= graph->max_weight + 1;
	for (i = 0; i < n; ++i) {
		isl_bool bounded_i, singular_i;

		bounded_i = distance_is_bounded(dist, i);
		if (edge->weight >= 0)
			bounded = isl_bool_false;
		singular_i = has_singular_src_or_dst(map, i);
		bounded = isl_bool_false;

	if (!bounded && i >= n && edge->weight >= 0)
		edge->weight -= graph->max_weight + 1;

	return isl_bool_error;
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * In particular, is there at least one proximity constraint
 * that is optimized by the merge?
 *
 * A proximity constraint is considered to be optimized
 * if the dependence distances are small.
 */
static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_proximity(edge))
		if (!c->scc_in_merge[edge->src->scc])
		if (!c->scc_in_merge[edge->dst->scc])
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
		bounded = has_bounded_distances(ctx, edge, graph, c,
		if (bounded < 0 || bounded)

	return isl_bool_false;
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * If the current band is empty, then the clusters should not be merged.
 *
 * If the band depth should be maximized and the merge schedule
 * is incomplete (meaning that the dimension of some of the schedule
 * bands in the original schedule will be reduced), then the clusters
 * should not be merged.
 *
 * If the schedule_maximize_coincidence option is set, then check that
 * the number of coincident schedule dimensions is not reduced.
 *
 * Finally, only allow the merge if at least one proximity
 * constraint is optimized.
 */
static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
	if (merge_graph->n_total_row == merge_graph->band_start)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
	    merge_graph->n_total_row < merge_graph->maxvar)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
		ok = ok_to_merge_coincident(c, merge_graph);

	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
/* Apply the schedule in "t_node" to the "n" rows starting at "first"
 * of the schedule in "node" and return the result.
 *
 * That is, essentially compute
 *
 *	T * N(first:first+n-1)
 *
 * taking into account the constant term and the parameter coefficients
 */
static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
	struct isl_sched_node *t_node, struct isl_sched_node *node,
	isl_size n_row, n_col;

	n_param = node->nparam;
	n_row = isl_mat_rows(t_node->sched);
	n_col = isl_mat_cols(node->sched);
	if (n_row < 0 || n_col < 0)
	t = isl_mat_alloc(ctx, n_row, n_col);
	for (i = 0; i < n_row; ++i) {
		isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
		isl_seq_clr(t->row[i] + 1 + n_param, n_var);
		for (j = 0; j < n; ++j)
			isl_seq_addmul(t->row[i],
					t_node->sched->row[i][1 + n_param + j],
					node->sched->row[first + j],
					1 + n_param + n_var);
/* Apply the cluster schedule in "t_node" to the current band
 * schedule of the nodes in "graph".
 *
 * In particular, replace the rows starting at band_start
 * by the result of applying the cluster schedule in "t_node"
 * to the original rows.
 *
 * The coincidence of the schedule is determined by the coincidence
 * of the cluster schedule.
 */
static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_sched_node *t_node)
	start = graph->band_start;
	n = graph->n_total_row - start;

	n_new = isl_mat_rows(t_node->sched);
		return isl_stat_error;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		t = node_transformation(ctx, t_node, node, start, n);
		node->sched = isl_mat_drop_rows(node->sched, start, n);
		node->sched = isl_mat_concat(node->sched, t);
		node->sched_map = isl_map_free(node->sched_map);
			return isl_stat_error;
		for (j = 0; j < n_new; ++j)
			node->coincident[start + j] = t_node->coincident[j];

	graph->n_total_row -= n;
	graph->n_total_row += n_new;
	graph->n_row += n_new;
/* Merge the clusters marked for merging in "c" into a single
 * cluster using the cluster schedule in the current band of "merge_graph".
 * The representative SCC for the new cluster is the SCC with
 * the smallest index.
 *
 * The current band schedule of each SCC in the new cluster is obtained
 * by applying the schedule of the corresponding original cluster
 * to the original band schedule.
 * All SCCs in the new cluster have the same number of schedule rows.
 */
static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	for (i = 0; i < c->n; ++i) {
		struct isl_sched_node *node;

		if (!c->scc_in_merge[i])

		space = cluster_space(&c->scc[i], c->scc_cluster[i]);
		node = graph_find_node(ctx, merge_graph, space);
		isl_space_free(space);
			return isl_stat_error;
		if (!is_node(merge_graph, node))
			isl_die(ctx, isl_error_internal,
				"unable to find cluster",
				return isl_stat_error);
		if (transform(ctx, &c->scc[i], node) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = cluster;
/* Try and merge the clusters of SCCs marked in c->scc_in_merge
 * by scheduling the current cluster bands with respect to each other.
 *
 * Construct a dependence graph with a space for each cluster and
 * with the coordinates of each space corresponding to the schedule
 * dimensions of the current band of that cluster.
 * Construct a cluster schedule in this cluster dependence graph and
 * apply it to the current cluster bands if it is applicable
 * according to ok_to_merge.
 *
 * If the number of remaining schedule dimensions in a cluster
 * with a non-maximal current schedule dimension is greater than
 * the number of remaining schedule dimensions in clusters
 * with a maximal current schedule dimension, then restrict
 * the number of rows to be computed in the cluster schedule
 * to the minimal such non-maximal current schedule dimension.
 * Do this by adjusting merge_graph.maxvar.
 *
 * Return isl_bool_true if the clusters have effectively been merged
 * into a single cluster.
 *
 * Note that since the standard scheduling algorithm minimizes the maximal
 * distance over proximity constraints, the proximity constraints between
 * the merged clusters may not be optimized any further than what is
 * sufficient to bring the distances within the limits of the internal
 * proximity constraints inside the individual clusters.
 * It may therefore make sense to perform an additional translation step
 * to bring the clusters closer to each other, while maintaining
 * the linear part of the merging schedule found using the standard
 * scheduling algorithm.
 */
static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
	struct isl_sched_graph merge_graph = { 0 };

	if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
	if (compute_maxvar(&merge_graph) < 0)
	if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
	if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
	merged = ok_to_merge(ctx, graph, c, &merge_graph);
	if (merged && merge(ctx, c, &merge_graph) < 0)

	graph_free(ctx, &merge_graph);
	graph_free(ctx, &merge_graph);
	return isl_bool_error;
/* Is there any edge marked "no_merge" between two SCCs that are
 * about to be merged (i.e., that are set in "scc_in_merge")?
 * "merge_edge" is the proximity edge along which the clusters of SCCs
 * are going to be merged.
 *
 * If there is any edge between two SCCs with a negative weight,
 * while the weight of "merge_edge" is non-negative, then this
 * means that the edge was postponed.  "merge_edge" should then
 * also be postponed since merging along the edge with negative weight should
 * be postponed until all edges with non-negative weight have been tried.
 * Replace the weight of "merge_edge" by a negative weight as well and
 * tell the caller not to attempt a merge.
 */
static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
	struct isl_sched_edge *merge_edge)
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
		if (!scc_in_merge[edge->dst->scc])
		if (merge_edge->weight >= 0 && edge->weight < 0) {
			merge_edge->weight -= graph->max_weight + 1;
/* Merge the two clusters in "c" connected by the edge in "graph"
 * with index "edge" into a single cluster.
 * If it turns out to be impossible to merge these two clusters,
 * then mark the edge as "no_merge" such that it will not be
 * considered again.
 *
 * First mark all SCCs that need to be merged.  This includes the SCCs
 * in the two clusters, but it may also include the SCCs
 * of intermediate clusters.
 * If there is already a no_merge edge between any pair of such SCCs,
 * then simply mark the current edge as no_merge as well.
 * Likewise, if any of those edges was postponed by has_bounded_distances,
 * then postpone the current edge as well.
 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
 * if the clusters did not end up getting merged, unless the non-merge
 * is due to the fact that the edge was postponed.  This postponement
 * can be recognized by a change in weight (from non-negative to negative).
 */
static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
	struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
	int edge_weight = graph->edge[edge].weight;

	if (mark_merge_sccs(ctx, graph, edge, c) < 0)
		return isl_stat_error;

	if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
		merged = isl_bool_false;
		merged = try_merge(ctx, graph, c);
		return isl_stat_error;
	if (!merged && edge_weight == graph->edge[edge].weight)
		graph->edge[edge].no_merge = 1;
/* Does "node" belong to the cluster identified by "cluster"?
 */
static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
	return node->cluster == cluster;

/* Does "edge" connect two nodes belonging to the cluster
 * identified by "cluster"?
 */
static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
/* Swap the schedule of "node1" and "node2".
 * Both nodes have been derived from the same node in a common parent graph.
 * Since the "coincident" field is shared with that node
 * in the parent graph, there is no need to also swap this field.
 */
static void swap_sched(struct isl_sched_node *node1,
	struct isl_sched_node *node2)
	sched = node1->sched;
	node1->sched = node2->sched;
	node2->sched = sched;

	sched_map = node1->sched_map;
	node1->sched_map = node2->sched_map;
	node2->sched_map = sched_map;
/* Copy the current band schedule from the SCCs that form the cluster
 * with index "pos" to the actual cluster at position "pos".
 * By construction, the index of the first SCC that belongs to the cluster
 *
 * The order of the nodes inside both the SCCs and the cluster
 * is assumed to be the same as the order in the original "graph".
 *
 * Since the SCC graphs will no longer be used after this function,
 * the schedules are actually swapped rather than copied.
 */
static isl_stat copy_partial(struct isl_sched_graph *graph,
	struct isl_clustering *c, int pos)
	c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
	c->cluster[pos].n_row = c->scc[pos].n_row;
	c->cluster[pos].maxvar = c->scc[pos].maxvar;
	for (i = 0; i < graph->n; ++i) {
		if (graph->node[i].cluster != pos)
		s = graph->node[i].scc;
		k = c->scc_node[s]++;
		swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
		if (c->scc[s].maxvar > c->cluster[pos].maxvar)
			c->cluster[pos].maxvar = c->scc[s].maxvar;
/* Is there a (conditional) validity dependence from node[j] to node[i],
 * forcing node[i] to follow node[j] or do the nodes belong to the same
 * cluster?
 */
static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
	struct isl_sched_graph *graph = user;

	if (graph->node[i].cluster == graph->node[j].cluster)
		return isl_bool_true;
	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
/* Extract the merged clusters of SCCs in "graph", sort them, and
 * store them in c->clusters.  Update c->scc_cluster accordingly.
 *
 * First keep track of the cluster containing the SCC to which a node
 * belongs in the node itself.
 * Then extract the clusters into c->clusters, copying the current
 * band schedule from the SCCs that belong to the cluster.
 * Do this only once per cluster.
 *
 * Finally, topologically sort the clusters and update c->scc_cluster
 * to match the new scc numbering.  While the SCCs were originally
 * sorted already, some SCCs that depend on some other SCCs may
 * have been merged with SCCs that appear before these other SCCs.
 * A reordering may therefore be required.
 */
static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
	for (i = 0; i < graph->n; ++i)
		graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];

	for (i = 0; i < graph->scc; ++i) {
		if (c->scc_cluster[i] != i)
		if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
				&edge_cluster_exactly, i, &c->cluster[i]) < 0)
			return isl_stat_error;
		c->cluster[i].src_scc = -1;
		c->cluster[i].dst_scc = -1;
		if (copy_partial(graph, c, i) < 0)
			return isl_stat_error;

	if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
		return isl_stat_error;
	for (i = 0; i < graph->n; ++i)
		c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;
/* Compute weights on the proximity edges of "graph" that can
 * be used by find_proximity to find the most appropriate
 * proximity edge to use to merge two clusters in "c".
 * The weights are also used by has_bounded_distances to determine
 * whether the merge should be allowed.
 * Store the maximum of the computed weights in graph->max_weight.
 *
 * The computed weight is a measure for the number of remaining schedule
 * dimensions that can still be completely aligned.
 * In particular, compute the number of equalities between
 * input dimensions and output dimensions in the proximity constraints.
 * The directions that are already handled by outer schedule bands
 * are projected out prior to determining this number.
 *
 * Edges that will never be considered by find_proximity are ignored.
 */
static isl_stat compute_weights(struct isl_sched_graph *graph,
	struct isl_clustering *c)
	graph->max_weight = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		struct isl_sched_node *src = edge->src;
		struct isl_sched_node *dst = edge->dst;
		isl_basic_map *hull;
		isl_size n_in, n_out, n;

		prox = is_non_empty_proximity(edge);
			return isl_stat_error;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])

		hull = isl_map_affine_hull(isl_map_copy(edge->map));
		hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
						isl_mat_copy(src->vmap));
		hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
						isl_mat_copy(dst->vmap));
		hull = isl_basic_map_project_out(hull,
						isl_dim_in, 0, src->rank);
		hull = isl_basic_map_project_out(hull,
						isl_dim_out, 0, dst->rank);
		hull = isl_basic_map_remove_divs(hull);
		n_in = isl_basic_map_dim(hull, isl_dim_in);
		n_out = isl_basic_map_dim(hull, isl_dim_out);
		if (n_in < 0 || n_out < 0)
			hull = isl_basic_map_free(hull);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
						isl_dim_in, 0, n_in);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
						isl_dim_out, 0, n_out);
		n = isl_basic_map_n_equality(hull);
		isl_basic_map_free(hull);
			return isl_stat_error;

		if (edge->weight > graph->max_weight)
			graph->max_weight = edge->weight;
/* Call compute_schedule_finish_band on each of the clusters in "c"
 * in their topological order.  This order is determined by the scc
 * fields of the nodes in "graph".
 * Combine the results in a sequence expressing the topological order.
 *
 * If there is only one cluster left, then there is no need to introduce
 * a sequence node.  Also, in this case, the cluster necessarily contains
 * the SCC at position 0 in the original graph and is therefore also
 * stored in the first cluster of "c".
 */
static __isl_give isl_schedule_node *finish_bands_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	struct isl_clustering *c)
	isl_union_set_list *filters;

	if (graph->scc == 1)
		return compute_schedule_finish_band(node, &c->cluster[0], 0);

	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	for (i = 0; i < graph->scc; ++i) {
		int j = c->scc_cluster[i];
		node = isl_schedule_node_child(node, i);
		node = isl_schedule_node_child(node, 0);
		node = compute_schedule_finish_band(node, &c->cluster[j], 0);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
/* Compute a schedule for a connected dependence graph by first considering
 * each strongly connected component (SCC) in the graph separately and then
 * incrementally combining them into clusters.
 * Return the updated schedule node.
 *
 * Initially, each cluster consists of a single SCC, each with its
 * own band schedule.  The algorithm then tries to merge pairs
 * of clusters along a proximity edge until no more suitable
 * proximity edges can be found.  During this merging, the schedule
 * is maintained in the individual SCCs.
 * After the merging is completed, the full resulting clusters
 * are extracted and in finish_bands_clustering,
 * compute_schedule_finish_band is called on each of them to integrate
 * the band into "node" and to continue the computation.
 *
 * compute_weights initializes the weights that are used by find_proximity.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
	struct isl_clustering c;

	ctx = isl_schedule_node_get_ctx(node);

	if (clustering_init(ctx, &c, graph) < 0)
	if (compute_weights(graph, &c) < 0)
		i = find_proximity(graph, &c);
		if (i >= graph->n_edge)
		if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)

	if (extract_clusters(ctx, graph, &c) < 0)

	node = finish_bands_clustering(node, graph, &c);

	clustering_free(ctx, &c);
	clustering_free(ctx, &c);
	return isl_schedule_node_free(node);
/* Compute a schedule for a connected dependence graph and return
 * the updated schedule node.
 *
 * If Feautrier's algorithm is selected, we first recursively try to satisfy
 * as many validity dependences as possible.  When all validity dependences
 * are satisfied we extend the schedule to a full-dimensional schedule.
 *
 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
 * depending on whether the user has selected the option to try and
 * compute a schedule for the entire (weakly connected) component first.
 * If there is only a single strongly connected component (SCC), then
 * there is no point in trying to combine SCCs
 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
 * is called instead.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
	ctx = isl_schedule_node_get_ctx(node);
	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	if (compute_maxvar(graph) < 0)
		return isl_schedule_node_free(node);

	if (need_feautrier_step(ctx, graph))
		return compute_schedule_wcc_feautrier(node, graph);

	if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
		return compute_schedule_wcc_whole(node, graph);

	return compute_schedule_wcc_clustering(node, graph);
/* Compute a schedule for each group of nodes identified by node->scc
 * separately and then combine them in a sequence node (or as set node
 * if graph->weak is set) inserted at position "node" of the schedule tree.
 * Return the updated schedule node.
 *
 * If "wcc" is set then each of the groups belongs to a single
 * weakly connected component in the dependence graph so that
 * there is no need for compute_sub_schedule to look for weakly
 * connected components.
 *
 * If a set node would be introduced and if the number of components
 * is equal to the number of nodes, then check if the schedule
 * is already complete.  If so, a redundant set node would be introduced
 * (without any further descendants) stating that the statements
 * can be executed in arbitrary order, which is also expressed
 * by the absence of any node.  Refrain from inserting any nodes
 * in this case and simply return.
 */
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	isl_union_set_list *filters;

	if (graph->weak && graph->scc == graph->n) {
		if (compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		if (graph->n_row >= graph->maxvar)

	ctx = isl_schedule_node_get_ctx(node);
	filters = extract_sccs(ctx, graph);
		node = isl_schedule_node_insert_set(node, filters);
		node = isl_schedule_node_insert_sequence(node, filters);

	for (component = 0; component < graph->scc; ++component) {
		node = isl_schedule_node_child(node, component);
		node = isl_schedule_node_child(node, 0);
		node = compute_sub_schedule(node, ctx, graph,
				&edge_scc_exactly, component, wcc);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);

		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	n = isl_union_set_n_set(domain);
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);

	if (n < 0 || graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);
	node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);