2 * Copyright 2011 INRIA Saclay
3 * Copyright 2012-2014 Ecole Normale Superieure
4 * Copyright 2015-2016 Sven Verdoolaege
5 * Copyright 2016 INRIA Paris
7 * Use of this software is governed by the MIT license
9 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
10 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
12 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
13 * and Centre de Recherche Inria de Paris, 2 rue Simone Iff - Voie DQ12,
14 * CS 42112, 75589 Paris Cedex 12, France
17 #include <isl_ctx_private.h>
18 #include <isl_map_private.h>
19 #include <isl_space_private.h>
20 #include <isl_aff_private.h>
22 #include <isl/constraint.h>
23 #include <isl/schedule.h>
24 #include <isl_schedule_constraints.h>
25 #include <isl/schedule_node.h>
26 #include <isl_mat_private.h>
27 #include <isl_vec_private.h>
29 #include <isl/union_set.h>
32 #include <isl_dim_map.h>
33 #include <isl/map_to_basic_set.h>
35 #include <isl_options_private.h>
36 #include <isl_tarjan.h>
37 #include <isl_morph.h>
39 #include <isl_val_private.h>
42 * The scheduling algorithm implemented in this file was inspired by
43 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
44 * Parallelization and Locality Optimization in the Polyhedral Model".
48 /* Internal information about a node that is used during the construction
50 * space represents the space in which the domain lives
51 * sched is a matrix representation of the schedule being constructed
52 * for this node; if compressed is set, then this schedule is
53 * defined over the compressed domain space
54 * sched_map is an isl_map representation of the same (partial) schedule
55 * sched_map may be NULL; if compressed is set, then this map
56 * is defined over the uncompressed domain space
57 * rank is the number of linearly independent rows in the linear part
59 * the columns of cmap represent a change of basis for the schedule
60 * coefficients; the first rank columns span the linear part of
62 * cinv is the inverse of cmap.
63 * ctrans is the transpose of cmap.
64 * start is the first variable in the LP problem in the sequences that
65 * represents the schedule coefficients of this node
66 * nvar is the dimension of the domain
67 * nparam is the number of parameters or 0 if we are not constructing
68 * a parametric schedule
70 * If compressed is set, then hull represents the constraints
71 * that were used to derive the compression, while compress and
72 * decompress map the original space to the compressed space and
75 * scc is the index of SCC (or WCC) this node belongs to
77 * "cluster" is only used inside extract_clusters and identifies
78 * the cluster of SCCs that the node belongs to.
80 * coincident contains a boolean for each of the rows of the schedule,
81 * indicating whether the corresponding scheduling dimension satisfies
82 * the coincidence constraints in the sense that the corresponding
83 * dependence distances are zero.
85 * If the schedule_treat_coalescing option is set, then
86 * "sizes" contains the sizes of the (compressed) instance set
87 * in each direction. If there is no fixed size in a given direction,
88 * then the corresponding size value is set to infinity.
89 * If the schedule_treat_coalescing option or the schedule_max_coefficient
90 * option is set, then "max" contains the maximal values for
91 * schedule coefficients of the (compressed) variables. If no bound
92 * needs to be imposed on a particular variable, then the corresponding
95 struct isl_sched_node
{
99 isl_multi_aff
*compress
;
100 isl_multi_aff
*decompress
;
116 isl_multi_val
*sizes
;
120 static int node_has_space(const void *entry
, const void *val
)
122 struct isl_sched_node
*node
= (struct isl_sched_node
*)entry
;
123 isl_space
*dim
= (isl_space
*)val
;
125 return isl_space_is_equal(node
->space
, dim
);
128 static int node_scc_exactly(struct isl_sched_node
*node
, int scc
)
130 return node
->scc
== scc
;
133 static int node_scc_at_most(struct isl_sched_node
*node
, int scc
)
135 return node
->scc
<= scc
;
138 static int node_scc_at_least(struct isl_sched_node
*node
, int scc
)
140 return node
->scc
>= scc
;
143 /* An edge in the dependence graph. An edge may be used to
144 * ensure validity of the generated schedule, to minimize the dependence
147 * map is the dependence relation, with i -> j in the map if j depends on i
148 * tagged_condition and tagged_validity contain the union of all tagged
149 * condition or conditional validity dependence relations that
150 * specialize the dependence relation "map"; that is,
151 * if (i -> a) -> (j -> b) is an element of "tagged_condition"
152 * or "tagged_validity", then i -> j is an element of "map".
153 * If these fields are NULL, then they represent the empty relation.
154 * src is the source node
155 * dst is the sink node
157 * types is a bit vector containing the types of this edge.
158 * validity is set if the edge is used to ensure correctness
159 * coincidence is used to enforce zero dependence distances
160 * proximity is set if the edge is used to minimize dependence distances
161 * condition is set if the edge represents a condition
162 * for a conditional validity schedule constraint
163 * local can only be set for condition edges and indicates that
164 * the dependence distance over the edge should be zero
165 * conditional_validity is set if the edge is used to conditionally
168 * For validity edges, start and end mark the sequence of inequality
169 * constraints in the LP problem that encode the validity constraint
170 * corresponding to this edge.
172 * During clustering, an edge may be marked "no_merge" if it should
173 * not be used to merge clusters.
174 * The weight is also only used during clustering and it is
175 * an indication of how many schedule dimensions on either side
176 * of the schedule constraints can be aligned.
177 * If the weight is negative, then this means that this edge was postponed
178 * by has_bounded_distances or any_no_merge. The original weight can
179 * be retrieved by adding 1 + graph->max_weight, with "graph"
180 * the graph containing this edge.
182 struct isl_sched_edge
{
184 isl_union_map
*tagged_condition
;
185 isl_union_map
*tagged_validity
;
187 struct isl_sched_node
*src
;
188 struct isl_sched_node
*dst
;
199 /* Is "edge" marked as being of type "type"?
201 static int is_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
203 return ISL_FL_ISSET(edge
->types
, 1 << type
);
206 /* Mark "edge" as being of type "type".
208 static void set_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
210 ISL_FL_SET(edge
->types
, 1 << type
);
213 /* No longer mark "edge" as being of type "type"?
215 static void clear_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
217 ISL_FL_CLR(edge
->types
, 1 << type
);
220 /* Is "edge" marked as a validity edge?
222 static int is_validity(struct isl_sched_edge
*edge
)
224 return is_type(edge
, isl_edge_validity
);
227 /* Mark "edge" as a validity edge.
229 static void set_validity(struct isl_sched_edge
*edge
)
231 set_type(edge
, isl_edge_validity
);
234 /* Is "edge" marked as a proximity edge?
236 static int is_proximity(struct isl_sched_edge
*edge
)
238 return is_type(edge
, isl_edge_proximity
);
241 /* Is "edge" marked as a local edge?
243 static int is_local(struct isl_sched_edge
*edge
)
245 return is_type(edge
, isl_edge_local
);
248 /* Mark "edge" as a local edge.
250 static void set_local(struct isl_sched_edge
*edge
)
252 set_type(edge
, isl_edge_local
);
255 /* No longer mark "edge" as a local edge.
257 static void clear_local(struct isl_sched_edge
*edge
)
259 clear_type(edge
, isl_edge_local
);
262 /* Is "edge" marked as a coincidence edge?
264 static int is_coincidence(struct isl_sched_edge
*edge
)
266 return is_type(edge
, isl_edge_coincidence
);
269 /* Is "edge" marked as a condition edge?
271 static int is_condition(struct isl_sched_edge
*edge
)
273 return is_type(edge
, isl_edge_condition
);
276 /* Is "edge" marked as a conditional validity edge?
278 static int is_conditional_validity(struct isl_sched_edge
*edge
)
280 return is_type(edge
, isl_edge_conditional_validity
);
283 /* Internal information about the dependence graph used during
284 * the construction of the schedule.
286 * intra_hmap is a cache, mapping dependence relations to their dual,
287 * for dependences from a node to itself
288 * inter_hmap is a cache, mapping dependence relations to their dual,
289 * for dependences between distinct nodes
290 * if compression is involved then the key for these maps
291 * is the original, uncompressed dependence relation, while
292 * the value is the dual of the compressed dependence relation.
294 * n is the number of nodes
295 * node is the list of nodes
296 * maxvar is the maximal number of variables over all nodes
297 * max_row is the allocated number of rows in the schedule
298 * n_row is the current (maximal) number of linearly independent
299 * rows in the node schedules
300 * n_total_row is the current number of rows in the node schedules
301 * band_start is the starting row in the node schedules of the current band
302 * root is set if this graph is the original dependence graph,
303 * without any splitting
305 * sorted contains a list of node indices sorted according to the
306 * SCC to which a node belongs
308 * n_edge is the number of edges
309 * edge is the list of edges
310 * max_edge contains the maximal number of edges of each type;
311 * in particular, it contains the number of edges in the inital graph.
312 * edge_table contains pointers into the edge array, hashed on the source
313 * and sink spaces; there is one such table for each type;
314 * a given edge may be referenced from more than one table
315 * if the corresponding relation appears in more than one of the
316 * sets of dependences; however, for each type there is only
317 * a single edge between a given pair of source and sink space
318 * in the entire graph
320 * node_table contains pointers into the node array, hashed on the space
322 * region contains a list of variable sequences that should be non-trivial
324 * lp contains the (I)LP problem used to obtain new schedule rows
326 * src_scc and dst_scc are the source and sink SCCs of an edge with
327 * conflicting constraints
329 * scc represents the number of components
330 * weak is set if the components are weakly connected
332 * max_weight is used during clustering and represents the maximal
333 * weight of the relevant proximity edges.
335 struct isl_sched_graph
{
336 isl_map_to_basic_set
*intra_hmap
;
337 isl_map_to_basic_set
*inter_hmap
;
339 struct isl_sched_node
*node
;
352 struct isl_sched_edge
*edge
;
354 int max_edge
[isl_edge_last
+ 1];
355 struct isl_hash_table
*edge_table
[isl_edge_last
+ 1];
357 struct isl_hash_table
*node_table
;
358 struct isl_region
*region
;
371 /* Initialize node_table based on the list of nodes.
373 static int graph_init_table(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
377 graph
->node_table
= isl_hash_table_alloc(ctx
, graph
->n
);
378 if (!graph
->node_table
)
381 for (i
= 0; i
< graph
->n
; ++i
) {
382 struct isl_hash_table_entry
*entry
;
385 hash
= isl_space_get_hash(graph
->node
[i
].space
);
386 entry
= isl_hash_table_find(ctx
, graph
->node_table
, hash
,
388 graph
->node
[i
].space
, 1);
391 entry
->data
= &graph
->node
[i
];
397 /* Return a pointer to the node that lives within the given space,
398 * or NULL if there is no such node.
400 static struct isl_sched_node
*graph_find_node(isl_ctx
*ctx
,
401 struct isl_sched_graph
*graph
, __isl_keep isl_space
*dim
)
403 struct isl_hash_table_entry
*entry
;
406 hash
= isl_space_get_hash(dim
);
407 entry
= isl_hash_table_find(ctx
, graph
->node_table
, hash
,
408 &node_has_space
, dim
, 0);
410 return entry
? entry
->data
: NULL
;
413 static int edge_has_src_and_dst(const void *entry
, const void *val
)
415 const struct isl_sched_edge
*edge
= entry
;
416 const struct isl_sched_edge
*temp
= val
;
418 return edge
->src
== temp
->src
&& edge
->dst
== temp
->dst
;
421 /* Add the given edge to graph->edge_table[type].
423 static isl_stat
graph_edge_table_add(isl_ctx
*ctx
,
424 struct isl_sched_graph
*graph
, enum isl_edge_type type
,
425 struct isl_sched_edge
*edge
)
427 struct isl_hash_table_entry
*entry
;
430 hash
= isl_hash_init();
431 hash
= isl_hash_builtin(hash
, edge
->src
);
432 hash
= isl_hash_builtin(hash
, edge
->dst
);
433 entry
= isl_hash_table_find(ctx
, graph
->edge_table
[type
], hash
,
434 &edge_has_src_and_dst
, edge
, 1);
436 return isl_stat_error
;
442 /* Allocate the edge_tables based on the maximal number of edges of
445 static int graph_init_edge_tables(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
449 for (i
= 0; i
<= isl_edge_last
; ++i
) {
450 graph
->edge_table
[i
] = isl_hash_table_alloc(ctx
,
452 if (!graph
->edge_table
[i
])
459 /* If graph->edge_table[type] contains an edge from the given source
460 * to the given destination, then return the hash table entry of this edge.
461 * Otherwise, return NULL.
463 static struct isl_hash_table_entry
*graph_find_edge_entry(
464 struct isl_sched_graph
*graph
,
465 enum isl_edge_type type
,
466 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
468 isl_ctx
*ctx
= isl_space_get_ctx(src
->space
);
470 struct isl_sched_edge temp
= { .src
= src
, .dst
= dst
};
472 hash
= isl_hash_init();
473 hash
= isl_hash_builtin(hash
, temp
.src
);
474 hash
= isl_hash_builtin(hash
, temp
.dst
);
475 return isl_hash_table_find(ctx
, graph
->edge_table
[type
], hash
,
476 &edge_has_src_and_dst
, &temp
, 0);
480 /* If graph->edge_table[type] contains an edge from the given source
481 * to the given destination, then return this edge.
482 * Otherwise, return NULL.
484 static struct isl_sched_edge
*graph_find_edge(struct isl_sched_graph
*graph
,
485 enum isl_edge_type type
,
486 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
488 struct isl_hash_table_entry
*entry
;
490 entry
= graph_find_edge_entry(graph
, type
, src
, dst
);
497 /* Check whether the dependence graph has an edge of the given type
498 * between the given two nodes.
500 static isl_bool
graph_has_edge(struct isl_sched_graph
*graph
,
501 enum isl_edge_type type
,
502 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
504 struct isl_sched_edge
*edge
;
507 edge
= graph_find_edge(graph
, type
, src
, dst
);
511 empty
= isl_map_plain_is_empty(edge
->map
);
513 return isl_bool_error
;
518 /* Look for any edge with the same src, dst and map fields as "model".
520 * Return the matching edge if one can be found.
521 * Return "model" if no matching edge is found.
522 * Return NULL on error.
524 static struct isl_sched_edge
*graph_find_matching_edge(
525 struct isl_sched_graph
*graph
, struct isl_sched_edge
*model
)
527 enum isl_edge_type i
;
528 struct isl_sched_edge
*edge
;
530 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
533 edge
= graph_find_edge(graph
, i
, model
->src
, model
->dst
);
536 is_equal
= isl_map_plain_is_equal(model
->map
, edge
->map
);
546 /* Remove the given edge from all the edge_tables that refer to it.
548 static void graph_remove_edge(struct isl_sched_graph
*graph
,
549 struct isl_sched_edge
*edge
)
551 isl_ctx
*ctx
= isl_map_get_ctx(edge
->map
);
552 enum isl_edge_type i
;
554 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
555 struct isl_hash_table_entry
*entry
;
557 entry
= graph_find_edge_entry(graph
, i
, edge
->src
, edge
->dst
);
560 if (entry
->data
!= edge
)
562 isl_hash_table_remove(ctx
, graph
->edge_table
[i
], entry
);
566 /* Check whether the dependence graph has any edge
567 * between the given two nodes.
569 static isl_bool
graph_has_any_edge(struct isl_sched_graph
*graph
,
570 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
572 enum isl_edge_type i
;
575 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
576 r
= graph_has_edge(graph
, i
, src
, dst
);
584 /* Check whether the dependence graph has a validity edge
585 * between the given two nodes.
587 * Conditional validity edges are essentially validity edges that
588 * can be ignored if the corresponding condition edges are iteration private.
589 * Here, we are only checking for the presence of validity
590 * edges, so we need to consider the conditional validity edges too.
591 * In particular, this function is used during the detection
592 * of strongly connected components and we cannot ignore
593 * conditional validity edges during this detection.
595 static isl_bool
graph_has_validity_edge(struct isl_sched_graph
*graph
,
596 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
600 r
= graph_has_edge(graph
, isl_edge_validity
, src
, dst
);
604 return graph_has_edge(graph
, isl_edge_conditional_validity
, src
, dst
);
607 static int graph_alloc(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
608 int n_node
, int n_edge
)
613 graph
->n_edge
= n_edge
;
614 graph
->node
= isl_calloc_array(ctx
, struct isl_sched_node
, graph
->n
);
615 graph
->sorted
= isl_calloc_array(ctx
, int, graph
->n
);
616 graph
->region
= isl_alloc_array(ctx
, struct isl_region
, graph
->n
);
617 graph
->edge
= isl_calloc_array(ctx
,
618 struct isl_sched_edge
, graph
->n_edge
);
620 graph
->intra_hmap
= isl_map_to_basic_set_alloc(ctx
, 2 * n_edge
);
621 graph
->inter_hmap
= isl_map_to_basic_set_alloc(ctx
, 2 * n_edge
);
623 if (!graph
->node
|| !graph
->region
|| (graph
->n_edge
&& !graph
->edge
) ||
627 for(i
= 0; i
< graph
->n
; ++i
)
628 graph
->sorted
[i
] = i
;
633 static void graph_free(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
637 isl_map_to_basic_set_free(graph
->intra_hmap
);
638 isl_map_to_basic_set_free(graph
->inter_hmap
);
641 for (i
= 0; i
< graph
->n
; ++i
) {
642 isl_space_free(graph
->node
[i
].space
);
643 isl_set_free(graph
->node
[i
].hull
);
644 isl_multi_aff_free(graph
->node
[i
].compress
);
645 isl_multi_aff_free(graph
->node
[i
].decompress
);
646 isl_mat_free(graph
->node
[i
].sched
);
647 isl_map_free(graph
->node
[i
].sched_map
);
648 isl_mat_free(graph
->node
[i
].cmap
);
649 isl_mat_free(graph
->node
[i
].cinv
);
650 isl_mat_free(graph
->node
[i
].ctrans
);
652 free(graph
->node
[i
].coincident
);
653 isl_multi_val_free(graph
->node
[i
].sizes
);
654 isl_vec_free(graph
->node
[i
].max
);
659 for (i
= 0; i
< graph
->n_edge
; ++i
) {
660 isl_map_free(graph
->edge
[i
].map
);
661 isl_union_map_free(graph
->edge
[i
].tagged_condition
);
662 isl_union_map_free(graph
->edge
[i
].tagged_validity
);
666 for (i
= 0; i
<= isl_edge_last
; ++i
)
667 isl_hash_table_free(ctx
, graph
->edge_table
[i
]);
668 isl_hash_table_free(ctx
, graph
->node_table
);
669 isl_basic_set_free(graph
->lp
);
672 /* For each "set" on which this function is called, increment
673 * graph->n by one and update graph->maxvar.
675 static isl_stat
init_n_maxvar(__isl_take isl_set
*set
, void *user
)
677 struct isl_sched_graph
*graph
= user
;
678 int nvar
= isl_set_dim(set
, isl_dim_set
);
681 if (nvar
> graph
->maxvar
)
682 graph
->maxvar
= nvar
;
689 /* Compute the number of rows that should be allocated for the schedule.
690 * In particular, we need one row for each variable or one row
691 * for each basic map in the dependences.
692 * Note that it is practically impossible to exhaust both
693 * the number of dependences and the number of variables.
695 static isl_stat
compute_max_row(struct isl_sched_graph
*graph
,
696 __isl_keep isl_schedule_constraints
*sc
)
700 isl_union_set
*domain
;
704 domain
= isl_schedule_constraints_get_domain(sc
);
705 r
= isl_union_set_foreach_set(domain
, &init_n_maxvar
, graph
);
706 isl_union_set_free(domain
);
708 return isl_stat_error
;
709 n_edge
= isl_schedule_constraints_n_basic_map(sc
);
711 return isl_stat_error
;
712 graph
->max_row
= n_edge
+ graph
->maxvar
;
717 /* Does "bset" have any defining equalities for its set variables?
719 static isl_bool
has_any_defining_equality(__isl_keep isl_basic_set
*bset
)
724 return isl_bool_error
;
726 n
= isl_basic_set_dim(bset
, isl_dim_set
);
727 for (i
= 0; i
< n
; ++i
) {
730 has
= isl_basic_set_has_defining_equality(bset
, isl_dim_set
, i
,
736 return isl_bool_false
;
739 /* Set the entries of node->max to the value of the schedule_max_coefficient
742 static isl_stat
set_max_coefficient(isl_ctx
*ctx
, struct isl_sched_node
*node
)
746 max
= isl_options_get_schedule_max_coefficient(ctx
);
750 node
->max
= isl_vec_alloc(ctx
, node
->nvar
);
751 node
->max
= isl_vec_set_si(node
->max
, max
);
753 return isl_stat_error
;
758 /* Set the entries of node->max to the minimum of the schedule_max_coefficient
759 * option (if set) and half of the minimum of the sizes in the other
760 * dimensions. If the minimum of the sizes is one, half of the size
761 * is zero and this value is reset to one.
762 * If the global minimum is unbounded (i.e., if both
763 * the schedule_max_coefficient is not set and the sizes in the other
764 * dimensions are unbounded), then store a negative value.
765 * If the schedule coefficient is close to the size of the instance set
766 * in another dimension, then the schedule may represent a loop
767 * coalescing transformation (especially if the coefficient
768 * in that other dimension is one). Forcing the coefficient to be
769 * smaller than or equal to half the minimal size should avoid this
772 static isl_stat
compute_max_coefficient(isl_ctx
*ctx
,
773 struct isl_sched_node
*node
)
779 max
= isl_options_get_schedule_max_coefficient(ctx
);
780 v
= isl_vec_alloc(ctx
, node
->nvar
);
782 return isl_stat_error
;
784 for (i
= 0; i
< node
->nvar
; ++i
) {
785 isl_int_set_si(v
->el
[i
], max
);
786 isl_int_mul_si(v
->el
[i
], v
->el
[i
], 2);
789 for (i
= 0; i
< node
->nvar
; ++i
) {
792 size
= isl_multi_val_get_val(node
->sizes
, i
);
795 if (!isl_val_is_int(size
)) {
799 for (j
= 0; j
< node
->nvar
; ++j
) {
802 if (isl_int_is_neg(v
->el
[j
]) ||
803 isl_int_gt(v
->el
[j
], size
->n
))
804 isl_int_set(v
->el
[j
], size
->n
);
809 for (i
= 0; i
< node
->nvar
; ++i
) {
810 isl_int_fdiv_q_ui(v
->el
[i
], v
->el
[i
], 2);
811 if (isl_int_is_zero(v
->el
[i
]))
812 isl_int_set_si(v
->el
[i
], 1);
819 return isl_stat_error
;
822 /* Compute and return the size of "set" in dimension "dim".
823 * The size is taken to be the difference in values for that variable
824 * for fixed values of the other variables.
825 * In particular, the variable is first isolated from the other variables
826 * in the range of a map
828 * [i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
830 * and then duplicated
832 * [i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
834 * The shared variables are then projected out and the maximal value
835 * of i_dim' - i_dim is computed.
837 static __isl_give isl_val
*compute_size(__isl_take isl_set
*set
, int dim
)
844 map
= isl_set_project_onto_map(set
, isl_dim_set
, dim
, 1);
845 map
= isl_map_project_out(map
, isl_dim_in
, dim
, 1);
846 map
= isl_map_range_product(map
, isl_map_copy(map
));
847 map
= isl_set_unwrap(isl_map_range(map
));
848 set
= isl_map_deltas(map
);
849 ls
= isl_local_space_from_space(isl_set_get_space(set
));
850 obj
= isl_aff_var_on_domain(ls
, isl_dim_set
, 0);
851 v
= isl_set_max_val(set
, obj
);
858 /* Compute the size of the instance set "set" of "node", after compression,
859 * as well as bounds on the corresponding coefficients, if needed.
861 * The sizes are needed when the schedule_treat_coalescing option is set.
862 * The bounds are needed when the schedule_treat_coalescing option or
863 * the schedule_max_coefficient option is set.
865 * If the schedule_treat_coalescing option is not set, then at most
866 * the bounds need to be set and this is done in set_max_coefficient.
867 * Otherwise, compress the domain if needed, compute the size
868 * in each direction and store the results in node->size.
869 * Finally, set the bounds on the coefficients based on the sizes
870 * and the schedule_max_coefficient option in compute_max_coefficient.
872 static isl_stat
compute_sizes_and_max(isl_ctx
*ctx
, struct isl_sched_node
*node
,
873 __isl_take isl_set
*set
)
878 if (!isl_options_get_schedule_treat_coalescing(ctx
)) {
880 return set_max_coefficient(ctx
, node
);
883 if (node
->compressed
)
884 set
= isl_set_preimage_multi_aff(set
,
885 isl_multi_aff_copy(node
->decompress
));
886 mv
= isl_multi_val_zero(isl_set_get_space(set
));
887 n
= isl_set_dim(set
, isl_dim_set
);
888 for (j
= 0; j
< n
; ++j
) {
891 v
= compute_size(isl_set_copy(set
), j
);
892 mv
= isl_multi_val_set_val(mv
, j
, v
);
897 return isl_stat_error
;
898 return compute_max_coefficient(ctx
, node
);
901 /* Add a new node to the graph representing the given instance set.
902 * "nvar" is the (possibly compressed) number of variables and
903 * may be smaller than then number of set variables in "set"
904 * if "compressed" is set.
905 * If "compressed" is set, then "hull" represents the constraints
906 * that were used to derive the compression, while "compress" and
907 * "decompress" map the original space to the compressed space and
909 * If "compressed" is not set, then "hull", "compress" and "decompress"
912 * Compute the size of the instance set and bounds on the coefficients,
915 static isl_stat
add_node(struct isl_sched_graph
*graph
,
916 __isl_take isl_set
*set
, int nvar
, int compressed
,
917 __isl_take isl_set
*hull
, __isl_take isl_multi_aff
*compress
,
918 __isl_take isl_multi_aff
*decompress
)
925 struct isl_sched_node
*node
;
928 return isl_stat_error
;
930 ctx
= isl_set_get_ctx(set
);
931 nparam
= isl_set_dim(set
, isl_dim_param
);
932 if (!ctx
->opt
->schedule_parametric
)
934 sched
= isl_mat_alloc(ctx
, 0, 1 + nparam
+ nvar
);
935 node
= &graph
->node
[graph
->n
];
937 space
= isl_set_get_space(set
);
940 node
->nparam
= nparam
;
942 node
->sched_map
= NULL
;
943 coincident
= isl_calloc_array(ctx
, int, graph
->max_row
);
944 node
->coincident
= coincident
;
945 node
->compressed
= compressed
;
947 node
->compress
= compress
;
948 node
->decompress
= decompress
;
949 if (compute_sizes_and_max(ctx
, node
, set
) < 0)
950 return isl_stat_error
;
952 if (!space
|| !sched
|| (graph
->max_row
&& !coincident
))
953 return isl_stat_error
;
954 if (compressed
&& (!hull
|| !compress
|| !decompress
))
955 return isl_stat_error
;
960 /* Add a new node to the graph representing the given set.
962 * If any of the set variables is defined by an equality, then
963 * we perform variable compression such that we can perform
964 * the scheduling on the compressed domain.
966 static isl_stat
extract_node(__isl_take isl_set
*set
, void *user
)
969 isl_bool has_equality
;
973 isl_multi_aff
*compress
, *decompress
;
974 struct isl_sched_graph
*graph
= user
;
976 hull
= isl_set_affine_hull(isl_set_copy(set
));
977 hull
= isl_basic_set_remove_divs(hull
);
978 nvar
= isl_set_dim(set
, isl_dim_set
);
979 has_equality
= has_any_defining_equality(hull
);
981 if (has_equality
< 0)
984 isl_basic_set_free(hull
);
985 return add_node(graph
, set
, nvar
, 0, NULL
, NULL
, NULL
);
988 morph
= isl_basic_set_variable_compression(hull
, isl_dim_set
);
989 nvar
= isl_morph_ran_dim(morph
, isl_dim_set
);
990 compress
= isl_morph_get_var_multi_aff(morph
);
991 morph
= isl_morph_inverse(morph
);
992 decompress
= isl_morph_get_var_multi_aff(morph
);
993 isl_morph_free(morph
);
995 hull_set
= isl_set_from_basic_set(hull
);
996 return add_node(graph
, set
, nvar
, 1, hull_set
, compress
, decompress
);
998 isl_basic_set_free(hull
);
1000 return isl_stat_error
;
1003 struct isl_extract_edge_data
{
1004 enum isl_edge_type type
;
1005 struct isl_sched_graph
*graph
;
1008 /* Merge edge2 into edge1, freeing the contents of edge2.
1009 * Return 0 on success and -1 on failure.
1011 * edge1 and edge2 are assumed to have the same value for the map field.
1013 static int merge_edge(struct isl_sched_edge
*edge1
,
1014 struct isl_sched_edge
*edge2
)
1016 edge1
->types
|= edge2
->types
;
1017 isl_map_free(edge2
->map
);
1019 if (is_condition(edge2
)) {
1020 if (!edge1
->tagged_condition
)
1021 edge1
->tagged_condition
= edge2
->tagged_condition
;
1023 edge1
->tagged_condition
=
1024 isl_union_map_union(edge1
->tagged_condition
,
1025 edge2
->tagged_condition
);
1028 if (is_conditional_validity(edge2
)) {
1029 if (!edge1
->tagged_validity
)
1030 edge1
->tagged_validity
= edge2
->tagged_validity
;
1032 edge1
->tagged_validity
=
1033 isl_union_map_union(edge1
->tagged_validity
,
1034 edge2
->tagged_validity
);
1037 if (is_condition(edge2
) && !edge1
->tagged_condition
)
1039 if (is_conditional_validity(edge2
) && !edge1
->tagged_validity
)
1045 /* Insert dummy tags in domain and range of "map".
1047 * In particular, if "map" is of the form
1053 * [A -> dummy_tag] -> [B -> dummy_tag]
1055 * where the dummy_tags are identical and equal to any dummy tags
1056 * introduced by any other call to this function.
1058 static __isl_give isl_map
*insert_dummy_tags(__isl_take isl_map
*map
)
1064 isl_set
*domain
, *range
;
1066 ctx
= isl_map_get_ctx(map
);
1068 id
= isl_id_alloc(ctx
, NULL
, &dummy
);
1069 space
= isl_space_params(isl_map_get_space(map
));
1070 space
= isl_space_set_from_params(space
);
1071 space
= isl_space_set_tuple_id(space
, isl_dim_set
, id
);
1072 space
= isl_space_map_from_set(space
);
1074 domain
= isl_map_wrap(map
);
1075 range
= isl_map_wrap(isl_map_universe(space
));
1076 map
= isl_map_from_domain_and_range(domain
, range
);
1077 map
= isl_map_zip(map
);
1082 /* Given that at least one of "src" or "dst" is compressed, return
1083 * a map between the spaces of these nodes restricted to the affine
1084 * hull that was used in the compression.
1086 static __isl_give isl_map
*extract_hull(struct isl_sched_node
*src
,
1087 struct isl_sched_node
*dst
)
1091 if (src
->compressed
)
1092 dom
= isl_set_copy(src
->hull
);
1094 dom
= isl_set_universe(isl_space_copy(src
->space
));
1095 if (dst
->compressed
)
1096 ran
= isl_set_copy(dst
->hull
);
1098 ran
= isl_set_universe(isl_space_copy(dst
->space
));
1100 return isl_map_from_domain_and_range(dom
, ran
);
1103 /* Intersect the domains of the nested relations in domain and range
1104 * of "tagged" with "map".
1106 static __isl_give isl_map
*map_intersect_domains(__isl_take isl_map
*tagged
,
1107 __isl_keep isl_map
*map
)
1111 tagged
= isl_map_zip(tagged
);
1112 set
= isl_map_wrap(isl_map_copy(map
));
1113 tagged
= isl_map_intersect_domain(tagged
, set
);
1114 tagged
= isl_map_zip(tagged
);
1118 /* Return a pointer to the node that lives in the domain space of "map"
1119 * or NULL if there is no such node.
1121 static struct isl_sched_node
*find_domain_node(isl_ctx
*ctx
,
1122 struct isl_sched_graph
*graph
, __isl_keep isl_map
*map
)
1124 struct isl_sched_node
*node
;
1127 space
= isl_space_domain(isl_map_get_space(map
));
1128 node
= graph_find_node(ctx
, graph
, space
);
1129 isl_space_free(space
);
1134 /* Return a pointer to the node that lives in the range space of "map"
1135 * or NULL if there is no such node.
1137 static struct isl_sched_node
*find_range_node(isl_ctx
*ctx
,
1138 struct isl_sched_graph
*graph
, __isl_keep isl_map
*map
)
1140 struct isl_sched_node
*node
;
1143 space
= isl_space_range(isl_map_get_space(map
));
1144 node
= graph_find_node(ctx
, graph
, space
);
1145 isl_space_free(space
);
1150 /* Add a new edge to the graph based on the given map
1151 * and add it to data->graph->edge_table[data->type].
1152 * If a dependence relation of a given type happens to be identical
1153 * to one of the dependence relations of a type that was added before,
1154 * then we don't create a new edge, but instead mark the original edge
1155 * as also representing a dependence of the current type.
1157 * Edges of type isl_edge_condition or isl_edge_conditional_validity
1158 * may be specified as "tagged" dependence relations. That is, "map"
1159 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1160 * the dependence on iterations and a and b are tags.
1161 * edge->map is set to the relation containing the elements i -> j,
1162 * while edge->tagged_condition and edge->tagged_validity contain
1163 * the union of all the "map" relations
1164 * for which extract_edge is called that result in the same edge->map.
1166 * If the source or the destination node is compressed, then
1167 * intersect both "map" and "tagged" with the constraints that
1168 * were used to construct the compression.
1169 * This ensures that there are no schedule constraints defined
1170 * outside of these domains, while the scheduler no longer has
1171 * any control over those outside parts.
1173 static isl_stat
extract_edge(__isl_take isl_map
*map
, void *user
)
1175 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1176 struct isl_extract_edge_data
*data
= user
;
1177 struct isl_sched_graph
*graph
= data
->graph
;
1178 struct isl_sched_node
*src
, *dst
;
1179 struct isl_sched_edge
*edge
;
1180 isl_map
*tagged
= NULL
;
1182 if (data
->type
== isl_edge_condition
||
1183 data
->type
== isl_edge_conditional_validity
) {
1184 if (isl_map_can_zip(map
)) {
1185 tagged
= isl_map_copy(map
);
1186 map
= isl_set_unwrap(isl_map_domain(isl_map_zip(map
)));
1188 tagged
= insert_dummy_tags(isl_map_copy(map
));
1192 src
= find_domain_node(ctx
, graph
, map
);
1193 dst
= find_range_node(ctx
, graph
, map
);
1197 isl_map_free(tagged
);
1201 if (src
->compressed
|| dst
->compressed
) {
1203 hull
= extract_hull(src
, dst
);
1205 tagged
= map_intersect_domains(tagged
, hull
);
1206 map
= isl_map_intersect(map
, hull
);
1209 graph
->edge
[graph
->n_edge
].src
= src
;
1210 graph
->edge
[graph
->n_edge
].dst
= dst
;
1211 graph
->edge
[graph
->n_edge
].map
= map
;
1212 graph
->edge
[graph
->n_edge
].types
= 0;
1213 graph
->edge
[graph
->n_edge
].tagged_condition
= NULL
;
1214 graph
->edge
[graph
->n_edge
].tagged_validity
= NULL
;
1215 set_type(&graph
->edge
[graph
->n_edge
], data
->type
);
1216 if (data
->type
== isl_edge_condition
)
1217 graph
->edge
[graph
->n_edge
].tagged_condition
=
1218 isl_union_map_from_map(tagged
);
1219 if (data
->type
== isl_edge_conditional_validity
)
1220 graph
->edge
[graph
->n_edge
].tagged_validity
=
1221 isl_union_map_from_map(tagged
);
1223 edge
= graph_find_matching_edge(graph
, &graph
->edge
[graph
->n_edge
]);
1226 return isl_stat_error
;
1228 if (edge
== &graph
->edge
[graph
->n_edge
])
1229 return graph_edge_table_add(ctx
, graph
, data
->type
,
1230 &graph
->edge
[graph
->n_edge
++]);
1232 if (merge_edge(edge
, &graph
->edge
[graph
->n_edge
]) < 0)
1235 return graph_edge_table_add(ctx
, graph
, data
->type
, edge
);
1238 /* Initialize the schedule graph "graph" from the schedule constraints "sc".
1240 * The context is included in the domain before the nodes of
1241 * the graphs are extracted in order to be able to exploit
1242 * any possible additional equalities.
1243 * Note that this intersection is only performed locally here.
1245 static isl_stat
graph_init(struct isl_sched_graph
*graph
,
1246 __isl_keep isl_schedule_constraints
*sc
)
1249 isl_union_set
*domain
;
1251 struct isl_extract_edge_data data
;
1252 enum isl_edge_type i
;
1256 return isl_stat_error
;
1258 ctx
= isl_schedule_constraints_get_ctx(sc
);
1260 domain
= isl_schedule_constraints_get_domain(sc
);
1261 graph
->n
= isl_union_set_n_set(domain
);
1262 isl_union_set_free(domain
);
1264 if (graph_alloc(ctx
, graph
, graph
->n
,
1265 isl_schedule_constraints_n_map(sc
)) < 0)
1266 return isl_stat_error
;
1268 if (compute_max_row(graph
, sc
) < 0)
1269 return isl_stat_error
;
1272 domain
= isl_schedule_constraints_get_domain(sc
);
1273 domain
= isl_union_set_intersect_params(domain
,
1274 isl_schedule_constraints_get_context(sc
));
1275 r
= isl_union_set_foreach_set(domain
, &extract_node
, graph
);
1276 isl_union_set_free(domain
);
1278 return isl_stat_error
;
1279 if (graph_init_table(ctx
, graph
) < 0)
1280 return isl_stat_error
;
1281 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
1282 c
= isl_schedule_constraints_get(sc
, i
);
1283 graph
->max_edge
[i
] = isl_union_map_n_map(c
);
1284 isl_union_map_free(c
);
1286 return isl_stat_error
;
1288 if (graph_init_edge_tables(ctx
, graph
) < 0)
1289 return isl_stat_error
;
1292 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
1296 c
= isl_schedule_constraints_get(sc
, i
);
1297 r
= isl_union_map_foreach_map(c
, &extract_edge
, &data
);
1298 isl_union_map_free(c
);
1300 return isl_stat_error
;
1306 /* Check whether there is any dependence from node[j] to node[i]
1307 * or from node[i] to node[j].
1309 static isl_bool
node_follows_weak(int i
, int j
, void *user
)
1312 struct isl_sched_graph
*graph
= user
;
1314 f
= graph_has_any_edge(graph
, &graph
->node
[j
], &graph
->node
[i
]);
1317 return graph_has_any_edge(graph
, &graph
->node
[i
], &graph
->node
[j
]);
1320 /* Check whether there is a (conditional) validity dependence from node[j]
1321 * to node[i], forcing node[i] to follow node[j].
1323 static isl_bool
node_follows_strong(int i
, int j
, void *user
)
1325 struct isl_sched_graph
*graph
= user
;
1327 return graph_has_validity_edge(graph
, &graph
->node
[j
], &graph
->node
[i
]);
1330 /* Use Tarjan's algorithm for computing the strongly connected components
1331 * in the dependence graph only considering those edges defined by "follows".
1333 static int detect_ccs(isl_ctx
*ctx
, struct isl_sched_graph
*graph
,
1334 isl_bool (*follows
)(int i
, int j
, void *user
))
1337 struct isl_tarjan_graph
*g
= NULL
;
1339 g
= isl_tarjan_graph_init(ctx
, graph
->n
, follows
, graph
);
1347 while (g
->order
[i
] != -1) {
1348 graph
->node
[g
->order
[i
]].scc
= graph
->scc
;
1356 isl_tarjan_graph_free(g
);
1361 /* Apply Tarjan's algorithm to detect the strongly connected components
1362 * in the dependence graph.
1363 * Only consider the (conditional) validity dependences and clear "weak".
1365 static int detect_sccs(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
1368 return detect_ccs(ctx
, graph
, &node_follows_strong
);
1371 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1372 * in the dependence graph.
1373 * Consider all dependences and set "weak".
1375 static int detect_wccs(isl_ctx
*ctx
, struct isl_sched_graph
*graph
)
1378 return detect_ccs(ctx
, graph
, &node_follows_weak
);
1381 static int cmp_scc(const void *a
, const void *b
, void *data
)
1383 struct isl_sched_graph
*graph
= data
;
1387 return graph
->node
[*i1
].scc
- graph
->node
[*i2
].scc
;
1390 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1392 static int sort_sccs(struct isl_sched_graph
*graph
)
1394 return isl_sort(graph
->sorted
, graph
->n
, sizeof(int), &cmp_scc
, graph
);
1397 /* Given a dependence relation R from "node" to itself,
1398 * construct the set of coefficients of valid constraints for elements
1399 * in that dependence relation.
1400 * In particular, the result contains tuples of coefficients
1401 * c_0, c_n, c_x such that
1403 * c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1407 * c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1409 * We choose here to compute the dual of delta R.
1410 * Alternatively, we could have computed the dual of R, resulting
1411 * in a set of tuples c_0, c_n, c_x, c_y, and then
1412 * plugged in (c_0, c_n, c_x, -c_x).
1414 * If "node" has been compressed, then the dependence relation
1415 * is also compressed before the set of coefficients is computed.
1417 static __isl_give isl_basic_set
*intra_coefficients(
1418 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
,
1419 __isl_take isl_map
*map
)
1423 isl_basic_set
*coef
;
1424 isl_maybe_isl_basic_set m
;
1426 m
= isl_map_to_basic_set_try_get(graph
->intra_hmap
, map
);
1427 if (m
.valid
< 0 || m
.valid
) {
1432 key
= isl_map_copy(map
);
1433 if (node
->compressed
) {
1434 map
= isl_map_preimage_domain_multi_aff(map
,
1435 isl_multi_aff_copy(node
->decompress
));
1436 map
= isl_map_preimage_range_multi_aff(map
,
1437 isl_multi_aff_copy(node
->decompress
));
1439 delta
= isl_set_remove_divs(isl_map_deltas(map
));
1440 coef
= isl_set_coefficients(delta
);
1441 graph
->intra_hmap
= isl_map_to_basic_set_set(graph
->intra_hmap
, key
,
1442 isl_basic_set_copy(coef
));
1447 /* Given a dependence relation R, construct the set of coefficients
1448 * of valid constraints for elements in that dependence relation.
1449 * In particular, the result contains tuples of coefficients
1450 * c_0, c_n, c_x, c_y such that
1452 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1454 * If the source or destination nodes of "edge" have been compressed,
1455 * then the dependence relation is also compressed before
1456 * the set of coefficients is computed.
1458 static __isl_give isl_basic_set
*inter_coefficients(
1459 struct isl_sched_graph
*graph
, struct isl_sched_edge
*edge
,
1460 __isl_take isl_map
*map
)
1464 isl_basic_set
*coef
;
1465 isl_maybe_isl_basic_set m
;
1467 m
= isl_map_to_basic_set_try_get(graph
->inter_hmap
, map
);
1468 if (m
.valid
< 0 || m
.valid
) {
1473 key
= isl_map_copy(map
);
1474 if (edge
->src
->compressed
)
1475 map
= isl_map_preimage_domain_multi_aff(map
,
1476 isl_multi_aff_copy(edge
->src
->decompress
));
1477 if (edge
->dst
->compressed
)
1478 map
= isl_map_preimage_range_multi_aff(map
,
1479 isl_multi_aff_copy(edge
->dst
->decompress
));
1480 set
= isl_map_wrap(isl_map_remove_divs(map
));
1481 coef
= isl_set_coefficients(set
);
1482 graph
->inter_hmap
= isl_map_to_basic_set_set(graph
->inter_hmap
, key
,
1483 isl_basic_set_copy(coef
));
1488 /* Return the position of the coefficients of the variables in
1489 * the coefficients constraints "coef".
1491 * The space of "coef" is of the form
1493 * { coefficients[[cst, params] -> S] }
1495 * Return the position of S.
1497 static int coef_var_offset(__isl_keep isl_basic_set
*coef
)
1502 space
= isl_space_unwrap(isl_basic_set_get_space(coef
));
1503 offset
= isl_space_dim(space
, isl_dim_in
);
1504 isl_space_free(space
);
1509 /* Return the offset of the coefficients of the variables of "node"
1512 * Within each node, the coefficients have the following order:
1514 * - c_i_n (if parametric)
1515 * - positive and negative parts of c_i_x
1517 static int node_var_coef_offset(struct isl_sched_node
*node
)
1519 return node
->start
+ 1 + node
->nparam
;
1522 /* Construct an isl_dim_map for mapping constraints on coefficients
1523 * for "node" to the corresponding positions in graph->lp.
1524 * "offset" is the offset of the coefficients for the variables
1525 * in the input constraints.
1526 * "s" is the sign of the mapping.
1528 * The input constraints are given in terms of the coefficients (c_0, c_n, c_x).
1529 * The mapping produced by this function essentially plugs in
1530 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
1531 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
1532 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1534 * The caller can extend the mapping to also map the other coefficients
1535 * (and therefore not plug in 0).
1537 static __isl_give isl_dim_map
*intra_dim_map(isl_ctx
*ctx
,
1538 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
,
1543 isl_dim_map
*dim_map
;
1545 total
= isl_basic_set_total_dim(graph
->lp
);
1546 pos
= node_var_coef_offset(node
);
1547 dim_map
= isl_dim_map_alloc(ctx
, total
);
1548 isl_dim_map_range(dim_map
, pos
, 2, offset
, 1, node
->nvar
, -s
);
1549 isl_dim_map_range(dim_map
, pos
+ 1, 2, offset
, 1, node
->nvar
, s
);
1554 /* Construct an isl_dim_map for mapping constraints on coefficients
1555 * for "src" (node i) and "dst" (node j) to the corresponding positions
1557 * "offset" is the offset of the coefficients for the variables of "src"
1558 * in the input constraints.
1559 * "s" is the sign of the mapping.
1561 * The input constraints are given in terms of the coefficients
1562 * (c_0, c_n, c_x, c_y).
1563 * The mapping produced by this function essentially plugs in
1564 * (c_j_0 - c_i_0, c_j_n - c_i_n,
1565 * c_j_x^+ - c_j_x^-, -(c_i_x^+ - c_i_x^-)) if s = 1 and
1566 * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
1567 * - (c_j_x^+ - c_j_x^-), c_i_x^+ - c_i_x^-) if s = -1.
1568 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1570 * The caller can further extend the mapping.
1572 static __isl_give isl_dim_map
*inter_dim_map(isl_ctx
*ctx
,
1573 struct isl_sched_graph
*graph
, struct isl_sched_node
*src
,
1574 struct isl_sched_node
*dst
, int offset
, int s
)
1578 isl_dim_map
*dim_map
;
1580 total
= isl_basic_set_total_dim(graph
->lp
);
1581 dim_map
= isl_dim_map_alloc(ctx
, total
);
1583 isl_dim_map_range(dim_map
, dst
->start
, 0, 0, 0, 1, s
);
1584 isl_dim_map_range(dim_map
, dst
->start
+ 1, 1, 1, 1, dst
->nparam
, s
);
1585 pos
= node_var_coef_offset(dst
);
1586 isl_dim_map_range(dim_map
, pos
, 2, offset
+ src
->nvar
, 1,
1588 isl_dim_map_range(dim_map
, pos
+ 1, 2, offset
+ src
->nvar
, 1,
1591 isl_dim_map_range(dim_map
, src
->start
, 0, 0, 0, 1, -s
);
1592 isl_dim_map_range(dim_map
, src
->start
+ 1, 1, 1, 1, src
->nparam
, -s
);
1593 pos
= node_var_coef_offset(src
);
1594 isl_dim_map_range(dim_map
, pos
, 2, offset
, 1, src
->nvar
, s
);
1595 isl_dim_map_range(dim_map
, pos
+ 1, 2, offset
, 1, src
->nvar
, -s
);
1600 /* Add constraints to graph->lp that force validity for the given
1601 * dependence from a node i to itself.
1602 * That is, add constraints that enforce
1604 * (c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
1605 * = c_i_x (y - x) >= 0
1607 * for each (x,y) in R.
1608 * We obtain general constraints on coefficients (c_0, c_n, c_x)
1609 * of valid constraints for (y - x) and then plug in (0, 0, c_i_x^+ - c_i_x^-),
1610 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
1611 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1613 * Actually, we do not construct constraints for the c_i_x themselves,
1614 * but for the coefficients of c_i_x written as a linear combination
1615 * of the columns in node->cmap.
1617 static isl_stat
add_intra_validity_constraints(struct isl_sched_graph
*graph
,
1618 struct isl_sched_edge
*edge
)
1621 isl_map
*map
= isl_map_copy(edge
->map
);
1622 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1623 isl_dim_map
*dim_map
;
1624 isl_basic_set
*coef
;
1625 struct isl_sched_node
*node
= edge
->src
;
1627 coef
= intra_coefficients(graph
, node
, map
);
1629 offset
= coef_var_offset(coef
);
1631 coef
= isl_basic_set_transform_dims(coef
, isl_dim_set
,
1632 offset
, isl_mat_copy(node
->cmap
));
1634 return isl_stat_error
;
1636 dim_map
= intra_dim_map(ctx
, graph
, node
, offset
, 1);
1637 graph
->lp
= isl_basic_set_extend_constraints(graph
->lp
,
1638 coef
->n_eq
, coef
->n_ineq
);
1639 graph
->lp
= isl_basic_set_add_constraints_dim_map(graph
->lp
,
1645 /* Add constraints to graph->lp that force validity for the given
1646 * dependence from node i to node j.
1647 * That is, add constraints that enforce
1649 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
1651 * for each (x,y) in R.
1652 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1653 * of valid constraints for R and then plug in
1654 * (c_j_0 - c_i_0, c_j_n - c_i_n, c_j_x^+ - c_j_x^- - (c_i_x^+ - c_i_x^-)),
1655 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
1656 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1658 * Actually, we do not construct constraints for the c_*_x themselves,
1659 * but for the coefficients of c_*_x written as a linear combination
1660 * of the columns in node->cmap.
1662 static isl_stat
add_inter_validity_constraints(struct isl_sched_graph
*graph
,
1663 struct isl_sched_edge
*edge
)
1666 isl_map
*map
= isl_map_copy(edge
->map
);
1667 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1668 isl_dim_map
*dim_map
;
1669 isl_basic_set
*coef
;
1670 struct isl_sched_node
*src
= edge
->src
;
1671 struct isl_sched_node
*dst
= edge
->dst
;
1673 coef
= inter_coefficients(graph
, edge
, map
);
1675 offset
= coef_var_offset(coef
);
1677 coef
= isl_basic_set_transform_dims(coef
, isl_dim_set
,
1678 offset
, isl_mat_copy(src
->cmap
));
1679 coef
= isl_basic_set_transform_dims(coef
, isl_dim_set
,
1680 offset
+ src
->nvar
, isl_mat_copy(dst
->cmap
));
1682 return isl_stat_error
;
1684 dim_map
= inter_dim_map(ctx
, graph
, src
, dst
, offset
, 1);
1686 edge
->start
= graph
->lp
->n_ineq
;
1687 graph
->lp
= isl_basic_set_extend_constraints(graph
->lp
,
1688 coef
->n_eq
, coef
->n_ineq
);
1689 graph
->lp
= isl_basic_set_add_constraints_dim_map(graph
->lp
,
1692 return isl_stat_error
;
1693 edge
->end
= graph
->lp
->n_ineq
;
1698 /* Add constraints to graph->lp that bound the dependence distance for the given
1699 * dependence from a node i to itself.
1700 * If s = 1, we add the constraint
1702 * c_i_x (y - x) <= m_0 + m_n n
1706 * -c_i_x (y - x) + m_0 + m_n n >= 0
1708 * for each (x,y) in R.
1709 * If s = -1, we add the constraint
1711 * -c_i_x (y - x) <= m_0 + m_n n
1715 * c_i_x (y - x) + m_0 + m_n n >= 0
1717 * for each (x,y) in R.
1718 * We obtain general constraints on coefficients (c_0, c_n, c_x)
1719 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
1720 * with each coefficient (except m_0) represented as a pair of non-negative
1723 * Actually, we do not construct constraints for the c_i_x themselves,
1724 * but for the coefficients of c_i_x written as a linear combination
1725 * of the columns in node->cmap.
1728 * If "local" is set, then we add constraints
1730 * c_i_x (y - x) <= 0
1734 * -c_i_x (y - x) <= 0
1736 * instead, forcing the dependence distance to be (less than or) equal to 0.
1737 * That is, we plug in (0, 0, -s * c_i_x),
1738 * Note that dependences marked local are treated as validity constraints
1739 * by add_all_validity_constraints and therefore also have
1740 * their distances bounded by 0 from below.
1742 static isl_stat
add_intra_proximity_constraints(struct isl_sched_graph
*graph
,
1743 struct isl_sched_edge
*edge
, int s
, int local
)
1747 isl_map
*map
= isl_map_copy(edge
->map
);
1748 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1749 isl_dim_map
*dim_map
;
1750 isl_basic_set
*coef
;
1751 struct isl_sched_node
*node
= edge
->src
;
1753 coef
= intra_coefficients(graph
, node
, map
);
1755 offset
= coef_var_offset(coef
);
1757 coef
= isl_basic_set_transform_dims(coef
, isl_dim_set
,
1758 offset
, isl_mat_copy(node
->cmap
));
1760 return isl_stat_error
;
1762 nparam
= isl_space_dim(node
->space
, isl_dim_param
);
1763 dim_map
= intra_dim_map(ctx
, graph
, node
, offset
, -s
);
1766 isl_dim_map_range(dim_map
, 1, 0, 0, 0, 1, 1);
1767 isl_dim_map_range(dim_map
, 4, 2, 1, 1, nparam
, -1);
1768 isl_dim_map_range(dim_map
, 5, 2, 1, 1, nparam
, 1);
1770 graph
->lp
= isl_basic_set_extend_constraints(graph
->lp
,
1771 coef
->n_eq
, coef
->n_ineq
);
1772 graph
->lp
= isl_basic_set_add_constraints_dim_map(graph
->lp
,
1778 /* Add constraints to graph->lp that bound the dependence distance for the given
1779 * dependence from node i to node j.
1780 * If s = 1, we add the constraint
1782 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
1787 * -(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
1790 * for each (x,y) in R.
1791 * If s = -1, we add the constraint
1793 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
1798 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
1801 * for each (x,y) in R.
1802 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1803 * of valid constraints for R and then plug in
1804 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
1806 * with each coefficient (except m_0, c_*_0 and c_*_n)
1807 * represented as a pair of non-negative coefficients.
1809 * Actually, we do not construct constraints for the c_*_x themselves,
1810 * but for the coefficients of c_*_x written as a linear combination
1811 * of the columns in node->cmap.
1814 * If "local" is set, then we add constraints
1816 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
1820 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) <= 0
1822 * instead, forcing the dependence distance to be (less than or) equal to 0.
1823 * That is, we plug in
1824 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, -s*c_j_x+s*c_i_x).
1825 * Note that dependences marked local are treated as validity constraints
1826 * by add_all_validity_constraints and therefore also have
1827 * their distances bounded by 0 from below.
1829 static isl_stat
add_inter_proximity_constraints(struct isl_sched_graph
*graph
,
1830 struct isl_sched_edge
*edge
, int s
, int local
)
1834 isl_map
*map
= isl_map_copy(edge
->map
);
1835 isl_ctx
*ctx
= isl_map_get_ctx(map
);
1836 isl_dim_map
*dim_map
;
1837 isl_basic_set
*coef
;
1838 struct isl_sched_node
*src
= edge
->src
;
1839 struct isl_sched_node
*dst
= edge
->dst
;
1841 coef
= inter_coefficients(graph
, edge
, map
);
1843 offset
= coef_var_offset(coef
);
1845 coef
= isl_basic_set_transform_dims(coef
, isl_dim_set
,
1846 offset
, isl_mat_copy(src
->cmap
));
1847 coef
= isl_basic_set_transform_dims(coef
, isl_dim_set
,
1848 offset
+ src
->nvar
, isl_mat_copy(dst
->cmap
));
1850 return isl_stat_error
;
1852 nparam
= isl_space_dim(src
->space
, isl_dim_param
);
1853 dim_map
= inter_dim_map(ctx
, graph
, src
, dst
, offset
, -s
);
1856 isl_dim_map_range(dim_map
, 1, 0, 0, 0, 1, 1);
1857 isl_dim_map_range(dim_map
, 4, 2, 1, 1, nparam
, -1);
1858 isl_dim_map_range(dim_map
, 5, 2, 1, 1, nparam
, 1);
1861 graph
->lp
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return 0;
}

/* Add all validity constraints to graph->lp.
 *
 * An edge that is forced to be local needs to have its dependence
 * distances equal to zero. We take care of bounding them by 0 from below
 * here. add_all_proximity_constraints takes care of bounding them by 0
 * from above.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_validity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src != edge->dst)
			continue;
		if (add_intra_validity_constraints(graph, edge) < 0)
			return -1;
	}

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src == edge->dst)
			continue;
		if (add_inter_validity_constraints(graph, edge) < 0)
			return -1;
	}

	return 0;
}

/* Add constraints to graph->lp that bound the dependence distance
 * for all dependence relations.
 * If a given proximity dependence is identical to a validity
 * dependence, then the dependence distance is already bounded
 * from below (by zero), so we only need to bound the distance
 * from above. (This includes the case of "local" dependences
 * which are treated as validity dependence by add_all_validity_constraints.)
 * Otherwise, we need to bound the distance both from above and from below.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_proximity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_proximity(edge) && !local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (is_validity(edge) || local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
	}

	return 0;
}

/* Compute a basis for the rows in the linear part of the schedule
 * and extend this basis to a full basis. The remaining rows
 * can then be used to force linear independence from the rows
 * already in the schedule.
 *
 * In particular, given the schedule rows S, we compute
 *
 *	S = H Q
 *
 * with H the Hermite normal form of S. That is, all but the
 * first rank columns of H are zero and so each row in S is
 * a linear combination of the first rank rows of Q.
 * The matrix Q is then transposed because we will write the
 * coefficients of the next schedule row as a column vector s
 * and express this s as a linear combination s = Q c of the
 * computed basis.
 * Similarly, the matrix U is transposed such that we can
 * compute the coefficients c = U s from a schedule row s.
 */
static int node_update_cmap(struct isl_sched_node *node)
{
	isl_mat *H, *U, *Q;
	int n_row = isl_mat_rows(node->sched);

	H = isl_mat_sub_alloc(node->sched, 0, n_row,
			      1 + node->nparam, node->nvar);

	H = isl_mat_left_hermite(H, 0, &U, &Q);
	isl_mat_free(node->cmap);
	isl_mat_free(node->cinv);
	isl_mat_free(node->ctrans);
	node->ctrans = isl_mat_copy(Q);
	node->cmap = isl_mat_transpose(Q);
	node->cinv = isl_mat_transpose(U);
	node->rank = isl_mat_initial_non_zero_cols(H);
	isl_mat_free(H);

	if (!node->cmap || !node->cinv || !node->ctrans || node->rank < 0)
		return -1;
	return 0;
}
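/* As a small worked illustration (hypothetical schedule): if the only row
 * computed so far has linear part S = (1 1) over two variables, then one
 * valid decomposition is H = (1 0), Q = [[1 1], [0 1]], U = [[1 -1], [0 1]],
 * so that rank = 1.  A candidate next row s = (a b) then has coefficients
 * c = U^T s = (a, b - a), and the entries of c beyond the first rank ones
 * vanish exactly when s is a multiple of (1 1), i.e., exactly when the new
 * row would be trivial with respect to the existing schedule.
 */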
2002 /* Is "edge" marked as a validity or a conditional validity edge?
2004 static int is_any_validity(struct isl_sched_edge
*edge
)
2006 return is_validity(edge
) || is_conditional_validity(edge
);
2009 /* How many times should we count the constraints in "edge"?
2011 * If carry is set, then we are counting the number of
2012 * (validity or conditional validity) constraints that will be added
2013 * in setup_carry_lp and we count each edge exactly once.
2015 * Otherwise, we count as follows
2016 * validity -> 1 (>= 0)
2017 * validity+proximity -> 2 (>= 0 and upper bound)
2018 * proximity -> 2 (lower and upper bound)
2019 * local(+any) -> 2 (>= 0 and <= 0)
2021 * If an edge is only marked conditional_validity then it counts
2022 * as zero since it is only checked afterwards.
2024 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2025 * Otherwise, we ignore them.
2027 static int edge_multiplicity(struct isl_sched_edge
*edge
, int carry
,
2028 int use_coincidence
)
2032 if (is_proximity(edge
) || is_local(edge
))
2034 if (use_coincidence
&& is_coincidence(edge
))
2036 if (is_validity(edge
))
/* Count the number of equality and inequality constraints
 * that will be added for the given map.
 *
 * "use_coincidence" is set if we should take into account coincidence edges.
 */
static isl_stat count_map_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map,
	int *n_eq, int *n_ineq, int carry, int use_coincidence)
{
	isl_basic_set *coef;
	int f = edge_multiplicity(edge, carry, use_coincidence);

	if (f == 0) {
		isl_map_free(map);
		return isl_stat_ok;
	}

	if (edge->src == edge->dst)
		coef = intra_coefficients(graph, edge->src, map);
	else
		coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return isl_stat_error;
	*n_eq += f * coef->n_eq;
	*n_ineq += f * coef->n_ineq;
	isl_basic_set_free(coef);

	return isl_stat_ok;
}

/* Count the number of equality and inequality constraints
 * that will be added to the main lp problem.
 * We count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int count_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq, int use_coincidence)
{
	int i;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_map *map = isl_map_copy(edge->map);

		if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
					  0, use_coincidence) < 0)
			return -1;
	}

	return 0;
}

/* Count the number of constraints that will be added by
 * add_bound_constant_constraints to bound the values of the constant terms
 * and increment *n_eq and *n_ineq accordingly.
 *
 * In practice, add_bound_constant_constraints only adds inequalities.
 */
static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	if (isl_options_get_schedule_max_constant_term(ctx) == -1)
		return isl_stat_ok;

	*n_ineq += graph->n;

	return isl_stat_ok;
}

/* Add constraints to bound the values of the constant terms in the schedule,
 * if requested by the user.
 *
 * The maximal value of the constant terms is defined by the option
 * "schedule_max_constant_term".
 *
 * Within each node, the coefficients have the following order:
 *
 * - c_i_0
 * - c_i_n (if parametric)
 * - positive and negative parts of c_i_x
 */
static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i, k;
	int max;
	int total;

	max = isl_options_get_schedule_max_constant_term(ctx);
	if (max == -1)
		return isl_stat_ok;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][1 + node->start], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	return isl_stat_ok;
}
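/* For example, with a hypothetical option value max = 10, the inequality
 * added above for a node reads
 *
 *	-c_i_0 + 10 >= 0
 *
 * i.e., c_i_0 <= 10.  Since all LP variables are non-negative by
 * construction, no lower bound on c_i_0 needs to be added.
 */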
/* Count the number of constraints that will be added by
 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
 * accordingly.
 *
 * In practice, add_bound_coefficient_constraints only adds inequalities.
 */
static int count_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	int i;

	if (isl_options_get_schedule_max_coefficient(ctx) == -1 &&
	    !isl_options_get_schedule_treat_coalescing(ctx))
		return 0;

	for (i = 0; i < graph->n; ++i)
		*n_ineq += graph->node[i].nparam + 2 * graph->node[i].nvar;

	return 0;
}

/* Add constraints to graph->lp that bound the values of
 * the parameter schedule coefficients of "node" to "max" and
 * the variable schedule coefficients to the corresponding entry
 * in node->max.
 * In either case, a negative value means that no bound needs to be imposed.
 *
 * For parameter coefficients, this amounts to adding a constraint
 *
 *	c_n <= max
 *
 * i.e.,
 *
 *	-c_n + max >= 0
 *
 * The variable coefficients are, however, not represented directly.
 * Instead, the variable coefficients c_x are written as a linear
 * combination c_x = cmap c_z of some other coefficients c_z,
 * which are in turn encoded as c_z = c_z^+ - c_z^-.
 * Let a_j be the elements of row i of node->cmap, then
 *
 *	-max_i <= c_x_i <= max_i
 *
 * is encoded as
 *
 *	-max_i <= \sum_j a_j (c_z_j^+ - c_z_j^-) <= max_i
 *
 * or
 *
 *	-\sum_j a_j (c_z_j^+ - c_z_j^-) + max_i >= 0
 *	\sum_j a_j (c_z_j^+ - c_z_j^-) + max_i >= 0
 */
static isl_stat node_add_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_sched_node *node, int max)
{
	int i, j, k;
	int total;
	isl_vec *ineq;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	for (j = 0; j < node->nparam; ++j) {
		int dim;

		if (max < 0)
			continue;

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		dim = 1 + node->start + 1 + j;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][dim], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	ineq = isl_vec_alloc(ctx, 1 + total);
	ineq = isl_vec_clr(ineq);
	if (!ineq)
		return isl_stat_error;
	for (i = 0; i < node->nvar; ++i) {
		int pos = 1 + node_var_coef_offset(node);

		if (isl_int_is_neg(node->max->el[i]))
			continue;

		for (j = 0; j < node->nvar; ++j) {
			isl_int_set(ineq->el[pos + 2 * j],
				    node->cmap->row[i][j]);
			isl_int_neg(ineq->el[pos + 2 * j + 1],
				    node->cmap->row[i][j]);
		}
		isl_int_set(ineq->el[0], node->max->el[i]);

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			goto error;
		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);

		isl_seq_neg(ineq->el + pos, ineq->el + pos, 2 * node->nvar);
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			goto error;
		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
	}
	isl_vec_free(ineq);

	return isl_stat_ok;
error:
	isl_vec_free(ineq);
	return isl_stat_error;
}
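/* As a small illustration with hypothetical values: if row i of node->cmap
 * is (1, 1) and max_i is 4, then the pair of inequalities added for this
 * row is
 *
 *	-(c_z_0^+ - c_z_0^-) - (c_z_1^+ - c_z_1^-) + 4 >= 0
 *	 (c_z_0^+ - c_z_0^-) + (c_z_1^+ - c_z_1^-) + 4 >= 0
 *
 * which together bound the actual coefficient c_x_i = c_z_0 + c_z_1
 * to the range [-4, 4].
 */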
/* Add constraints that bound the values of the variable and parameter
 * coefficients of the schedule.
 *
 * The maximal value of the coefficients is defined by the option
 * 'schedule_max_coefficient' and the entries in node->max.
 * These latter entries are only set if either the schedule_max_coefficient
 * option or the schedule_treat_coalescing option is set.
 */
static isl_stat add_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int max;

	max = isl_options_get_schedule_max_coefficient(ctx);

	if (max == -1 && !isl_options_get_schedule_treat_coalescing(ctx))
		return isl_stat_ok;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		if (node_add_coefficient_constraints(ctx, graph, node, max) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}

/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the "n" values starting at "first".
 */
static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos, int first, int n)
{
	int i, k;
	int total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < n; ++i)
		isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);

	return isl_stat_ok;
}
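/* For example, with hypothetical arguments sum_pos = 0, first = 4 and n = 2,
 * the equality row that is added reads
 *
 *	-x_0 + x_4 + x_5 = 0
 *
 * i.e., the variable at position 0 is equated to the sum x_4 + x_5.
 */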
/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the parameter coefficients of all nodes.
 *
 * Within each node, the coefficients have the following order:
 *
 * - c_i_0
 * - c_i_n (if parametric)
 * - positive and negative parts of c_i_x
 */
static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos)
{
	int i, j, k;
	int total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < graph->n; ++i) {
		int pos = 1 + graph->node[i].start + 1;

		for (j = 0; j < graph->node[i].nparam; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	return isl_stat_ok;
}

/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the variable coefficients of all nodes.
 *
 * Within each node, the coefficients have the following order:
 *
 * - c_i_0
 * - c_i_n (if parametric)
 * - positive and negative parts of c_i_x
 */
static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos)
{
	int i, j, k;
	int total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node_var_coef_offset(node);

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	return isl_stat_ok;
}
/* Construct an ILP problem for finding schedule coefficients
 * that result in non-negative, but small dependence distances
 * over all dependences.
 * In particular, the dependence distances over proximity edges
 * are bounded by m_0 + m_n n and we compute schedule coefficients
 * with small values (preferably zero) of m_n and m_0.
 *
 * All variables of the ILP are non-negative. The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables. The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 * - sum of positive and negative parts of m_n coefficients
 * - m_0
 * - sum of all c_n coefficients
 *   (unconstrained when computing non-parametric schedules)
 * - sum of positive and negative parts of all c_x coefficients
 * - positive and negative parts of m_n coefficients
 * and for each node
 * - c_i_0
 * - c_i_n (if parametric)
 * - positive and negative parts of c_i_x
 *
 * The c_i_x are not represented directly, but through the columns of
 * node->cmap. That is, the computed values are for variable t_i_x
 * such that c_i_x = Q t_i_x with Q equal to node->cmap.
 *
 * The constraints are those from the edges plus two or three equalities
 * to express the sums.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;
	unsigned nparam;
	unsigned total;
	isl_space *space;
	int parametric;
	int param_pos;
	int n_eq, n_ineq;

	parametric = ctx->opt->schedule_parametric;
	nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
	param_pos = 4;
	total = param_pos + 2 * nparam;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		if (node_update_cmap(node) < 0)
			return isl_stat_error;
		node->start = total;
		total += 1 + node->nparam + 2 * node->nvar;
	}

	if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
		return isl_stat_error;
	if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;
	if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	space = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 2 + parametric;

	graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);

	if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
		return isl_stat_error;
	if (parametric && add_param_sum_constraint(graph, 2) < 0)
		return isl_stat_error;
	if (add_var_sum_constraint(graph, 3) < 0)
		return isl_stat_error;
	if (add_bound_constant_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_bound_coefficient_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_all_validity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;
	if (add_all_proximity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
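/* As an illustration of the variable layout above (hypothetical sizes):
 * with a single parameter (nparam = 1) and a single node with nparam = 1
 * and nvar = 2, the LP variables are
 *
 *	0: sum of the m_n parts		1: m_0
 *	2: sum of all c_n		3: sum of all c_x parts
 *	4-5: negative and positive part of the single m_n coefficient
 *	6: c_i_0			7: c_i_n
 *	8-11: negative and positive parts of the two t_i_x coefficients
 *
 * so that node->start = 6 and total = 12.
 */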
/* Analyze the conflicting constraint found by
 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
 * constraint of one of the edges between distinct nodes, living, moreover
 * in distinct SCCs, then record the source and sink SCC as this may
 * be a good place to cut between SCCs.
 */
static int check_conflict(int con, void *user)
{
	int i;
	struct isl_sched_graph *graph = user;

	if (graph->src_scc >= 0)
		return 0;

	con -= graph->lp->n_eq;

	if (con >= graph->lp->n_ineq)
		return 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (!is_validity(&graph->edge[i]))
			continue;
		if (graph->edge[i].src == graph->edge[i].dst)
			continue;
		if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
			continue;
		if (graph->edge[i].start > con)
			continue;
		if (graph->edge[i].end <= con)
			continue;
		graph->src_scc = graph->edge[i].src->scc;
		graph->dst_scc = graph->edge[i].dst->scc;
	}

	return 0;
}

/* Check whether the next schedule row of the given node needs to be
 * non-trivial. Lower-dimensional domains may have some trivial rows,
 * but as soon as the number of remaining required non-trivial rows
 * is as large as the number of remaining rows to be computed,
 * all remaining rows need to be non-trivial.
 */
static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
{
	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
}
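/* For example (hypothetical values), a node with nvar = 3 and rank = 1 still
 * requires two linearly independent rows.  If graph->maxvar - graph->n_row
 * is also two, then every row that remains to be computed needs to be
 * non-trivial on this node.
 */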
/* Solve the ILP problem constructed in setup_lp.
 * For each node such that all the remaining rows of its schedule
 * need to be non-trivial, we construct a non-triviality region.
 * This region imposes that the next row is independent of previous rows.
 * In particular the coefficients c_i_x are represented by t_i_x
 * variables with c_i_x = Q t_i_x and Q a unimodular matrix such that
 * its first columns span the rows of the previously computed part
 * of the schedule. The non-triviality region enforces that at least
 * one of the remaining components of t_i_x is non-zero, i.e.,
 * that the new schedule row depends on at least one of the remaining
 * dimensions.
 */
static __isl_give isl_vec *solve_lp(struct isl_sched_graph *graph)
{
	int i;
	isl_vec *sol;
	isl_basic_set *lp;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int skip = node->rank;
		graph->region[i].pos = node_var_coef_offset(node) + 2 * skip;
		if (needs_row(graph, node))
			graph->region[i].len = 2 * (node->nvar - skip);
		else
			graph->region[i].len = 0;
	}
	lp = isl_basic_set_copy(graph->lp);
	sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
				       graph->region, &check_conflict, graph);
	return sol;
}

/* Extract the coefficients for the variables of "node" from "sol".
 *
 * Within each node, the coefficients have the following order:
 *
 * - c_i_0
 * - c_i_n (if parametric)
 * - positive and negative parts of c_i_x
 *
 * The c_i_x^- appear before their c_i_x^+ counterpart.
 *
 * Return c_i_x = c_i_x^+ - c_i_x^-
 */
static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
	__isl_keep isl_vec *sol)
{
	int i;
	int pos;
	isl_vec *csol;

	if (!sol)
		return NULL;
	csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
	if (!csol)
		return NULL;

	pos = 1 + node_var_coef_offset(node);
	for (i = 0; i < node->nvar; ++i)
		isl_int_sub(csol->el[i],
			    sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);

	return csol;
}
/* Update the schedules of all nodes based on the given solution
 * of the LP problem.
 * The new row is added to the current band.
 * All possibly negative coefficients are encoded as a difference
 * of two non-negative variables, so we need to perform the subtraction
 * here. Moreover, if use_cmap is set, then the solution does
 * not refer to the actual coefficients c_i_x, but instead to variables
 * t_i_x such that c_i_x = Q t_i_x and Q is equal to node->cmap.
 * In this case, we then also need to perform this multiplication
 * to obtain the values of c_i_x.
 *
 * If coincident is set, then the caller guarantees that the new
 * row satisfies the coincidence constraints.
 */
static int update_schedule(struct isl_sched_graph *graph,
	__isl_take isl_vec *sol, int use_cmap, int coincident)
{
	int i, j;
	isl_vec *csol = NULL;

	if (!sol)
		goto error;
	if (sol->size == 0)
		isl_die(sol->ctx, isl_error_internal,
			"no solution found", goto error);
	if (graph->n_total_row >= graph->max_row)
		isl_die(sol->ctx, isl_error_internal,
			"too many schedule rows", goto error);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = node->start;
		int row = isl_mat_rows(node->sched);

		isl_vec_free(csol);
		csol = extract_var_coef(node, sol);
		if (!csol)
			goto error;

		isl_map_free(node->sched_map);
		node->sched_map = NULL;
		node->sched = isl_mat_add_rows(node->sched, 1);
		if (!node->sched)
			goto error;
		for (j = 0; j < 1 + node->nparam; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, j, sol->el[1 + pos + j]);
		if (use_cmap)
			csol = isl_mat_vec_product(isl_mat_copy(node->cmap),
						   csol);
		if (!csol)
			goto error;
		for (j = 0; j < node->nvar; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, 1 + node->nparam + j, csol->el[j]);
		node->coincident[graph->n_total_row] = coincident;
	}
	isl_vec_free(sol);
	isl_vec_free(csol);

	graph->n_row++;
	graph->n_total_row++;

	return 0;
error:
	isl_vec_free(sol);
	isl_vec_free(csol);
	return -1;
}
/* Convert row "row" of node->sched into an isl_aff living in "ls"
 * and return this isl_aff.
 */
static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
	struct isl_sched_node *node, int row)
{
	int j;
	isl_int v;
	isl_aff *aff;

	isl_int_init(v);

	aff = isl_aff_zero_on_domain(ls);
	isl_mat_get_element(node->sched, row, 0, &v);
	aff = isl_aff_set_constant(aff, v);
	for (j = 0; j < node->nparam; ++j) {
		isl_mat_get_element(node->sched, row, 1 + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
	}
	for (j = 0; j < node->nvar; ++j) {
		isl_mat_get_element(node->sched, row, 1 + node->nparam + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
	}

	isl_int_clear(v);

	return aff;
}

/* Convert the "n" rows starting at "first" of node->sched into a multi_aff
 * and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
	struct isl_sched_node *node, int first, int n)
{
	int i;
	int nrow;
	isl_space *space;
	isl_local_space *ls;
	isl_aff *aff;
	isl_multi_aff *ma;

	nrow = isl_mat_rows(node->sched);
	if (node->compressed)
		space = isl_multi_aff_get_domain_space(node->decompress);
	else
		space = isl_space_copy(node->space);
	ls = isl_local_space_from_space(isl_space_copy(space));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, n);
	ma = isl_multi_aff_zero(space);

	for (i = first; i < first + n; ++i) {
		aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
		ma = isl_multi_aff_set_aff(ma, i - first, aff);
	}

	isl_local_space_free(ls);

	if (node->compressed)
		ma = isl_multi_aff_pullback_multi_aff(ma,
					isl_multi_aff_copy(node->compress));

	return ma;
}

/* Convert node->sched into a multi_aff and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
	struct isl_sched_node *node)
{
	int nrow;

	nrow = isl_mat_rows(node->sched);
	return node_extract_partial_schedule_multi_aff(node, 0, nrow);
}
/* Convert node->sched into a map and return this map.
 *
 * The result is cached in node->sched_map, which needs to be released
 * whenever node->sched is updated.
 * It is defined over the uncompressed node domain.
 */
static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
{
	if (!node->sched_map) {
		isl_multi_aff *ma;

		ma = node_extract_schedule_multi_aff(node);
		node->sched_map = isl_map_from_multi_aff(ma);
	}

	return isl_map_copy(node->sched_map);
}

/* Construct a map that can be used to update a dependence relation
 * based on the current schedule.
 * That is, construct a map expressing that source and sink
 * are executed within the same iteration of the current schedule.
 * This map can then be intersected with the dependence relation.
 * This is not the most efficient way, but this shouldn't be a critical
 * operation.
 */
static __isl_give isl_map *specializer(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_map *src_sched, *dst_sched;

	src_sched = node_extract_schedule(src);
	dst_sched = node_extract_schedule(dst);
	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
}
/* Intersect the domains of the nested relations in domain and range
 * of "umap" with "map".
 */
static __isl_give isl_union_map *intersect_domains(
	__isl_take isl_union_map *umap, __isl_keep isl_map *map)
{
	isl_union_set *uset;

	umap = isl_union_map_zip(umap);
	uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
	umap = isl_union_map_intersect_domain(umap, uset);
	umap = isl_union_map_zip(umap);
	return umap;
}

/* Update the dependence relation of the given edge based
 * on the current schedule.
 * If the dependence is carried completely by the current schedule, then
 * it is removed from the edge_tables. It is kept in the list of edges
 * as otherwise all edge_tables would have to be recomputed.
 */
static int update_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int empty;
	isl_map *id;

	id = specializer(edge->src, edge->dst);
	edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
	if (!edge->map)
		goto error;

	if (edge->tagged_condition) {
		edge->tagged_condition =
			intersect_domains(edge->tagged_condition, id);
		if (!edge->tagged_condition)
			goto error;
	}
	if (edge->tagged_validity) {
		edge->tagged_validity =
			intersect_domains(edge->tagged_validity, id);
		if (!edge->tagged_validity)
			goto error;
	}

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		goto error;
	if (empty)
		graph_remove_edge(graph, edge);

	isl_map_free(id);
	return 0;
error:
	isl_map_free(id);
	return -1;
}
/* Does the domain of "umap" intersect "uset"?
 */
static int domain_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Does the range of "umap" intersect "uset"?
 */
static int range_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}
/* Are the condition dependences of "edge" local with respect to
 * the current schedule?
 *
 * That is, are domain and range of the condition dependences mapped
 * to the same point?
 *
 * In other words, is the condition false?
 */
static int is_condition_false(struct isl_sched_edge *edge)
{
	isl_union_map *umap;
	isl_map *map, *sched, *test;
	int empty, local;

	empty = isl_union_map_is_empty(edge->tagged_condition);
	if (empty < 0 || empty)
		return empty;

	umap = isl_union_map_copy(edge->tagged_condition);
	umap = isl_union_map_zip(umap);
	umap = isl_union_set_unwrap(isl_union_map_domain(umap));
	map = isl_map_from_union_map(umap);

	sched = node_extract_schedule(edge->src);
	map = isl_map_apply_domain(map, sched);
	sched = node_extract_schedule(edge->dst);
	map = isl_map_apply_range(map, sched);

	test = isl_map_identity(isl_map_get_space(map));
	local = isl_map_is_subset(map, test);
	isl_map_free(map);
	isl_map_free(test);

	return local;
}
/* For each conditional validity constraint that is adjacent
 * to a condition with domain in condition_source or range in condition_sink,
 * turn it into an unconditional validity constraint.
 */
static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
	__isl_take isl_union_set *condition_source,
	__isl_take isl_union_set *condition_sink)
{
	int i;

	condition_source = isl_union_set_coalesce(condition_source);
	condition_sink = isl_union_set_coalesce(condition_sink);

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent;
		isl_union_map *validity;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;
		if (is_validity(&graph->edge[i]))
			continue;

		validity = graph->edge[i].tagged_validity;
		adjacent = domain_intersects(validity, condition_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(validity, condition_source);
		if (adjacent < 0)
			goto error;
		if (!adjacent)
			continue;

		set_validity(&graph->edge[i]);
	}

	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return 0;
error:
	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return -1;
}
/* Update the dependence relations of all edges based on the current schedule
 * and enforce conditional validity constraints that are adjacent
 * to satisfied condition constraints.
 *
 * First check if any of the condition constraints are satisfied
 * (i.e., not local to the outer schedule) and keep track of
 * their domain and range.
 * Then update all dependence relations (which removes the non-local
 * constraints).
 * Finally, if any condition constraints turned out to be satisfied,
 * then turn all adjacent conditional validity constraints into
 * unconditional validity constraints.
 */
static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		int local;
		isl_union_set *uset;
		isl_union_map *umap;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;
		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			goto error;
		if (local)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
	}

	for (i = graph->n_edge - 1; i >= 0; --i) {
		if (update_edge(graph, &graph->edge[i]) < 0)
			goto error;
	}

	if (any)
		return unconditionalize_adjacent_validity(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return 0;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}

static void next_band(struct isl_sched_graph *graph)
{
	graph->band_start = graph->n_total_row;
}
/* Return the union of the universe domains of the nodes in "graph"
 * that satisfy "pred".
 */
static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*pred)(struct isl_sched_node *node, int data), int data)
{
	int i;
	isl_set *set;
	isl_union_set *dom;

	for (i = 0; i < graph->n; ++i)
		if (pred(&graph->node[i], data))
			break;

	if (i >= graph->n)
		isl_die(ctx, isl_error_internal,
			"empty component", return NULL);

	set = isl_set_universe(isl_space_copy(graph->node[i].space));
	dom = isl_union_set_from_set(set);

	for (i = i + 1; i < graph->n; ++i) {
		if (!pred(&graph->node[i], data))
			continue;
		set = isl_set_universe(isl_space_copy(graph->node[i].space));
		dom = isl_union_set_union(dom, isl_union_set_from_set(set));
	}

	return dom;
}

/* Return a list of unions of universe domains, where each element
 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
 */
static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, graph->scc);
	for (i = 0; i < graph->scc; ++i) {
		isl_union_set *dom;

		dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
		filters = isl_union_set_list_add(filters, dom);
	}

	return filters;
}

/* Return a list of two unions of universe domains, one for the SCCs up
 * to and including graph->src_scc and another for the other SCCs.
 */
static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	isl_union_set *dom;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, 2);
	dom = isl_sched_graph_domain(ctx, graph,
					&node_scc_at_most, graph->src_scc);
	filters = isl_union_set_list_add(filters, dom);
	dom = isl_sched_graph_domain(ctx, graph,
					&node_scc_at_least, graph->src_scc + 1);
	filters = isl_union_set_list_add(filters, dom);

	return filters;
}
/* Copy nodes that satisfy node_pred from the src dependence graph
 * to the dst dependence graph.
 */
static int copy_nodes(struct isl_sched_graph *dst, struct isl_sched_graph *src,
	int (*node_pred)(struct isl_sched_node *node, int data), int data)
{
	int i;

	dst->n = 0;
	for (i = 0; i < src->n; ++i) {
		int j;

		if (!node_pred(&src->node[i], data))
			continue;

		j = dst->n;
		dst->node[j].space = isl_space_copy(src->node[i].space);
		dst->node[j].compressed = src->node[i].compressed;
		dst->node[j].hull = isl_set_copy(src->node[i].hull);
		dst->node[j].compress =
			isl_multi_aff_copy(src->node[i].compress);
		dst->node[j].decompress =
			isl_multi_aff_copy(src->node[i].decompress);
		dst->node[j].nvar = src->node[i].nvar;
		dst->node[j].nparam = src->node[i].nparam;
		dst->node[j].sched = isl_mat_copy(src->node[i].sched);
		dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
		dst->node[j].coincident = src->node[i].coincident;
		dst->node[j].sizes = isl_multi_val_copy(src->node[i].sizes);
		dst->node[j].max = isl_vec_copy(src->node[i].max);
		dst->n++;

		if (!dst->node[j].space || !dst->node[j].sched)
			return -1;
		if (dst->node[j].compressed &&
		    (!dst->node[j].hull || !dst->node[j].compress ||
		     !dst->node[j].decompress))
			return -1;
	}

	return 0;
}

/* Copy non-empty edges that satisfy edge_pred from the src dependence graph
 * to the dst dependence graph.
 * If the source or destination node of the edge is not in the destination
 * graph, then it must be a backward proximity edge and it should simply
 * be ignored.
 */
static int copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
	struct isl_sched_graph *src,
	int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
{
	int i;
	enum isl_edge_type t;

	dst->n_edge = 0;
	for (i = 0; i < src->n_edge; ++i) {
		struct isl_sched_edge *edge = &src->edge[i];
		isl_map *map;
		isl_union_map *tagged_condition;
		isl_union_map *tagged_validity;
		struct isl_sched_node *dst_src, *dst_dst;

		if (!edge_pred(edge, data))
			continue;

		if (isl_map_plain_is_empty(edge->map))
			continue;

		dst_src = graph_find_node(ctx, dst, edge->src->space);
		dst_dst = graph_find_node(ctx, dst, edge->dst->space);
		if (!dst_src || !dst_dst) {
			if (is_validity(edge) || is_conditional_validity(edge))
				isl_die(ctx, isl_error_internal,
					"backward (conditional) validity edge",
					return -1);
			continue;
		}

		map = isl_map_copy(edge->map);
		tagged_condition = isl_union_map_copy(edge->tagged_condition);
		tagged_validity = isl_union_map_copy(edge->tagged_validity);

		dst->edge[dst->n_edge].src = dst_src;
		dst->edge[dst->n_edge].dst = dst_dst;
		dst->edge[dst->n_edge].map = map;
		dst->edge[dst->n_edge].tagged_condition = tagged_condition;
		dst->edge[dst->n_edge].tagged_validity = tagged_validity;
		dst->edge[dst->n_edge].types = edge->types;
		dst->n_edge++;

		if (edge->tagged_condition && !tagged_condition)
			return -1;
		if (edge->tagged_validity && !tagged_validity)
			return -1;

		for (t = isl_edge_first; t <= isl_edge_last; ++t) {
			if (edge !=
			    graph_find_edge(src, t, edge->src, edge->dst))
				continue;
			if (graph_edge_table_add(ctx, dst, t,
					    &dst->edge[dst->n_edge - 1]) < 0)
				return -1;
		}
	}

	return 0;
}
/* Compute the maximal number of variables over all nodes.
 * This is the maximal number of linearly independent schedule
 * rows that we need to compute.
 * Just in case we end up in a part of the dependence graph
 * with only lower-dimensional domains, we make sure we will
 * compute the required amount of extra linearly independent rows.
 */
static int compute_maxvar(struct isl_sched_graph *graph)
{
	int i;

	graph->maxvar = 0;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int nvar;

		if (node_update_cmap(node) < 0)
			return -1;
		nvar = node->nvar + graph->n_row - node->rank;
		if (nvar > graph->maxvar)
			graph->maxvar = nvar;
	}

	return 0;
}

/* Extract the subgraph of "graph" that consists of the nodes satisfying
 * "node_pred" and the edges satisfying "edge_pred" and store
 * the result in "sub".
 */
static int extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, struct isl_sched_graph *sub)
{
	int i, n = 0, n_edge = 0;
	int t;

	for (i = 0; i < graph->n; ++i)
		if (node_pred(&graph->node[i], data))
			++n;
	for (i = 0; i < graph->n_edge; ++i)
		if (edge_pred(&graph->edge[i], data))
			++n_edge;
	if (graph_alloc(ctx, sub, n, n_edge) < 0)
		return -1;
	if (copy_nodes(sub, graph, node_pred, data) < 0)
		return -1;
	if (graph_init_table(ctx, sub) < 0)
		return -1;
	for (t = 0; t <= isl_edge_last; ++t)
		sub->max_edge[t] = graph->max_edge[t];
	if (graph_init_edge_tables(ctx, sub) < 0)
		return -1;
	if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
		return -1;
	sub->n_row = graph->n_row;
	sub->max_row = graph->max_row;
	sub->n_total_row = graph->n_total_row;
	sub->band_start = graph->band_start;

	return 0;
}
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph);
static __isl_give isl_schedule_node *compute_schedule_wcc(
	isl_schedule_node *node, struct isl_sched_graph *graph);

/* Compute a schedule for a subgraph of "graph". In particular, for
 * the graph composed of nodes that satisfy node_pred and edges that
 * satisfy edge_pred.
 * If the subgraph is known to consist of a single component, then wcc should
 * be set and then we call compute_schedule_wcc on the constructed subgraph.
 * Otherwise, we call compute_schedule, which will check whether the subgraph
 * is connected.
 *
 * The schedule is inserted at "node" and the updated schedule node
 * is returned.
 */
static __isl_give isl_schedule_node *compute_sub_schedule(
	__isl_take isl_schedule_node *node, isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, int wcc)
{
	struct isl_sched_graph split = { 0 };

	if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
				&split) < 0)
		goto error;

	if (wcc)
		node = compute_schedule_wcc(node, &split);
	else
		node = compute_schedule(node, &split);

	graph_free(ctx, &split);
	return node;
error:
	graph_free(ctx, &split);
	return isl_schedule_node_free(node);
}

static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc == scc && edge->dst->scc == scc;
}

static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
{
	return edge->dst->scc <= scc;
}

static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc >= scc;
}
/* Reset the current band by dropping all its schedule rows.
 */
static int reset_band(struct isl_sched_graph *graph)
{
	int i;
	int drop;

	drop = graph->n_total_row - graph->band_start;
	graph->n_total_row -= drop;
	graph->n_row -= drop;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		isl_map_free(node->sched_map);
		node->sched_map = NULL;

		node->sched = isl_mat_drop_rows(node->sched,
						graph->band_start, drop);

		if (!node->sched)
			return -1;
	}

	return 0;
}

/* Split the current graph into two parts and compute a schedule for each
 * part individually. In particular, one part consists of all SCCs up
 * to and including graph->src_scc, while the other part contains the other
 * SCCs. The split is enforced by a sequence node inserted at position "node"
 * in the schedule tree. Return the updated schedule node.
 * If either of these two parts consists of a sequence, then it is spliced
 * into the sequence containing the two parts.
 *
 * The current band is reset. It would be possible to reuse
 * the previously computed rows as the first rows in the next
 * band, but recomputing them may result in better rows as we are looking
 * at a smaller part of the dependence graph.
 */
static __isl_give isl_schedule_node *compute_split_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int is_seq;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	if (reset_band(graph) < 0)
		return isl_schedule_node_free(node);

	next_band(graph);

	ctx = isl_schedule_node_get_ctx(node);
	filters = extract_split(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);
	node = isl_schedule_node_child(node, 1);
	node = isl_schedule_node_child(node, 0);

	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_least, &edge_src_scc_at_least,
				graph->src_scc + 1, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 1);
	node = isl_schedule_node_child(node, 0);
	node = isl_schedule_node_child(node, 0);
	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_most, &edge_dst_scc_at_most,
				graph->src_scc, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 0);

	return node;
}
/* Insert a band node at position "node" in the schedule tree corresponding
 * to the current band in "graph". Mark the band node permutable
 * if "permutable" is set.
 * The partial schedules and the coincidence property are extracted
 * from the graph nodes.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *insert_current_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int permutable)
{
	int i;
	int start, end, n;
	isl_multi_aff *ma;
	isl_multi_pw_aff *mpa;
	isl_multi_union_pw_aff *mupa;

	if (!node)
		return NULL;

	if (graph->n < 1)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	start = graph->band_start;
	end = graph->n_total_row;
	n = end - start;

	ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
	mpa = isl_multi_pw_aff_from_multi_aff(ma);
	mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);

	for (i = 1; i < graph->n; ++i) {
		isl_multi_union_pw_aff *mupa_i;

		ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
								start, n);
		mpa = isl_multi_pw_aff_from_multi_aff(ma);
		mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
		mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
	}
	node = isl_schedule_node_insert_partial_schedule(node, mupa);

	for (i = 0; i < n; ++i)
		node = isl_schedule_node_band_member_set_coincident(node, i,
					graph->node[0].coincident[start + i]);
	node = isl_schedule_node_band_set_permutable(node, permutable);

	return node;
}

/* Update the dependence relations based on the current schedule,
 * add the current band to "node" and then continue with the computation
 * of the next band.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *compute_next_band(
	__isl_take isl_schedule_node *node,
	struct isl_sched_graph *graph, int permutable)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, permutable);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_schedule(node, graph);
	node = isl_schedule_node_parent(node);

	return node;
}
/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * a node j to itself. "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_j_0 + c_j_n n + c_j_x x)
 *	= c_j_x (y - x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (-e_i, 0, c_j_x),
 * with each coefficient in c_j_x represented as a pair of non-negative
 * coefficients.
 */
static int add_intra_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	int offset;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);
	if (!coef)
		return -1;

	offset = coef_var_offset(coef);
	dim_map = intra_dim_map(ctx, graph, node, offset, 1);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return 0;
}
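/* For example, for a hypothetical self-dependence R = { S[x] -> S[x + 1] }
 * on a one-dimensional node, every (x,y) in R satisfies y - x = 1, so the
 * constraint that is added reduces to c_j_x >= e_i.
 */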
/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * node j to node k. "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for R and then plug in
 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, c_k_x - c_j_x)
 * with each coefficient (except e_i, c_*_0 and c_*_n)
 * represented as a pair of non-negative coefficients.
 */
static int add_inter_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	int offset;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;

	offset = coef_var_offset(coef);
	dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return 0;
}
/* Add constraints to graph->lp that force all (conditional) validity
 * dependences to be respected and attempt to carry them.
 */
static isl_stat add_all_constraints(struct isl_sched_graph *graph)
{
	int i, j;
	int pos;

	pos = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_any_validity(edge))
			continue;

		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (edge->src == edge->dst &&
			    add_intra_constraints(graph, edge, map, pos) < 0)
				return isl_stat_error;
			if (edge->src != edge->dst &&
			    add_inter_constraints(graph, edge, map, pos) < 0)
				return isl_stat_error;
			pos++;
		}
	}

	return isl_stat_ok;
}

/* Count the number of equality and inequality constraints
 * that will be added to the carry_lp problem.
 * We count each edge exactly once.
 */
static isl_stat count_all_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq)
{
	int i, j;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_any_validity(edge))
			continue;

		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (count_map_constraints(graph, edge, map,
						  n_eq, n_ineq, 1, 0) < 0)
				return isl_stat_error;
		}
	}

	return isl_stat_ok;
}

/* Return the total number of (validity) edges that carry_dependences will
 * attempt to carry.
 */
static int count_carry_edges(struct isl_sched_graph *graph)
{
	int i;
	int n_edge = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_any_validity(edge))
			continue;

		n_edge += isl_map_n_basic_map(edge->map);
	}

	return n_edge;
}
/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many validity dependences as possible.
 * In particular, for each dependence i, we bound the dependence distance
 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
 * of all e_i's. Dependences with e_i = 0 in the solution are simply
 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
 * Note that if the dependence relation is a union of basic maps,
 * then we have to consider each basic map individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * Below, we consider each of those basic maps as a separate "edge".
 * "n_edge" is the number of these edges.
 *
 * All variables of the LP are non-negative. The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables. The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 * - sum of (1 - e_i) over all edges
 * - sum of all c_n coefficients
 *   (unconstrained when computing non-parametric schedules)
 * - sum of positive and negative parts of all c_x coefficients
 * - for each edge
 *	- e_i
 * and for each node
 * - c_i_0
 * - c_i_n (if parametric)
 * - positive and negative parts of c_i_x
 *
 * The constraints are those from the (validity) edges plus three equalities
 * to express the sums and n_edge inequalities to express e_i <= 1.
 */
static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int n_edge)
{
	int i;
	int k;
	isl_space *dim;
	unsigned total;
	int n_eq, n_ineq;

	total = 3 + n_edge;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		node->start = total;
		total += 1 + node->nparam + 2 * node->nvar;
	}

	if (count_all_constraints(graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	dim = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 3;
	n_ineq += n_edge;
	graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
	graph->lp = isl_basic_set_set_rational(graph->lp);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][0], -n_edge);
	isl_int_set_si(graph->lp->eq[k][1], 1);
	for (i = 0; i < n_edge; ++i)
		isl_int_set_si(graph->lp->eq[k][4 + i], 1);

	if (add_param_sum_constraint(graph, 1) < 0)
		return isl_stat_error;
	if (add_var_sum_constraint(graph, 2) < 0)
		return isl_stat_error;

	for (i = 0; i < n_edge; ++i) {
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
		isl_int_set_si(graph->lp->ineq[k][0], 1);
	}

	if (add_all_constraints(graph) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
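/* For example, with a hypothetical n_edge = 2, the first equality reads
 *
 *	-2 + x_0 + e_0 + e_1 = 0
 *
 * i.e., x_0 = (1 - e_0) + (1 - e_1), and the two extra inequalities
 * 1 - e_0 >= 0 and 1 - e_1 >= 0 bound each e_i by 1.
 */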
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc);

/* Comparison function for sorting the statements based on
 * the corresponding value in "r".
 */
static int smaller_value(const void *a, const void *b, void *data)
{
	isl_vec *r = data;
	const int *i1 = a;
	const int *i2 = b;

	return isl_int_cmp(r->el[*i1], r->el[*i2]);
}
/* If the schedule_split_scaled option is set and if the linear
 * parts of the scheduling rows for all nodes in the graphs have
 * a non-trivial common divisor, then split off the remainder of the
 * constant term modulo this common divisor from the linear part.
 * Otherwise, insert a band node directly and continue with
 * the construction of the schedule.
 *
 * If a non-trivial common divisor is found, then
 * the linear part is reduced and the remainder is enforced
 * by a sequence node with the children placed in the order
 * of this remainder.
 * In particular, we assign an scc index based on the remainder and
 * then rely on compute_component_schedule to insert the sequence and
 * to continue the schedule construction on each part.
 */
static __isl_give isl_schedule_node *split_scaled(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int row;
	int scc;
	isl_ctx *ctx;
	isl_int gcd, gcd_i;
	isl_vec *r;
	int *order;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (!ctx->opt->schedule_split_scaled)
		return compute_next_band(node, graph, 0);
	if (graph->n <= 1)
		return compute_next_band(node, graph, 0);

	isl_int_init(gcd);
	isl_int_init(gcd_i);

	isl_int_set_si(gcd, 0);

	row = isl_mat_rows(graph->node[0].sched) - 1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int cols = isl_mat_cols(node->sched);

		isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
		isl_int_gcd(gcd, gcd, gcd_i);
	}

	isl_int_clear(gcd_i);

	if (isl_int_cmp_si(gcd, 1) <= 0) {
		isl_int_clear(gcd);
		return compute_next_band(node, graph, 0);
	}

	r = isl_vec_alloc(ctx, graph->n);
	order = isl_calloc_array(ctx, int, graph->n);
	if (!r || !order)
		goto error;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		order[i] = i;
		isl_int_fdiv_r(r->el[i], node->sched->row[row][0], gcd);
		isl_int_fdiv_q(node->sched->row[row][0],
			       node->sched->row[row][0], gcd);
		isl_int_mul(node->sched->row[row][0],
			    node->sched->row[row][0], gcd);
		node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
		if (!node->sched)
			goto error;
	}

	if (isl_sort(order, graph->n, sizeof(order[0]), &smaller_value, r) < 0)
		goto error;

	scc = 0;
	for (i = 0; i < graph->n; ++i) {
		if (i > 0 && isl_int_ne(r->el[order[i - 1]], r->el[order[i]]))
			++scc;
		graph->node[order[i]].scc = scc;
	}
	graph->scc = ++scc;

	isl_int_clear(gcd);
	isl_vec_free(r);
	free(order);

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, 0);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_component_schedule(node, graph, 0);
	node = isl_schedule_node_parent(node);

	return node;
error:
	isl_int_clear(gcd);
	isl_vec_free(r);
	free(order);
	return isl_schedule_node_free(node);
}
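/* For illustration (hypothetical rows): if the last rows of two nodes are
 * 2x + 3 and 2y + 4, then the common divisor of the linear parts is 2,
 * the remainders of the constant terms are 1 and 0, and the rows are
 * reduced to x + 1 and y + 2.  The node with remainder 0 is placed before
 * the node with remainder 1 by the induced scc order.
 */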
/* Is the schedule row "sol" trivial on node "node"?
 * That is, is the solution zero on the dimensions orthogonal to
 * the previously found solutions?
 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
 *
 * Each coefficient is represented as the difference between
 * two non-negative values in "sol". "sol" has been computed
 * in terms of the original iterators (i.e., without use of cmap).
 * We construct the schedule row s and write it as a linear
 * combination of (linear combinations of) previously computed schedule rows.
 * s = Q c or c = U s.
 * If the final entries of c are all zero, then the solution is trivial.
 */
static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
{
	int trivial;
	isl_vec *node_sol;

	if (!sol)
		return -1;
	if (node->nvar == node->rank)
		return 0;

	node_sol = extract_var_coef(node, sol);
	node_sol = isl_mat_vec_product(isl_mat_copy(node->cinv), node_sol);
	if (!node_sol)
		return -1;

	trivial = isl_seq_first_non_zero(node_sol->el + node->rank,
					node->nvar - node->rank) == -1;

	isl_vec_free(node_sol);

	return trivial;
}

/* Is the schedule row "sol" trivial on any node where it should
 * not be trivial?
 * "sol" has been computed in terms of the original iterators
 * (i.e., without use of cmap).
 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
 */
static int is_any_trivial(struct isl_sched_graph *graph,
	__isl_keep isl_vec *sol)
{
	int i;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int trivial;

		if (!needs_row(graph, node))
			continue;
		trivial = is_trivial(node, sol);
		if (trivial < 0 || trivial)
			return trivial;
	}

	return 0;
}
/* Does the schedule represented by "sol" perform loop coalescing on "node"?
 * If so, return the position of the coalesced dimension.
 * Otherwise, return node->nvar or -1 on error.
 *
 * In particular, look for pairs of coefficients c_i and c_j such that
 * |c_j/c_i| >= size_i, i.e., |c_j| >= |c_i * size_i|.
 * If any such pair is found, then return i.
 * If size_i is infinity, then no check on c_i needs to be performed.
 */
static int find_node_coalescing(struct isl_sched_node *node,
	__isl_keep isl_vec *sol)
{
	int i, j;
	isl_int max;
	isl_vec *csol;

	if (node->nvar <= 1)
		return node->nvar;

	csol = extract_var_coef(node, sol);
	if (!csol)
		return -1;
	isl_int_init(max);
	for (i = 0; i < node->nvar; ++i) {
		isl_val *v;

		if (isl_int_is_zero(csol->el[i]))
			continue;
		v = isl_multi_val_get_val(node->sizes, i);
		if (!v)
			goto error;
		if (!isl_val_is_int(v)) {
			isl_val_free(v);
			continue;
		}
		isl_int_mul(max, v->n, csol->el[i]);
		isl_val_free(v);

		for (j = 0; j < node->nvar; ++j) {
			if (j == i)
				continue;
			if (isl_int_abs_ge(csol->el[j], max))
				break;
		}
		if (j < node->nvar)
			break;
	}

	isl_int_clear(max);
	isl_vec_free(csol);
	return i;
error:
	isl_int_clear(max);
	isl_vec_free(csol);
	return -1;
}
/* Force the schedule coefficient at position "pos" of "node" to be zero
 * in "tl".
 * The coefficient is encoded as the difference between two non-negative
 * variables. Force these two variables to have the same value.
 */
static __isl_give isl_tab_lexmin *zero_out_node_coef(
	__isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
{
	int dim;
	isl_ctx *ctx;
	isl_vec *eq;

	ctx = isl_space_get_ctx(node->space);
	dim = isl_tab_lexmin_dim(tl);
	if (dim < 0)
		return isl_tab_lexmin_free(tl);
	eq = isl_vec_alloc(ctx, 1 + dim);
	eq = isl_vec_clr(eq);
	if (!eq)
		return isl_tab_lexmin_free(tl);

	pos = 1 + node_var_coef_offset(node) + 2 * pos;
	isl_int_set_si(eq->el[pos], 1);
	isl_int_set_si(eq->el[pos + 1], -1);
	tl = isl_tab_lexmin_add_eq(tl, eq->el);
	isl_vec_free(eq);

	return tl;
}

/* Return the lexicographically smallest rational point in the basic set
 * from which "tl" was constructed, double checking that this input set
 * was not empty.
 */
static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
{
	isl_vec *sol;

	sol = isl_tab_lexmin_get_solution(tl);
	if (!sol)
		return NULL;
	if (sol->size == 0)
		isl_die(isl_vec_get_ctx(sol), isl_error_internal,
			"error in schedule construction",
			return isl_vec_free(sol));
	return sol;
}
/* Does the solution "sol" of the LP problem constructed by setup_carry_lp
 * carry any of the "n_edge" groups of dependences?
 * The value in the first position is the sum of (1 - e_i) over all "n_edge"
 * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
 * by the edge are carried by the solution.
 * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
 * one of those is carried.
 *
 * Note that despite the fact that the problem is solved using a rational
 * solver, the solution is guaranteed to be integral.
 * Specifically, the dependence distance lower bounds e_i (and therefore
 * also their sum) are integers. See Lemma 5 of [1].
 *
 * Any potential denominator of the sum is cleared by this function.
 * The denominator is not relevant for any of the other elements
 * in the solution.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 * Problem, Part II: Multi-Dimensional Time.
 * In Intl. Journal of Parallel Programming, 1992.
 */
static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
	isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
	isl_int_set_si(sol->el[0], 1);
	return isl_int_cmp_si(sol->el[1], n_edge) < 0;
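/* Illustration (not part of the original code, hypothetical values):
 * with n_edge == 3 and e = (1, 0, 0), i.e., only the first group of
 * dependences carried, the sum stored in the solution (after clearing
 * the denominator) is (1 - 1) + (1 - 0) + (1 - 0) = 2 < 3, so the
 * function returns 1.  If no group is carried, the sum equals n_edge
 * and 0 is returned.
 */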
/* Return the lexicographically smallest rational point in "lp",
 * assuming that all variables are non-negative and performing some
 * additional sanity checks.
 * In particular, "lp" should not be empty by construction.
 * Double check that this is the case.
 * Also, check that dependences are carried for at least one of
 * the "n_edge" edges.
 *
 * If the computed schedule performs loop coalescing on a given node,
 * i.e., if it is of the form
 *
 *	c_i i + c_j j + ...
 *
 * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
 * to cut out this solution. Repeat this process until no more loop
 * coalescing occurs or until no more dependences can be carried.
 * In the latter case, revert to the previously computed solution.
 */
static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
	__isl_take isl_basic_set *lp, int n_edge)
	isl_vec *sol, *prev = NULL;
	int treat_coalescing;

	ctx = isl_basic_set_get_ctx(lp);
	treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
	tl = isl_tab_lexmin_from_basic_set(lp);

		sol = non_empty_solution(tl);

		if (!carries_dependences(sol, n_edge)) {
				isl_die(ctx, isl_error_unknown,
					"unable to carry dependences",

		prev = isl_vec_free(prev);
		if (!treat_coalescing)

		for (i = 0; i < graph->n; ++i) {
			struct isl_sched_node *node = &graph->node[i];

			pos = find_node_coalescing(node, sol);

			if (pos < node->nvar)

		tl = zero_out_node_coef(tl, &graph->node[i], pos);
	} while (i < graph->n);

	isl_tab_lexmin_free(tl);

	isl_tab_lexmin_free(tl);
/* Construct a schedule row for each node such that as many validity dependences
 * as possible are carried and then continue with the next band.
 *
 * If there are no validity dependences, then no dependence can be carried and
 * the procedure is guaranteed to fail. If there is more than one component,
 * then try computing a schedule on each component separately
 * to prevent or at least postpone this failure.
 *
 * If the computed schedule row turns out to be trivial on one or
 * more nodes where it should not be trivial, then we throw it away
 * and try again on each component separately.
 *
 * If there is only one component, then we accept the schedule row anyway,
 * but we do not consider it as a complete row and therefore do not
 * increment graph->n_row. Note that the ranks of the nodes that
 * do get a non-trivial schedule part will get updated regardless and
 * graph->maxvar is computed based on these ranks. The test for
 * whether more schedule rows are required in compute_schedule_wcc
 * is therefore not affected.
 *
 * Insert a band corresponding to the schedule row at position "node"
 * of the schedule tree and continue with the construction of the schedule.
 * This insertion and the continued construction is performed by split_scaled
 * after optionally checking for non-trivial common divisors.
 */
static __isl_give isl_schedule_node *carry_dependences(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
	n_edge = count_carry_edges(graph);
	if (n_edge == 0 && graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	ctx = isl_schedule_node_get_ctx(node);
	if (setup_carry_lp(ctx, graph, n_edge) < 0)
		return isl_schedule_node_free(node);

	lp = isl_basic_set_copy(graph->lp);
	sol = non_neg_lexmin(graph, lp, n_edge);
		return isl_schedule_node_free(node);

	trivial = is_any_trivial(graph, sol);
		sol = isl_vec_free(sol);
	} else if (trivial && graph->scc > 1) {
		return compute_component_schedule(node, graph, 1);

	if (update_schedule(graph, sol, 0, 0) < 0)
		return isl_schedule_node_free(node);

	return split_scaled(node, graph);
/* Topologically sort statements mapped to the same schedule iteration
 * and insert a sequence node in front of "node"
 * corresponding to this order.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band. Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * If it turns out to be impossible to sort the statements apart,
 * because different dependences impose different orderings
 * on the statements, then we extend the schedule such that
 * it carries at least one more dependence.
 */
static __isl_give isl_schedule_node *sort_statements(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
	isl_union_set_list *filters;

	ctx = isl_schedule_node_get_ctx(node);
		isl_die(ctx, isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->n_edge == 0)

	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->scc < graph->n) {
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);
/* Are there any (non-empty) (conditional) validity edges in the graph?
 */
static int has_validity_edges(struct isl_sched_graph *graph)
	for (i = 0; i < graph->n_edge; ++i) {
		empty = isl_map_plain_is_empty(graph->edge[i].map);

		if (is_any_validity(&graph->edge[i]))
/* Should we apply a Feautrier step?
 * That is, did the user request the Feautrier algorithm and are
 * there any validity dependences (left)?
 */
static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
	if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)

	return has_validity_edges(graph);
/* Compute a schedule for a connected dependence graph using Feautrier's
 * multi-dimensional scheduling algorithm and return the updated schedule node.
 *
 * The original algorithm is described in [1].
 * The main idea is to minimize the number of scheduling dimensions, by
 * trying to satisfy as many dependences as possible per scheduling dimension.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 * Problem, Part II: Multi-Dimensional Time.
 * In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
	isl_schedule_node *node, struct isl_sched_graph *graph)
	return carry_dependences(node, graph);
/* Turn off the "local" bit on all (condition) edges.
 */
static void clear_local_edges(struct isl_sched_graph *graph)
	for (i = 0; i < graph->n_edge; ++i)
		if (is_condition(&graph->edge[i]))
			clear_local(&graph->edge[i]);
/* Does "graph" have both condition and conditional validity edges?
 */
static int need_condition_check(struct isl_sched_graph *graph)
	int any_condition = 0;
	int any_conditional_validity = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (is_condition(&graph->edge[i]))

		if (is_conditional_validity(&graph->edge[i]))
			any_conditional_validity = 1;

	return any_condition && any_conditional_validity;
/* Does "graph" contain any coincidence edge?
 */
static int has_any_coincidence(struct isl_sched_graph *graph)
	for (i = 0; i < graph->n_edge; ++i)
		if (is_coincidence(&graph->edge[i]))
/* Extract the final schedule row as a map with the iteration domain
 * of "node" as domain.
 */
static __isl_give isl_map *final_row(struct isl_sched_node *node)
	isl_local_space *ls;

	row = isl_mat_rows(node->sched) - 1;
	ls = isl_local_space_from_space(isl_space_copy(node->space));
	aff = extract_schedule_row(ls, node, row);
	return isl_map_from_aff(aff);
/* Is the conditional validity dependence in the edge with index "edge_index"
 * violated by the latest (i.e., final) row of the schedule?
 * That is, is i scheduled after j
 * for any conditional validity dependence i -> j?
 */
static int is_violated(struct isl_sched_graph *graph, int edge_index)
	isl_map *src_sched, *dst_sched, *map;
	struct isl_sched_edge *edge = &graph->edge[edge_index];

	src_sched = final_row(edge->src);
	dst_sched = final_row(edge->dst);
	map = isl_map_copy(edge->map);
	map = isl_map_apply_domain(map, src_sched);
	map = isl_map_apply_range(map, dst_sched);
	map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
	empty = isl_map_is_empty(map);
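/* Illustration (not part of the original code, hypothetical values):
 * for a dependence { S[i] -> T[i] } and final schedule rows that map
 * S[i] to 2 * i and T[i] to i, applying the rows to the dependence gives
 * { [2i] -> [i] } and restricting it to pairs where the input is greater
 * than the output leaves { [2i] -> [i] : i >= 1 }, which is non-empty,
 * so the dependence is reported as violated.
 */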
/* Does "graph" have any satisfied condition edges that
 * are adjacent to the conditional validity constraint with
 * domain "conditional_source" and range "conditional_sink"?
 *
 * A satisfied condition is one that is not local.
 * If a condition was forced to be local already (i.e., marked as local)
 * then there is no need to check if it is in fact local.
 *
 * Additionally, mark all adjacent condition edges found as local.
 */
static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
	__isl_keep isl_union_set *conditional_source,
	__isl_keep isl_union_set *conditional_sink)
	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent, local;
		isl_union_map *condition;

		if (!is_condition(&graph->edge[i]))

		if (is_local(&graph->edge[i]))

		condition = graph->edge[i].tagged_condition;
		adjacent = domain_intersects(condition, conditional_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(condition,
					conditional_source);

		set_local(&graph->edge[i]);

		local = is_condition_false(&graph->edge[i]);
/* Are there any violated conditional validity dependences with
 * adjacent condition dependences that are not local with respect
 * to the current schedule?
 * That is, is the conditional validity constraint violated?
 *
 * Additionally, mark all those adjacent condition dependences as local.
 * We also mark those adjacent condition dependences that were not marked
 * as local before, but just happened to be local already. This ensures
 * that they remain local if the schedule is recomputed.
 *
 * We first collect domain and range of all violated conditional validity
 * dependences and then check if there are any adjacent non-local
 * condition dependences.
 */
static int has_violated_conditional_constraint(isl_ctx *ctx,
	struct isl_sched_graph *graph)
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		isl_union_set *uset;
		isl_union_map *umap;

		if (!is_conditional_validity(&graph->edge[i]))

		violated = is_violated(graph, i);

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);
		source = isl_union_set_coalesce(source);

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
		sink = isl_union_set_coalesce(sink);

	any = has_adjacent_true_conditions(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
/* Examine the current band (the rows between graph->band_start and
 * graph->n_total_row), deciding whether to drop it or add it to "node"
 * and then continue with the computation of the next band, if any.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band. Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * The caller keeps looking for a new row as long as
 * graph->n_row < graph->maxvar. If the latest attempt to find
 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
 * then we either
 * - split between SCCs and start over (assuming we found an interesting
 *   pair of SCCs between which to split)
 * - continue with the next band (assuming the current band has at least
 *   one row)
 * - try to carry as many dependences as possible and continue with the next
 *   band
 * In each case, we first insert a band node in the schedule tree
 * if any rows have been computed.
 *
 * If the caller managed to complete the schedule, we insert a band node
 * (if any schedule rows were computed) and we finish off by topologically
 * sorting the statements based on the remaining dependences.
 */
static __isl_give isl_schedule_node *compute_schedule_finish_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
	if (graph->n_row < graph->maxvar) {
		int empty = graph->n_total_row == graph->band_start;

		ctx = isl_schedule_node_get_ctx(node);
		if (!ctx->opt->schedule_maximize_band_depth && !empty)
			return compute_next_band(node, graph, 1);
		if (graph->src_scc >= 0)
			return compute_split_schedule(node, graph);
			return compute_next_band(node, graph, 1);
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);

	insert = graph->n_total_row > graph->band_start;
		node = insert_current_band(node, graph, 1);
		node = isl_schedule_node_child(node, 0);

	node = sort_statements(node, graph, initialized);
		node = isl_schedule_node_parent(node);
/* Construct a band of schedule rows for a connected dependence graph.
 * The caller is responsible for determining the strongly connected
 * components and calling compute_maxvar first.
 *
 * We try to find a sequence of as many schedule rows as possible that result
 * in non-negative dependence distances (independent of the previous rows
 * in the sequence, i.e., such that the sequence is tilable), with as
 * many of the initial rows as possible satisfying the coincidence constraints.
 * The computation stops if we can't find any more rows or if we have found
 * all the rows we wanted to find.
 *
 * If ctx->opt->schedule_outer_coincidence is set, then we force the
 * outermost dimension to satisfy the coincidence constraints. If this
 * turns out to be impossible, we fall back on the general scheme above
 * and try to carry as many dependences as possible.
 *
 * If "graph" contains both condition and conditional validity dependences,
 * then we need to check that the conditional schedule constraint
 * is satisfied, i.e., there are no violated conditional validity dependences
 * that are adjacent to any non-local condition dependences.
 * If there are, then we mark all those adjacent condition dependences
 * as local and recompute the current band. Those dependences that
 * are marked local will then be forced to be local.
 * The initial computation is performed with no dependences marked as local.
 * If we are lucky, then there will be no violated conditional validity
 * dependences adjacent to any non-local condition dependences.
 * Otherwise, we mark some additional condition dependences as local and
 * recompute. We continue this process until there are no violations left or
 * until we are no longer able to compute a schedule.
 * Since there are only a finite number of dependences,
 * there will only be a finite number of iterations.
 */
static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
	struct isl_sched_graph *graph)
	int has_coincidence;
	int use_coincidence;
	int force_coincidence = 0;
	int check_conditional;

	if (sort_sccs(graph) < 0)
		return isl_stat_error;

	clear_local_edges(graph);
	check_conditional = need_condition_check(graph);
	has_coincidence = has_any_coincidence(graph);

	if (ctx->opt->schedule_outer_coincidence)
		force_coincidence = 1;

	use_coincidence = has_coincidence;
	while (graph->n_row < graph->maxvar) {
		graph->src_scc = -1;
		graph->dst_scc = -1;

		if (setup_lp(ctx, graph, use_coincidence) < 0)
			return isl_stat_error;
		sol = solve_lp(graph);
			return isl_stat_error;
		if (sol->size == 0) {
			int empty = graph->n_total_row == graph->band_start;

			if (use_coincidence && (!force_coincidence || !empty)) {
				use_coincidence = 0;

		coincident = !has_coincidence || use_coincidence;
		if (update_schedule(graph, sol, 1, coincident) < 0)
			return isl_stat_error;

		if (!check_conditional)

		violated = has_violated_conditional_constraint(ctx, graph);
			return isl_stat_error;

		if (reset_band(graph) < 0)
			return isl_stat_error;

		use_coincidence = has_coincidence;
/* Compute a schedule for a connected dependence graph by considering
 * the graph as a whole and return the updated schedule node.
 *
 * The actual schedule rows of the current band are computed by
 * compute_schedule_wcc_band. compute_schedule_finish_band takes
 * care of integrating the band into "node" and continuing
 * the computation.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
	ctx = isl_schedule_node_get_ctx(node);
	if (compute_schedule_wcc_band(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	return compute_schedule_finish_band(node, graph, 1);
/* Clustering information used by compute_schedule_wcc_clustering.
 *
 * "n" is the number of SCCs in the original dependence graph
 * "scc" is an array of "n" elements, each representing an SCC
 * of the original dependence graph. All entries in the same cluster
 * have the same number of schedule rows.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
 * where each cluster is represented by the index of the first SCC
 * in the cluster. Initially, each SCC belongs to a cluster containing
 * only that SCC.
 *
 * "scc_in_merge" is used by merge_clusters_along_edge to keep
 * track of which SCCs need to be merged.
 *
 * "cluster" contains the merged clusters of SCCs after the clustering
 * has completed.
 *
 * "scc_node" is a temporary data structure used inside copy_partial.
 * For each SCC, it keeps track of the number of nodes in the SCC
 * that have already been copied.
 */
struct isl_clustering {
	int n;
	struct isl_sched_graph *scc;
	struct isl_sched_graph *cluster;
	int *scc_cluster;
	int *scc_node;
	int *scc_in_merge;
};
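/* Illustration (not part of the original code, hypothetical values):
 * with n == 4 SCCs, the initial state is scc_cluster = { 0, 1, 2, 3 }.
 * If SCCs 1 and 2 subsequently get merged into one cluster, this becomes
 * scc_cluster = { 0, 1, 1, 3 }, i.e., the cluster of SCCs 1 and 2 is
 * represented by its first SCC, with index 1.
 */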
/* Initialize the clustering data structure "c" from "graph".
 *
 * In particular, allocate memory, extract the SCCs from "graph"
 * into c->scc, initialize scc_cluster and construct
 * a band of schedule rows for each SCC.
 * Within each SCC, there is only one SCC by definition.
 * Each SCC initially belongs to a cluster containing only that SCC.
 */
static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *graph)
	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
	c->scc_node = isl_calloc_array(ctx, int, c->n);
	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
	if (!c->scc || !c->cluster ||
	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
		return isl_stat_error;

	for (i = 0; i < c->n; ++i) {
		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
					&edge_scc_exactly, i, &c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_maxvar(&c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = i;
/* Free all memory allocated for "c".
 */
static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
	for (i = 0; i < c->n; ++i)
		graph_free(ctx, &c->scc[i]);

	for (i = 0; i < c->n; ++i)
		graph_free(ctx, &c->cluster[i]);

	free(c->scc_cluster);
	free(c->scc_in_merge);
/* Should we refrain from merging the cluster in "graph" with
 * any other cluster?
 * In particular, is its current schedule band empty and incomplete?
 */
static int bad_cluster(struct isl_sched_graph *graph)
	return graph->n_row < graph->maxvar &&
		graph->n_total_row == graph->band_start;
/* Return the index of an edge in "graph" that can be used to merge
 * two clusters in "c".
 * Return graph->n_edge if no such edge can be found.
 * Return -1 on error.
 *
 * In particular, return a proximity edge between two clusters
 * that is not marked "no_merge" and such that neither of the
 * two clusters has an incomplete, empty band.
 *
 * If there are multiple such edges, then try and find the most
 * appropriate edge to use for merging. In particular, pick the edge
 * with the greatest weight. If there are multiple of those,
 * then pick one with the shortest distance between
 * the two cluster representatives.
 */
static int find_proximity(struct isl_sched_graph *graph,
	struct isl_clustering *c)
	int i, best = graph->n_edge, best_dist, best_weight;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_proximity(edge))

		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))

		dist = c->scc_cluster[edge->dst->scc] -
			c->scc_cluster[edge->src->scc];

		weight = edge->weight;
		if (best < graph->n_edge) {
			if (best_weight > weight)

			if (best_weight == weight && best_dist <= dist)

		best_weight = weight;
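/* Illustration (not part of the original code, hypothetical values):
 * suppose there are three candidate proximity edges with
 * (weight, distance between cluster representatives) equal to
 * (2, 3), (5, 4) and (5, 1). The edges with weight 5 are preferred
 * over the one with weight 2, and among those the edge with
 * distance 1 is selected, so its index would be returned.
 */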
/* Internal data structure used in mark_merge_sccs.
 *
 * "graph" is the dependence graph in which a strongly connected
 * component is constructed.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
 * "src" and "dst" are the indices of the nodes that are being merged.
 */
struct isl_mark_merge_sccs_data {
	struct isl_sched_graph *graph;
	int *scc_cluster;
	int src;
	int dst;
};
/* Check whether the cluster containing node "i" depends on the cluster
 * containing node "j". If "i" and "j" belong to the same cluster,
 * then they are taken to depend on each other to ensure that
 * the resulting strongly connected component consists of complete
 * clusters. Furthermore, if "i" and "j" are the two nodes that
 * are being merged, then they are taken to depend on each other as well.
 * Otherwise, check if there is a (conditional) validity dependence
 * from node[j] to node[i], forcing node[i] to follow node[j].
 */
static isl_bool cluster_follows(int i, int j, void *user)
	struct isl_mark_merge_sccs_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	int *scc_cluster = data->scc_cluster;

	if (data->src == i && data->dst == j)
		return isl_bool_true;
	if (data->src == j && data->dst == i)
		return isl_bool_true;
	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
		return isl_bool_true;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
/* Mark all SCCs that belong to either of the two clusters in "c"
 * connected by the edge in "graph" with index "edge", or to any
 * of the intermediate clusters.
 * The marking is recorded in c->scc_in_merge.
 *
 * The given edge has been selected for merging two clusters,
 * meaning that there is at least a proximity edge between the two nodes.
 * However, there may also be (indirect) validity dependences
 * between the two nodes. When merging the two clusters, all clusters
 * containing one or more of the intermediate nodes along the
 * indirect validity dependences need to be merged in as well.
 *
 * First collect all such nodes by computing the strongly connected
 * component (SCC) containing the two nodes connected by the edge, where
 * the two nodes are considered to depend on each other to make
 * sure they end up in the same SCC. Similarly, each node is considered
 * to depend on every other node in the same cluster to ensure
 * that the SCC consists of complete clusters.
 *
 * Then the original SCCs that contain any of these nodes are marked
 * in c->scc_in_merge.
 */
static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	int edge, struct isl_clustering *c)
	struct isl_mark_merge_sccs_data data;
	struct isl_tarjan_graph *g;

	for (i = 0; i < c->n; ++i)
		c->scc_in_merge[i] = 0;

	data.scc_cluster = c->scc_cluster;
	data.src = graph->edge[edge].src - graph->node;
	data.dst = graph->edge[edge].dst - graph->node;

	g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
					&cluster_follows, &data);

		isl_die(ctx, isl_error_internal,
			"expecting at least two nodes in component",

	if (g->order[--i] != -1)
		isl_die(ctx, isl_error_internal,
			"expecting end of component marker", goto error);

	for (--i; i >= 0 && g->order[i] != -1; --i) {
		int scc = graph->node[g->order[i]].scc;
		c->scc_in_merge[scc] = 1;

	isl_tarjan_graph_free(g);

	isl_tarjan_graph_free(g);
	return isl_stat_error;
/* Construct the identifier "cluster_i".
 */
static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
	snprintf(name, sizeof(name), "cluster_%d", i);
	return isl_id_alloc(ctx, name, NULL);

/* Construct the space of the cluster with index "i" containing
 * the strongly connected component "scc".
 *
 * In particular, construct a space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of "scc".
 */
static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
	nvar = scc->n_total_row - scc->band_start;
	space = isl_space_copy(scc->node[0].space);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, nvar);
	id = cluster_id(isl_space_get_ctx(space), i);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
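/* Illustration (not part of the original code, hypothetical values):
 * if the current band of the SCC with cluster index 2 consists of
 * two schedule rows, then the constructed space is the two-dimensional
 * set space { cluster_2[i0, i1] }, with only the parameters inherited
 * from the space of the first node of the SCC.
 */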
/* Collect the domain of the graph for merging clusters.
 *
 * In particular, for each cluster with first SCC "i", construct
 * a set in the space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of the cluster.
 */
static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
	isl_union_set *domain;

	space = isl_space_params_alloc(ctx, 0);
	domain = isl_union_set_empty(space);

	for (i = 0; i < graph->scc; ++i) {
		if (!c->scc_in_merge[i])

		if (c->scc_cluster[i] != i)

		space = cluster_space(&c->scc[i], i);
		domain = isl_union_set_add_set(domain, isl_set_universe(space));
/* Construct a map from the original instances to the corresponding
 * cluster instance in the current bands of the clusters in "c".
 */
static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
	isl_union_map *cluster_map;

	space = isl_space_params_alloc(ctx, 0);
	cluster_map = isl_union_map_empty(space);
	for (i = 0; i < graph->scc; ++i) {
		if (!c->scc_in_merge[i])

		id = cluster_id(ctx, c->scc_cluster[i]);
		start = c->scc[i].band_start;
		n = c->scc[i].n_total_row - start;
		for (j = 0; j < c->scc[i].n; ++j) {
			struct isl_sched_node *node = &c->scc[i].node[j];

			ma = node_extract_partial_schedule_multi_aff(node,
								start, n);
			ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
							isl_id_copy(id));
			map = isl_map_from_multi_aff(ma);
			cluster_map = isl_union_map_add_map(cluster_map, map);
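/* Illustration (not part of the original code, hypothetical values):
 * if the current band of the cluster represented by SCC 0 schedules
 * a statement S by [i + j, j], then the constructed map contains
 * { S[i, j] -> cluster_0[i + j, j] }, relating each original instance
 * to the corresponding cluster instance.
 */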
/* Add "umap" to the schedule constraints "sc" of all types of "edge"
 * that are not isl_edge_condition or isl_edge_conditional_validity.
 */
static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
	enum isl_edge_type t;

	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
		if (t == isl_edge_condition ||
		    t == isl_edge_conditional_validity)

		if (!is_type(edge, t))

		sc = isl_schedule_constraints_add(sc, t,
						isl_union_map_copy(umap));
/* Add schedule constraints of types isl_edge_condition and
 * isl_edge_conditional_validity to "sc" by applying "umap" to
 * the domains of the wrapped relations in domain and range
 * of the corresponding tagged constraints of "edge".
 */
static __isl_give isl_schedule_constraints *add_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
	enum isl_edge_type t;
	isl_union_map *tagged;

	for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
		if (!is_type(edge, t))

		if (t == isl_edge_condition)
			tagged = isl_union_map_copy(edge->tagged_condition);
		else
			tagged = isl_union_map_copy(edge->tagged_validity);
		tagged = isl_union_map_zip(tagged);
		tagged = isl_union_map_apply_domain(tagged,
					isl_union_map_copy(umap));
		tagged = isl_union_map_zip(tagged);
		sc = isl_schedule_constraints_add(sc, t, tagged);
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to the original constraints represented by "edge".
 *
 * For non-tagged dependence constraints, the cluster constraints
 * are obtained by applying "cluster_map" to the edge->map.
 *
 * For tagged dependence constraints, "cluster_map" needs to be applied
 * to the domains of the wrapped relations in domain and range
 * of the tagged dependence constraints. Pick out the mappings
 * from these domains from "cluster_map" and construct their product.
 * This mapping can then be applied to the pair of domains.
 */
static __isl_give isl_schedule_constraints *collect_edge_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
	isl_union_map *umap;
	isl_union_set *uset;
	isl_union_map *umap1, *umap2;

	umap = isl_union_map_from_map(isl_map_copy(edge->map));
	umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(cluster_map));
	umap = isl_union_map_apply_range(umap,
				isl_union_map_copy(cluster_map));
	sc = add_non_conditional_constraints(edge, umap, sc);
	isl_union_map_free(umap);

	if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))

	space = isl_space_domain(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap1 = isl_union_map_copy(cluster_map);
	umap1 = isl_union_map_intersect_domain(umap1, uset);
	space = isl_space_range(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap2 = isl_union_map_copy(cluster_map);
	umap2 = isl_union_map_intersect_domain(umap2, uset);
	umap = isl_union_map_product(umap1, umap2);

	sc = add_conditional_constraints(edge, umap, sc);

	isl_union_map_free(umap);
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to all edges in "graph" between nodes that
 * belong to SCCs that are marked for merging in "scc_in_merge".
 */
static __isl_give isl_schedule_constraints *collect_constraints(
	struct isl_sched_graph *graph, int *scc_in_merge,
	__isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])

		if (!scc_in_merge[edge->dst->scc])

		sc = collect_edge_constraints(edge, cluster_map, sc);
/* Construct a dependence graph for scheduling clusters with respect
 * to each other and store the result in "merge_graph".
 * In particular, the nodes of the graph correspond to the schedule
 * dimensions of the current bands of those clusters that have been
 * marked for merging in "c".
 *
 * First construct an isl_schedule_constraints object for this domain
 * by transforming the edges in "graph" to the domain.
 * Then initialize a dependence graph for scheduling from these
 * constraints.
 */
static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
	isl_union_set *domain;
	isl_union_map *cluster_map;
	isl_schedule_constraints *sc;

	domain = collect_domain(ctx, graph, c);
	sc = isl_schedule_constraints_on_domain(domain);
		return isl_stat_error;
	cluster_map = collect_cluster_map(ctx, graph, c);
	sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
	isl_union_map_free(cluster_map);

	r = graph_init(merge_graph, sc);

	isl_schedule_constraints_free(sc);
/* Compute the maximal number of remaining schedule rows that still need
 * to be computed for the nodes that belong to clusters with the maximal
 * dimension for the current band (i.e., the band that is to be merged).
 * Only clusters that are about to be merged are considered.
 * "maxvar" is the maximal dimension for the current band.
 * "c" contains information about the clusters.
 *
 * Return the maximal number of remaining schedule rows or -1 on error.
 */
static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
	for (i = 0; i < c->n; ++i) {
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])

		nvar = scc->n_total_row - scc->band_start;

		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];

			if (node_update_cmap(node) < 0)

			slack = node->nvar - node->rank;
			if (slack > max_slack)
/* If there are any clusters where the dimension of the current band
 * (i.e., the band that is to be merged) is smaller than "maxvar" and
 * if there are any nodes in such a cluster where the number
 * of remaining schedule rows that still need to be computed
 * is greater than "max_slack", then return the smallest current band
 * dimension of all these clusters. Otherwise return the original value
 * of "maxvar". Return -1 in case of any error.
 * Only clusters that are about to be merged are considered.
 * "c" contains information about the clusters.
 */
static int limit_maxvar_to_slack(int maxvar, int max_slack,
	struct isl_clustering *c)
	for (i = 0; i < c->n; ++i) {
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])

		nvar = scc->n_total_row - scc->band_start;

		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];

			if (node_update_cmap(node) < 0)

			slack = node->nvar - node->rank;
			if (slack > max_slack) {
/* Adjust merge_graph->maxvar based on the number of remaining schedule rows
 * that still need to be computed. In particular, if there is a node
 * in a cluster where the dimension of the current band is smaller
 * than merge_graph->maxvar, but the number of remaining schedule rows
 * is greater than that of any node in a cluster with the maximal
 * dimension for the current band (i.e., merge_graph->maxvar),
 * then adjust merge_graph->maxvar to the (smallest) current band dimension
 * of those clusters. Without this adjustment, the total number of
 * schedule dimensions would be increased, resulting in a skewed view
 * of the number of coincident dimensions.
 * "c" contains information about the clusters.
 *
 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
 * then there is no point in attempting any merge since it will be rejected
 * anyway. Set merge_graph->maxvar to zero in such cases.
 */
static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
	struct isl_sched_graph *merge_graph, struct isl_clustering *c)
	int max_slack, maxvar;

	max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
		return isl_stat_error;
	maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
		return isl_stat_error;

	if (maxvar < merge_graph->maxvar) {
		if (isl_options_get_schedule_maximize_band_depth(ctx))
			merge_graph->maxvar = 0;
		else
			merge_graph->maxvar = maxvar;
/* Return the number of coincident dimensions in the current band of "graph",
 * where the nodes of "graph" are assumed to be scheduled by a single band.
 */
static int get_n_coincident(struct isl_sched_graph *graph)
	for (i = graph->band_start; i < graph->n_total_row; ++i)
		if (!graph->node[0].coincident[i])

	return i - graph->band_start;
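/* Illustration (not part of the original code, hypothetical values):
 * if the current band covers rows 2 to 5 and the coincident flags of
 * the first node for these rows are 1, 1, 0, 1, then the loop stops
 * at the third row of the band and 2 is returned.
 */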
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph", given that
 * coincidence should be maximized?
 *
 * If the number of coincident schedule dimensions in the merged band
 * would be less than the maximal number of coincident schedule dimensions
 * in any of the merged clusters, then the clusters should not be merged.
 */
static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	for (i = 0; i < c->n; ++i) {
		if (!c->scc_in_merge[i])

		n_coincident = get_n_coincident(&c->scc[i]);
		if (n_coincident > max_coincident)
			max_coincident = n_coincident;

	n_coincident = get_n_coincident(merge_graph);

	return n_coincident >= max_coincident;
/* Return the transformation on "node" expressed by the current (and only)
 * band of "merge_graph" applied to the clusters in "c".
 *
 * First find the representation of "node" in its SCC in "c" and
 * extract the transformation expressed by the current band.
 * Then extract the transformation applied by "merge_graph"
 * to the cluster to which this SCC belongs.
 * Combine the two to obtain the complete transformation on the node.
 *
 * Note that the range of the first transformation is an anonymous space,
 * while the domain of the second is named "cluster_X". The range
 * of the former therefore needs to be adjusted before the two
 * can be combined.
 */
static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
	struct isl_sched_node *node, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	struct isl_sched_node *scc_node, *cluster_node;
	isl_multi_aff *ma, *ma2;

	scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
	start = c->scc[node->scc].band_start;
	n = c->scc[node->scc].n_total_row - start;
	ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
	space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
	cluster_node = graph_find_node(ctx, merge_graph, space);
	if (space && !cluster_node)
		isl_die(ctx, isl_error_internal, "unable to find cluster",
			space = isl_space_free(space));
	id = isl_space_get_tuple_id(space, isl_dim_set);
	ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
	isl_space_free(space);
	n = merge_graph->n_total_row;
	ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
	ma = isl_multi_aff_pullback_multi_aff(ma2, ma);

	return isl_map_from_multi_aff(ma);
/* Given a set of distances "set", are they bounded by a small constant
 * in direction "pos"?
 * In practice, check if they are bounded by 2 by checking that there
 * are no elements with a value greater than or equal to 3 or
 * smaller than or equal to -3.
 */
static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
		return isl_bool_error;

	test = isl_set_copy(set);
	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
	bounded = isl_set_is_empty(test);

	if (bounded < 0 || !bounded)

	test = isl_set_copy(set);
	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
	bounded = isl_set_is_empty(test);
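/* Illustration (not part of the original code, hypothetical values):
 * the distance set { [d] : -2 <= d <= 2 } is considered bounded in
 * direction 0 because adding the lower bound d >= 3 or the upper bound
 * d <= -3 makes it empty, whereas { [d] : d >= 0 } is not, since
 * it still contains elements with d >= 3.
 */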
/* Does the set "set" have a fixed (but possibly parametric) value
 * at dimension "pos"?
 */
static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
		return isl_bool_error;
	set = isl_set_copy(set);
	n = isl_set_dim(set, isl_dim_set);
	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
	set = isl_set_project_out(set, isl_dim_set, 0, pos);
	single = isl_set_is_singleton(set);
/* Does "map" have a fixed (but possibly parametric) value
 * at dimension "pos" of either its domain or its range?
 */
static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
	set = isl_map_domain(isl_map_copy(map));
	single = has_single_value(set, pos);

	if (single < 0 || single)

	set = isl_map_range(isl_map_copy(map));
	single = has_single_value(set, pos);
/* Does the edge "edge" from "graph" have bounded dependence distances
 * in the merged graph "merge_graph" of a selection of clusters in "c"?
 *
 * Extract the complete transformations of the source and destination
 * nodes of the edge, apply them to the edge constraints and
 * compute the differences. Finally, check if these differences are bounded
 * in each direction.
 *
 * If the dimension of the band is greater than the number of
 * dimensions that can be expected to be optimized by the edge
 * (based on its weight), then also allow the differences to be unbounded
 * in the remaining dimensions, but only if either the source or
 * the destination has a fixed value in that direction.
 * This allows a statement that produces values that are used by
 * several instances of another statement to be merged with that
 * other statement.
 * However, merging such clusters will introduce an inherently
 * large proximity distance inside the merged cluster, meaning
 * that proximity distances will no longer be optimized in
 * subsequent merges. These merges are therefore only allowed
 * after all other possible merges have been tried.
 * The first time such a merge is encountered, the weight of the edge
 * is replaced by a negative weight. The second time (i.e., after
 * all merges over edges with a non-negative weight have been tried),
 * the merge is allowed.
 */
static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	map = isl_map_copy(edge->map);
	t = extract_node_transformation(ctx, edge->src, c, merge_graph);
	map = isl_map_apply_domain(map, t);
	t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
	map = isl_map_apply_range(map, t);
	dist = isl_map_deltas(isl_map_copy(map));

	bounded = isl_bool_true;
	n = isl_set_dim(dist, isl_dim_set);
	n_slack = n - edge->weight;
	if (edge->weight < 0)
		n_slack -= graph->max_weight + 1;
	for (i = 0; i < n; ++i) {
		isl_bool bounded_i, singular_i;

		bounded_i = distance_is_bounded(dist, i);

		if (edge->weight >= 0)
			bounded = isl_bool_false;

		singular_i = has_singular_src_or_dst(map, i);

		bounded = isl_bool_false;

	if (!bounded && i >= n && edge->weight >= 0)
		edge->weight -= graph->max_weight + 1;

	return isl_bool_error;
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * In particular, is there at least one proximity constraint
 * that is optimized by the merge?
 *
 * A proximity constraint is considered to be optimized
 * if the dependence distances are small.
 */
static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_proximity(edge))

		if (!c->scc_in_merge[edge->src->scc])

		if (!c->scc_in_merge[edge->dst->scc])

		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])

		bounded = has_bounded_distances(ctx, edge, graph, c,
						merge_graph);
		if (bounded < 0 || bounded)

	return isl_bool_false;
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * If the current band is empty, then the clusters should not be merged.
 *
 * If the band depth should be maximized and the merge schedule
 * is incomplete (meaning that the dimension of some of the schedule
 * bands in the original schedule will be reduced), then the clusters
 * should not be merged.
 *
 * If the schedule_maximize_coincidence option is set, then check that
 * the number of coincident schedule dimensions is not reduced.
 *
 * Finally, only allow the merge if at least one proximity
 * constraint is optimized.
 */
static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
	if (merge_graph->n_total_row == merge_graph->band_start)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
	    merge_graph->n_total_row < merge_graph->maxvar)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
		ok = ok_to_merge_coincident(c, merge_graph);

	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
/* Apply the schedule in "t_node" to the "n" rows starting at "first"
 * of the schedule in "node" and return the result.
 *
 * That is, essentially compute
 *
 *	T * N(first:first+n-1)
 *
 * taking into account the constant term and the parameter coefficients.
 */
static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
	struct isl_sched_node *t_node, struct isl_sched_node *node,
	int first, int n)
	int n_row, n_col, n_param, n_var;

	n_param = node->nparam;
	n_row = isl_mat_rows(t_node->sched);
	n_col = isl_mat_cols(node->sched);
	t = isl_mat_alloc(ctx, n_row, n_col);

	for (i = 0; i < n_row; ++i) {
		isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
		isl_seq_clr(t->row[i] + 1 + n_param, n_var);
		for (j = 0; j < n; ++j)
			isl_seq_addmul(t->row[i],
				t_node->sched->row[i][1 + n_param + j],
				node->sched->row[first + j],
				1 + n_param + n_var);
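/* Illustration (not part of the original code, hypothetical values):
 * suppose node has one parameter and two variables and the selected rows
 * of its schedule are
 *
 *	N = [ 1 0 1 0 ]	(i.e., 1 + i)
 *	    [ 0 2 0 1 ]	(i.e., 2p + j)
 *
 * and the cluster schedule row is T = [ 0 1 1 1 ], i.e., p plus the sum
 * of the two band dimensions.  The resulting row of the transformed
 * schedule is then [ 1 3 1 1 ], i.e., 1 + 3p + i + j, combining the
 * constant terms and parameter coefficients of both schedules.
 */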
/* Apply the cluster schedule in "t_node" to the current band
 * schedule of the nodes in "graph".
 *
 * In particular, replace the rows starting at band_start
 * by the result of applying the cluster schedule in "t_node"
 * to the original rows.
 *
 * The coincidence of the schedule is determined by the coincidence
 * of the cluster schedule.
 */
static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_sched_node *t_node)
	start = graph->band_start;
	n = graph->n_total_row - start;

	n_new = isl_mat_rows(t_node->sched);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		t = node_transformation(ctx, t_node, node, start, n);
		node->sched = isl_mat_drop_rows(node->sched, start, n);
		node->sched = isl_mat_concat(node->sched, t);
		node->sched_map = isl_map_free(node->sched_map);
			return isl_stat_error;
		for (j = 0; j < n_new; ++j)
			node->coincident[start + j] = t_node->coincident[j];

	graph->n_total_row -= n;
	graph->n_total_row += n_new;
	graph->n_row += n_new;
/* Merge the clusters marked for merging in "c" into a single
 * cluster using the cluster schedule in the current band of "merge_graph".
 * The representative SCC for the new cluster is the SCC with
 * the smallest index.
 *
 * The current band schedule of each SCC in the new cluster is obtained
 * by applying the schedule of the corresponding original cluster
 * to the original band schedule.
 * All SCCs in the new cluster have the same number of schedule rows.
 */
static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
	for (i = 0; i < c->n; ++i) {
		struct isl_sched_node *node;

		if (!c->scc_in_merge[i])

		space = cluster_space(&c->scc[i], c->scc_cluster[i]);
			return isl_stat_error;
		node = graph_find_node(ctx, merge_graph, space);
		isl_space_free(space);
			isl_die(ctx, isl_error_internal,
				"unable to find cluster",
				return isl_stat_error);
		if (transform(ctx, &c->scc[i], node) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = cluster;
/* Try and merge the clusters of SCCs marked in c->scc_in_merge
 * by scheduling the current cluster bands with respect to each other.
 *
 * Construct a dependence graph with a space for each cluster and
 * with the coordinates of each space corresponding to the schedule
 * dimensions of the current band of that cluster.
 * Construct a cluster schedule in this cluster dependence graph and
 * apply it to the current cluster bands if it is applicable
 * according to ok_to_merge.
 *
 * If the number of remaining schedule dimensions in a cluster
 * with a non-maximal current schedule dimension is greater than
 * the number of remaining schedule dimensions in clusters
 * with a maximal current schedule dimension, then restrict
 * the number of rows to be computed in the cluster schedule
 * to the minimal such non-maximal current schedule dimension.
 * Do this by adjusting merge_graph.maxvar.
 *
 * Return isl_bool_true if the clusters have effectively been merged
 * into a single cluster.
 *
 * Note that since the standard scheduling algorithm minimizes the maximal
 * distance over proximity constraints, the proximity constraints between
 * the merged clusters may not be optimized any further than what is
 * sufficient to bring the distances within the limits of the internal
 * proximity constraints inside the individual clusters.
 * It may therefore make sense to perform an additional translation step
 * to bring the clusters closer to each other, while maintaining
 * the linear part of the merging schedule found using the standard
 * scheduling algorithm.
 */
static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
	struct isl_sched_graph merge_graph = { 0 };

	if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)

	if (compute_maxvar(&merge_graph) < 0)

	if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)

	if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)

	merged = ok_to_merge(ctx, graph, c, &merge_graph);
	if (merged && merge(ctx, c, &merge_graph) < 0)

	graph_free(ctx, &merge_graph);

	graph_free(ctx, &merge_graph);
	return isl_bool_error;
/* Is there any edge marked "no_merge" between two SCCs that are
 * about to be merged (i.e., that are set in "scc_in_merge")?
 * "merge_edge" is the proximity edge along which the clusters of SCCs
 * are going to be merged.
 *
 * If there is any edge between two SCCs with a negative weight,
 * while the weight of "merge_edge" is non-negative, then this
 * means that the edge was postponed. "merge_edge" should then
 * also be postponed since merging along the edge with negative weight should
 * be postponed until all edges with non-negative weight have been tried.
 * Replace the weight of "merge_edge" by a negative weight as well and
 * tell the caller not to attempt a merge.
 */
static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
	struct isl_sched_edge *merge_edge)
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])

		if (!scc_in_merge[edge->dst->scc])

		if (merge_edge->weight >= 0 && edge->weight < 0) {
			merge_edge->weight -= graph->max_weight + 1;
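/* Illustration (not part of the original code, hypothetical values):
 * with graph->max_weight == 5, postponing an edge of weight 3 turns its
 * weight into 3 - (5 + 1) = -3.  The negative weight marks the edge as
 * postponed while the original weight can still be recovered by adding
 * max_weight + 1 back, as has_bounded_distances does implicitly when
 * computing the slack for edges with a negative weight.
 */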
/* Merge the two clusters in "c" connected by the edge in "graph"
 * with index "edge" into a single cluster.
 * If it turns out to be impossible to merge these two clusters,
 * then mark the edge as "no_merge" such that it will not be
 * considered again.
 *
 * First mark all SCCs that need to be merged. This includes the SCCs
 * in the two clusters, but it may also include the SCCs
 * of intermediate clusters.
 * If there is already a no_merge edge between any pair of such SCCs,
 * then simply mark the current edge as no_merge as well.
 * Likewise, if any of those edges was postponed by has_bounded_distances,
 * then postpone the current edge as well.
 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
 * if the clusters did not end up getting merged, unless the non-merge
 * is due to the fact that the edge was postponed. This postponement
 * can be recognized by a change in weight (from non-negative to negative).
 */
static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
	struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
	int edge_weight = graph->edge[edge].weight;

	if (mark_merge_sccs(ctx, graph, edge, c) < 0)
		return isl_stat_error;

	if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
		merged = isl_bool_false;
	else
		merged = try_merge(ctx, graph, c);
		return isl_stat_error;

	if (!merged && edge_weight == graph->edge[edge].weight)
		graph->edge[edge].no_merge = 1;
/* Does "node" belong to the cluster identified by "cluster"?
 */
static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
	return node->cluster == cluster;

/* Does "edge" connect two nodes belonging to the cluster
 * identified by "cluster"?
 */
static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
/* Swap the schedule of "node1" and "node2".
 * Both nodes have been derived from the same node in a common parent graph.
 * Since the "coincident" field is shared with that node
 * in the parent graph, there is no need to also swap this field.
 */
static void swap_sched(struct isl_sched_node *node1,
	struct isl_sched_node *node2)
	sched = node1->sched;
	node1->sched = node2->sched;
	node2->sched = sched;

	sched_map = node1->sched_map;
	node1->sched_map = node2->sched_map;
	node2->sched_map = sched_map;
/* Copy the current band schedule from the SCCs that form the cluster
 * with index "pos" to the actual cluster at position "pos".
 * By construction, the index of the first SCC that belongs to the cluster
 * is also "pos".
 *
 * The order of the nodes inside both the SCCs and the cluster
 * is assumed to be same as the order in the original "graph".
 *
 * Since the SCC graphs will no longer be used after this function,
 * the schedules are actually swapped rather than copied.
 */
static isl_stat copy_partial(struct isl_sched_graph *graph,
	struct isl_clustering *c, int pos)
{
	int i, j;

	c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
	c->cluster[pos].n_row = c->scc[pos].n_row;
	c->cluster[pos].maxvar = c->scc[pos].maxvar;
	j = 0;
	for (i = 0; i < graph->n; ++i) {
		int k;
		int s;

		if (graph->node[i].cluster != pos)
			continue;

		s = graph->node[i].scc;
		k = c->scc_node[s]++;
		swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
		if (c->scc[s].maxvar > c->cluster[pos].maxvar)
			c->cluster[pos].maxvar = c->scc[s].maxvar;
		++j;
	}

	return isl_stat_ok;
}

/* Is there a (conditional) validity dependence from node[j] to node[i],
 * forcing node[i] to follow node[j] or do the nodes belong to the same
 * cluster?
 */
static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	if (graph->node[i].cluster == graph->node[j].cluster)
		return isl_bool_true;
	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}

/* Extract the merged clusters of SCCs in "graph", sort them, and
 * store them in c->clusters.  Update c->scc_cluster accordingly.
 *
 * First keep track of the cluster containing the SCC to which a node
 * belongs in the node itself.
 * Then extract the clusters into c->clusters, copying the current
 * band schedule from the SCCs that belong to the cluster.
 * Do this only once per cluster.
 *
 * Finally, topologically sort the clusters and update c->scc_cluster
 * to match the new scc numbering.  While the SCCs were originally
 * sorted already, some SCCs that depend on some other SCCs may
 * have been merged with SCCs that appear before these other SCCs.
 * A reordering may therefore be required.
 */
static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	for (i = 0; i < graph->n; ++i)
		graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];

	for (i = 0; i < graph->scc; ++i) {
		if (c->scc_cluster[i] != i)
			continue;
		if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
				&edge_cluster_exactly, i, &c->cluster[i]) < 0)
			return isl_stat_error;
		c->cluster[i].src_scc = -1;
		c->cluster[i].dst_scc = -1;
		if (copy_partial(graph, c, i) < 0)
			return isl_stat_error;
	}

	if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
		return isl_stat_error;
	for (i = 0; i < graph->n; ++i)
		c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;

	return isl_stat_ok;
}

/* Compute weights on the proximity edges of "graph" that can
 * be used by find_proximity to find the most appropriate
 * proximity edge to use to merge two clusters in "c".
 * The weights are also used by has_bounded_distances to determine
 * whether the merge should be allowed.
 * Store the maximum of the computed weights in graph->max_weight.
 *
 * The computed weight is a measure for the number of remaining schedule
 * dimensions that can still be completely aligned.
 * In particular, compute the number of equalities between
 * input dimensions and output dimensions in the proximity constraints.
 * The directions that are already handled by outer schedule bands
 * are projected out prior to determining this number.
 *
 * Edges that will never be considered by find_proximity are ignored.
 */
static isl_stat compute_weights(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	graph->max_weight = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		struct isl_sched_node *src = edge->src;
		struct isl_sched_node *dst = edge->dst;
		isl_basic_map *hull;
		int n_in, n_out;

		if (!is_proximity(edge))
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;

		hull = isl_map_affine_hull(isl_map_copy(edge->map));
		hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
						    isl_mat_copy(src->ctrans));
		hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
						    isl_mat_copy(dst->ctrans));
		hull = isl_basic_map_project_out(hull,
						isl_dim_in, 0, src->rank);
		hull = isl_basic_map_project_out(hull,
						isl_dim_out, 0, dst->rank);
		hull = isl_basic_map_remove_divs(hull);
		n_in = isl_basic_map_dim(hull, isl_dim_in);
		n_out = isl_basic_map_dim(hull, isl_dim_out);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_in, 0, n_in);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_out, 0, n_out);
		if (!hull)
			return isl_stat_error;
		edge->weight = hull->n_eq;
		isl_basic_map_free(hull);

		if (edge->weight > graph->max_weight)
			graph->max_weight = edge->weight;
	}

	return isl_stat_ok;
}

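/* The helper below is an illustrative sketch and is not used by the
 * scheduler itself.  It shows, through the public API, the essence of
 * the weight computed above for a single proximity relation: the number
 * of equalities in the affine hull of the relation.
 * The name "example_count_aligned_dims" and the use of a string
 * representation are assumptions made for the sake of the example;
 * compute_weights additionally applies a change of basis and projects out
 * the directions already handled by outer bands, which is not done here.
 */
static int example_count_aligned_dims(isl_ctx *ctx, const char *proximity)
{
	isl_basic_map *hull;
	int n_eq;

	/* Only the equality constraints satisfied by all elements
	 * of the relation survive in the affine hull.
	 */
	hull = isl_map_affine_hull(isl_map_read_from_str(ctx, proximity));
	n_eq = isl_basic_map_n_equality(hull);
	isl_basic_map_free(hull);

	/* For "{ S[i,j] -> T[a,b] : a = i and b = j + 1 }", this reports
	 * two equalities, i.e., two completely alignable dimensions.
	 */
	return n_eq;
}
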
/* Call compute_schedule_finish_band on each of the clusters in "c"
 * in their topological order.  This order is determined by the scc
 * fields of the nodes in "graph".
 * Combine the results in a sequence expressing the topological order.
 *
 * If there is only one cluster left, then there is no need to introduce
 * a sequence node.  Also, in this case, the cluster necessarily contains
 * the SCC at position 0 in the original graph and is therefore also
 * stored in the first cluster of "c".
 */
static __isl_give isl_schedule_node *finish_bands_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (graph->scc == 1)
		return compute_schedule_finish_band(node, &c->cluster[0], 0);

	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	for (i = 0; i < graph->scc; ++i) {
		int j = c->scc_cluster[i];
		node = isl_schedule_node_child(node, i);
		node = isl_schedule_node_child(node, 0);
		node = compute_schedule_finish_band(node, &c->cluster[j], 0);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}

/* Compute a schedule for a connected dependence graph by first considering
 * each strongly connected component (SCC) in the graph separately and then
 * incrementally combining them into clusters.
 * Return the updated schedule node.
 *
 * Initially, each cluster consists of a single SCC, each with its
 * own band schedule.  The algorithm then tries to merge pairs
 * of clusters along a proximity edge until no more suitable
 * proximity edges can be found.  During this merging, the schedule
 * is maintained in the individual SCCs.
 * After the merging is completed, the full resulting clusters
 * are extracted and in finish_bands_clustering,
 * compute_schedule_finish_band is called on each of them to integrate
 * the band into "node" and to continue the computation.
 *
 * compute_weights initializes the weights that are used by find_proximity.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;
	struct isl_clustering c;
	int i;

	ctx = isl_schedule_node_get_ctx(node);

	if (clustering_init(ctx, &c, graph) < 0)
		goto error;

	if (compute_weights(graph, &c) < 0)
		goto error;

	for (;;) {
		i = find_proximity(graph, &c);
		if (i < 0)
			goto error;
		if (i >= graph->n_edge)
			break;
		if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
			goto error;
	}

	if (extract_clusters(ctx, graph, &c) < 0)
		goto error;

	node = finish_bands_clustering(node, graph, &c);

	clustering_free(ctx, &c);
	return node;
error:
	clustering_free(ctx, &c);
	return isl_schedule_node_free(node);
}

/* Compute a schedule for a connected dependence graph and return
 * the updated schedule node.
 *
 * If Feautrier's algorithm is selected, we first recursively try to satisfy
 * as many validity dependences as possible.  When all validity dependences
 * are satisfied we extend the schedule to a full-dimensional schedule.
 *
 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
 * depending on whether the user has selected the option to try and
 * compute a schedule for the entire (weakly connected) component first.
 * If there is only a single strongly connected component (SCC), then
 * there is no point in trying to combine SCCs
 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
 * is called instead.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (compute_maxvar(graph) < 0)
		return isl_schedule_node_free(node);

	if (need_feautrier_step(ctx, graph))
		return compute_schedule_wcc_feautrier(node, graph);

	if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
		return compute_schedule_wcc_whole(node, graph);

	return compute_schedule_wcc_clustering(node, graph);
}

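/* Illustrative sketch, not part of the scheduler: the choice made above can
 * be steered through the public options.  Setting schedule_whole_component
 * to 0 requests the incremental clustering strategy for components with
 * multiple SCCs, while selecting Feautrier's algorithm makes
 * compute_schedule_wcc take the compute_schedule_wcc_feautrier path.
 * The function name below is hypothetical.
 */
static void example_select_strategy(isl_ctx *ctx)
{
	/* Schedule each SCC separately and then merge clusters. */
	isl_options_set_schedule_whole_component(ctx, 0);
	/* Alternatively, switch to Feautrier's scheduling algorithm. */
	isl_options_set_schedule_algorithm(ctx, ISL_SCHEDULE_ALGORITHM_FEAUTRIER);
}
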
/* Compute a schedule for each group of nodes identified by node->scc
 * separately and then combine them in a sequence node (or as set node
 * if graph->weak is set) inserted at position "node" of the schedule tree.
 * Return the updated schedule node.
 *
 * If "wcc" is set then each of the groups belongs to a single
 * weakly connected component in the dependence graph so that
 * there is no need for compute_sub_schedule to look for weakly
 * connected components.
 */
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc)
{
	int component;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	if (graph->weak)
		node = isl_schedule_node_insert_set(node, filters);
	else
		node = isl_schedule_node_insert_sequence(node, filters);

	for (component = 0; component < graph->scc; ++component) {
		node = isl_schedule_node_child(node, component);
		node = isl_schedule_node_child(node, 0);
		node = compute_sub_schedule(node, ctx, graph,
					    &node_scc_exactly,
					    &edge_scc_exactly, component, wcc);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}

/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	} else {
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	}

	if (graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
}

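/* Illustrative sketch, not part of the scheduler: a caller that wants the
 * behavior described above, with a separate schedule per strongly connected
 * component combined in a sequence, can request it through the
 * schedule_serialize_sccs option.  The function name is hypothetical.
 */
static void example_serialize_sccs(isl_ctx *ctx)
{
	/* Schedule each SCC separately and serialize them. */
	isl_options_set_schedule_serialize_sccs(ctx, 1);
}
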
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	if (isl_union_set_n_set(domain) == 0) {
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);
	}

	if (graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);

	node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);

	return sched;
}

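/* Illustrative sketch, not part of the library: a typical invocation of
 * isl_schedule_constraints_compute_schedule through the public API,
 * assuming the usual public headers are available.  The function name,
 * the iteration domain and the dependence relation are made-up placeholders.
 */
static __isl_give isl_schedule *example_compute_schedule(isl_ctx *ctx)
{
	const char *d = "{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }";
	const char *dep = "{ S[i] -> T[i] }";
	isl_union_set *domain;
	isl_union_map *validity, *proximity;
	isl_schedule_constraints *sc;

	domain = isl_union_set_read_from_str(ctx, d);
	validity = isl_union_map_read_from_str(ctx, dep);
	proximity = isl_union_map_read_from_str(ctx, dep);

	/* Validity dependences are always respected; proximity dependences
	 * only guide the minimization of dependence distances.
	 */
	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}
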
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
{
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}