isl_scheduler.c: rename is_type to isl_sched_edge_has_type
[isl.git] / isl_scheduler.c
blob6409b20399dcf1f9ff5483898c88e99dfdeb5ce0
1 /*
2 * Copyright 2011 INRIA Saclay
3 * Copyright 2012-2014 Ecole Normale Superieure
4 * Copyright 2015-2016 Sven Verdoolaege
5 * Copyright 2016 INRIA Paris
6 * Copyright 2017 Sven Verdoolaege
8 * Use of this software is governed by the MIT license
10 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
11 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
12 * 91893 Orsay, France
13 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
14 * and Centre de Recherche Inria de Paris, 2 rue Simone Iff - Voie DQ12,
15 * CS 42112, 75589 Paris Cedex 12, France
18 #include <isl_ctx_private.h>
19 #include <isl_map_private.h>
20 #include <isl_space_private.h>
21 #include <isl_aff_private.h>
22 #include <isl/hash.h>
23 #include <isl/id.h>
24 #include <isl/constraint.h>
25 #include <isl/schedule.h>
26 #include <isl_schedule_constraints.h>
27 #include <isl/schedule_node.h>
28 #include <isl_mat_private.h>
29 #include <isl_vec_private.h>
30 #include <isl/set.h>
31 #include <isl_union_set_private.h>
32 #include <isl_seq.h>
33 #include <isl_tab.h>
34 #include <isl_dim_map.h>
35 #include <isl/map_to_basic_set.h>
36 #include <isl_sort.h>
37 #include <isl_options_private.h>
38 #include <isl_tarjan.h>
39 #include <isl_morph.h>
40 #include <isl/ilp.h>
41 #include <isl_val_private.h>
44 * The scheduling algorithm implemented in this file was inspired by
45 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
46 * Parallelization and Locality Optimization in the Polyhedral Model".
48 * For a detailed description of the variant implemented in isl,
49 * see Verdoolaege and Janssens, "Scheduling for PPCG" (2017).
53 /* Internal information about a node that is used during the construction
54 * of a schedule.
55 * space represents the original space in which the domain lives;
56 * that is, the space is not affected by compression
57 * sched is a matrix representation of the schedule being constructed
58 * for this node; if compressed is set, then this schedule is
59 * defined over the compressed domain space
60 * sched_map is an isl_map representation of the same (partial) schedule
61 * sched_map may be NULL; if compressed is set, then this map
62 * is defined over the uncompressed domain space
63 * rank is the number of linearly independent rows in the linear part
64 * of sched
65 * the rows of "vmap" represent a change of basis for the node
66 * variables; the first rank rows span the linear part of
67 * the schedule rows; the remaining rows are linearly independent
68 * the rows of "indep" represent linear combinations of the schedule
69 * coefficients that are non-zero when the schedule coefficients are
70 * linearly independent of previously computed schedule rows.
71 * start is the first variable in the LP problem in the sequences that
72 * represents the schedule coefficients of this node
73 * nvar is the dimension of the (compressed) domain
74 * nparam is the number of parameters or 0 if we are not constructing
75 * a parametric schedule
77 * If compressed is set, then hull represents the constraints
78 * that were used to derive the compression, while compress and
79 * decompress map the original space to the compressed space and
80 * vice versa.
82 * scc is the index of SCC (or WCC) this node belongs to
84 * "cluster" is only used inside extract_clusters and identifies
85 * the cluster of SCCs that the node belongs to.
87 * coincident contains a boolean for each of the rows of the schedule,
88 * indicating whether the corresponding scheduling dimension satisfies
89 * the coincidence constraints in the sense that the corresponding
90 * dependence distances are zero.
92 * If the schedule_treat_coalescing option is set, then
93 * "sizes" contains the sizes of the (compressed) instance set
94 * in each direction. If there is no fixed size in a given direction,
95 * then the corresponding size value is set to infinity.
96 * If the schedule_treat_coalescing option or the schedule_max_coefficient
97 * option is set, then "max" contains the maximal values for
98 * schedule coefficients of the (compressed) variables. If no bound
99 * needs to be imposed on a particular variable, then the corresponding
100 * value is negative.
101 * If not NULL, then "bounds" contains a non-parametric set
102 * in the compressed space that is bounded by the size in each direction.
104 struct isl_sched_node {
105 isl_space *space;
106 int compressed;
107 isl_set *hull;
108 isl_multi_aff *compress;
109 isl_pw_multi_aff *decompress;
110 isl_mat *sched;
111 isl_map *sched_map;
112 int rank;
113 isl_mat *indep;
114 isl_mat *vmap;
115 int start;
116 int nvar;
117 int nparam;
119 int scc;
120 int cluster;
122 int *coincident;
124 isl_multi_val *sizes;
125 isl_basic_set *bounds;
126 isl_vec *max;
129 static isl_bool node_has_tuples(const void *entry, const void *val)
131 struct isl_sched_node *node = (struct isl_sched_node *)entry;
132 isl_space *space = (isl_space *) val;
134 return isl_space_has_equal_tuples(node->space, space);
137 static int isl_sched_node_scc_exactly(struct isl_sched_node *node, int scc)
139 return node->scc == scc;
142 static int node_scc_at_most(struct isl_sched_node *node, int scc)
144 return node->scc <= scc;
147 static int node_scc_at_least(struct isl_sched_node *node, int scc)
149 return node->scc >= scc;
152 /* An edge in the dependence graph. An edge may be used to
153 * ensure validity of the generated schedule, to minimize the dependence
154 * distance or both
156 * map is the dependence relation, with i -> j in the map if j depends on i
157 * tagged_condition and tagged_validity contain the union of all tagged
158 * condition or conditional validity dependence relations that
159 * specialize the dependence relation "map"; that is,
160 * if (i -> a) -> (j -> b) is an element of "tagged_condition"
161 * or "tagged_validity", then i -> j is an element of "map".
162 * If these fields are NULL, then they represent the empty relation.
163 * src is the source node
164 * dst is the sink node
166 * types is a bit vector containing the types of this edge.
167 * validity is set if the edge is used to ensure correctness
168 * coincidence is used to enforce zero dependence distances
169 * proximity is set if the edge is used to minimize dependence distances
170 * condition is set if the edge represents a condition
171 * for a conditional validity schedule constraint
172 * local can only be set for condition edges and indicates that
173 * the dependence distance over the edge should be zero
174 * conditional_validity is set if the edge is used to conditionally
175 * ensure correctness
177 * For validity edges, start and end mark the sequence of inequality
178 * constraints in the LP problem that encode the validity constraint
179 * corresponding to this edge.
181 * During clustering, an edge may be marked "no_merge" if it should
182 * not be used to merge clusters.
183 * The weight is also only used during clustering and it is
184 * an indication of how many schedule dimensions on either side
185 * of the schedule constraints can be aligned.
186 * If the weight is negative, then this means that this edge was postponed
187 * by has_bounded_distances or any_no_merge. The original weight can
188 * be retrieved by adding 1 + graph->max_weight, with "graph"
189 * the graph containing this edge.
191 struct isl_sched_edge {
192 isl_map *map;
193 isl_union_map *tagged_condition;
194 isl_union_map *tagged_validity;
196 struct isl_sched_node *src;
197 struct isl_sched_node *dst;
199 unsigned types;
201 int start;
202 int end;
204 int no_merge;
205 int weight;
208 /* Is "edge" marked as being of type "type"?
210 static int isl_sched_edge_has_type(struct isl_sched_edge *edge,
211 enum isl_edge_type type)
213 return ISL_FL_ISSET(edge->types, 1 << type);
216 /* Mark "edge" as being of type "type".
218 static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
220 ISL_FL_SET(edge->types, 1 << type);
223 /* No longer mark "edge" as being of type "type"?
225 static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
227 ISL_FL_CLR(edge->types, 1 << type);
230 /* Is "edge" marked as a validity edge?
232 static int is_validity(struct isl_sched_edge *edge)
234 return isl_sched_edge_has_type(edge, isl_edge_validity);
237 /* Mark "edge" as a validity edge.
239 static void set_validity(struct isl_sched_edge *edge)
241 set_type(edge, isl_edge_validity);
244 /* Is "edge" marked as a proximity edge?
246 static int is_proximity(struct isl_sched_edge *edge)
248 return isl_sched_edge_has_type(edge, isl_edge_proximity);
251 /* Is "edge" marked as a local edge?
253 static int is_local(struct isl_sched_edge *edge)
255 return isl_sched_edge_has_type(edge, isl_edge_local);
258 /* Mark "edge" as a local edge.
260 static void set_local(struct isl_sched_edge *edge)
262 set_type(edge, isl_edge_local);
265 /* No longer mark "edge" as a local edge.
267 static void clear_local(struct isl_sched_edge *edge)
269 clear_type(edge, isl_edge_local);
272 /* Is "edge" marked as a coincidence edge?
274 static int is_coincidence(struct isl_sched_edge *edge)
276 return isl_sched_edge_has_type(edge, isl_edge_coincidence);
279 /* Is "edge" marked as a condition edge?
281 static int is_condition(struct isl_sched_edge *edge)
283 return isl_sched_edge_has_type(edge, isl_edge_condition);
286 /* Is "edge" marked as a conditional validity edge?
288 static int is_conditional_validity(struct isl_sched_edge *edge)
290 return isl_sched_edge_has_type(edge, isl_edge_conditional_validity);
293 /* Is "edge" of a type that can appear multiple times between
294 * the same pair of nodes?
296 * Condition edges and conditional validity edges may have tagged
297 * dependence relations, in which case an edge is added for each
298 * pair of tags.
300 static int is_multi_edge_type(struct isl_sched_edge *edge)
302 return is_condition(edge) || is_conditional_validity(edge);
305 /* Internal information about the dependence graph used during
306 * the construction of the schedule.
308 * intra_hmap is a cache, mapping dependence relations to their dual,
309 * for dependences from a node to itself, possibly without
310 * coefficients for the parameters
311 * intra_hmap_param is a cache, mapping dependence relations to their dual,
312 * for dependences from a node to itself, including coefficients
313 * for the parameters
314 * inter_hmap is a cache, mapping dependence relations to their dual,
315 * for dependences between distinct nodes
316 * if compression is involved then the key for these maps
317 * is the original, uncompressed dependence relation, while
318 * the value is the dual of the compressed dependence relation.
320 * n is the number of nodes
321 * node is the list of nodes
322 * maxvar is the maximal number of variables over all nodes
323 * max_row is the allocated number of rows in the schedule
324 * n_row is the current (maximal) number of linearly independent
325 * rows in the node schedules
326 * n_total_row is the current number of rows in the node schedules
327 * band_start is the starting row in the node schedules of the current band
328 * root is set to the original dependence graph from which this graph
329 * is derived through splitting. If this graph is not the result of
330 * splitting, then the root field points to the graph itself.
332 * sorted contains a list of node indices sorted according to the
333 * SCC to which a node belongs
335 * n_edge is the number of edges
336 * edge is the list of edges
337 * max_edge contains the maximal number of edges of each type;
338 * in particular, it contains the number of edges in the inital graph.
339 * edge_table contains pointers into the edge array, hashed on the source
340 * and sink spaces; there is one such table for each type;
341 * a given edge may be referenced from more than one table
342 * if the corresponding relation appears in more than one of the
343 * sets of dependences; however, for each type there is only
344 * a single edge between a given pair of source and sink space
345 * in the entire graph
347 * node_table contains pointers into the node array, hashed on the space tuples
349 * region contains a list of variable sequences that should be non-trivial
351 * lp contains the (I)LP problem used to obtain new schedule rows
353 * src_scc and dst_scc are the source and sink SCCs of an edge with
354 * conflicting constraints
356 * scc represents the number of components
357 * weak is set if the components are weakly connected
359 * max_weight is used during clustering and represents the maximal
360 * weight of the relevant proximity edges.
362 struct isl_sched_graph {
363 isl_map_to_basic_set *intra_hmap;
364 isl_map_to_basic_set *intra_hmap_param;
365 isl_map_to_basic_set *inter_hmap;
367 struct isl_sched_node *node;
368 int n;
369 int maxvar;
370 int max_row;
371 int n_row;
373 int *sorted;
375 int n_total_row;
376 int band_start;
378 struct isl_sched_graph *root;
380 struct isl_sched_edge *edge;
381 int n_edge;
382 int max_edge[isl_edge_last + 1];
383 struct isl_hash_table *edge_table[isl_edge_last + 1];
385 struct isl_hash_table *node_table;
386 struct isl_trivial_region *region;
388 isl_basic_set *lp;
390 int src_scc;
391 int dst_scc;
393 int scc;
394 int weak;
396 int max_weight;
399 /* Initialize node_table based on the list of nodes.
401 static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
403 int i;
405 graph->node_table = isl_hash_table_alloc(ctx, graph->n);
406 if (!graph->node_table)
407 return -1;
409 for (i = 0; i < graph->n; ++i) {
410 struct isl_hash_table_entry *entry;
411 uint32_t hash;
413 hash = isl_space_get_tuple_hash(graph->node[i].space);
414 entry = isl_hash_table_find(ctx, graph->node_table, hash,
415 &node_has_tuples,
416 graph->node[i].space, 1);
417 if (!entry)
418 return -1;
419 entry->data = &graph->node[i];
422 return 0;
425 /* Return a pointer to the node that lives within the given space,
426 * an invalid node if there is no such node, or NULL in case of error.
428 static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
429 struct isl_sched_graph *graph, __isl_keep isl_space *space)
431 struct isl_hash_table_entry *entry;
432 uint32_t hash;
434 if (!space)
435 return NULL;
437 hash = isl_space_get_tuple_hash(space);
438 entry = isl_hash_table_find(ctx, graph->node_table, hash,
439 &node_has_tuples, space, 0);
440 if (!entry)
441 return NULL;
442 if (entry == isl_hash_table_entry_none)
443 return graph->node + graph->n;
445 return entry->data;
448 /* Is "node" a node in "graph"?
450 static int is_node(struct isl_sched_graph *graph,
451 struct isl_sched_node *node)
453 return node && node >= &graph->node[0] && node < &graph->node[graph->n];
456 static isl_bool edge_has_src_and_dst(const void *entry, const void *val)
458 const struct isl_sched_edge *edge = entry;
459 const struct isl_sched_edge *temp = val;
461 return isl_bool_ok(edge->src == temp->src && edge->dst == temp->dst);
464 /* Add the given edge to graph->edge_table[type].
466 static isl_stat graph_edge_table_add(isl_ctx *ctx,
467 struct isl_sched_graph *graph, enum isl_edge_type type,
468 struct isl_sched_edge *edge)
470 struct isl_hash_table_entry *entry;
471 uint32_t hash;
473 hash = isl_hash_init();
474 hash = isl_hash_builtin(hash, edge->src);
475 hash = isl_hash_builtin(hash, edge->dst);
476 entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
477 &edge_has_src_and_dst, edge, 1);
478 if (!entry)
479 return isl_stat_error;
480 entry->data = edge;
482 return isl_stat_ok;
485 /* Add "edge" to all relevant edge tables.
486 * That is, for every type of the edge, add it to the corresponding table.
488 static isl_stat graph_edge_tables_add(isl_ctx *ctx,
489 struct isl_sched_graph *graph, struct isl_sched_edge *edge)
491 enum isl_edge_type t;
493 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
494 if (!isl_sched_edge_has_type(edge, t))
495 continue;
496 if (graph_edge_table_add(ctx, graph, t, edge) < 0)
497 return isl_stat_error;
500 return isl_stat_ok;
503 /* Allocate the edge_tables based on the maximal number of edges of
504 * each type.
506 static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
508 int i;
510 for (i = 0; i <= isl_edge_last; ++i) {
511 graph->edge_table[i] = isl_hash_table_alloc(ctx,
512 graph->max_edge[i]);
513 if (!graph->edge_table[i])
514 return -1;
517 return 0;
520 /* If graph->edge_table[type] contains an edge from the given source
521 * to the given destination, then return the hash table entry of this edge.
522 * Otherwise, return NULL.
524 static struct isl_hash_table_entry *graph_find_edge_entry(
525 struct isl_sched_graph *graph,
526 enum isl_edge_type type,
527 struct isl_sched_node *src, struct isl_sched_node *dst)
529 isl_ctx *ctx = isl_space_get_ctx(src->space);
530 uint32_t hash;
531 struct isl_sched_edge temp = { .src = src, .dst = dst };
533 hash = isl_hash_init();
534 hash = isl_hash_builtin(hash, temp.src);
535 hash = isl_hash_builtin(hash, temp.dst);
536 return isl_hash_table_find(ctx, graph->edge_table[type], hash,
537 &edge_has_src_and_dst, &temp, 0);
541 /* If graph->edge_table[type] contains an edge from the given source
542 * to the given destination, then return this edge.
543 * Return "none" if no such edge can be found.
544 * Return NULL on error.
546 static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
547 enum isl_edge_type type,
548 struct isl_sched_node *src, struct isl_sched_node *dst,
549 struct isl_sched_edge *none)
551 struct isl_hash_table_entry *entry;
553 entry = graph_find_edge_entry(graph, type, src, dst);
554 if (!entry)
555 return NULL;
556 if (entry == isl_hash_table_entry_none)
557 return none;
559 return entry->data;
562 /* Check whether the dependence graph has an edge of the given type
563 * between the given two nodes.
565 static isl_bool graph_has_edge(struct isl_sched_graph *graph,
566 enum isl_edge_type type,
567 struct isl_sched_node *src, struct isl_sched_node *dst)
569 struct isl_sched_edge dummy;
570 struct isl_sched_edge *edge;
571 isl_bool empty;
573 edge = graph_find_edge(graph, type, src, dst, &dummy);
574 if (!edge)
575 return isl_bool_error;
576 if (edge == &dummy)
577 return isl_bool_false;
579 empty = isl_map_plain_is_empty(edge->map);
581 return isl_bool_not(empty);
584 /* Look for any edge with the same src, dst and map fields as "model".
586 * Return the matching edge if one can be found.
587 * Return "model" if no matching edge is found.
588 * Return NULL on error.
590 static struct isl_sched_edge *graph_find_matching_edge(
591 struct isl_sched_graph *graph, struct isl_sched_edge *model)
593 enum isl_edge_type i;
594 struct isl_sched_edge *edge;
596 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
597 int is_equal;
599 edge = graph_find_edge(graph, i, model->src, model->dst, model);
600 if (!edge)
601 return NULL;
602 if (edge == model)
603 continue;
604 is_equal = isl_map_plain_is_equal(model->map, edge->map);
605 if (is_equal < 0)
606 return NULL;
607 if (is_equal)
608 return edge;
611 return model;
614 /* Remove the given edge from all the edge_tables that refer to it.
616 static isl_stat graph_remove_edge(struct isl_sched_graph *graph,
617 struct isl_sched_edge *edge)
619 isl_ctx *ctx = isl_map_get_ctx(edge->map);
620 enum isl_edge_type i;
622 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
623 struct isl_hash_table_entry *entry;
625 entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
626 if (!entry)
627 return isl_stat_error;
628 if (entry == isl_hash_table_entry_none)
629 continue;
630 if (entry->data != edge)
631 continue;
632 isl_hash_table_remove(ctx, graph->edge_table[i], entry);
635 return isl_stat_ok;
638 /* Check whether the dependence graph has any edge
639 * between the given two nodes.
641 static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
642 struct isl_sched_node *src, struct isl_sched_node *dst)
644 enum isl_edge_type i;
645 isl_bool r;
647 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
648 r = graph_has_edge(graph, i, src, dst);
649 if (r < 0 || r)
650 return r;
653 return r;
656 /* Check whether the dependence graph has a validity edge
657 * between the given two nodes.
659 * Conditional validity edges are essentially validity edges that
660 * can be ignored if the corresponding condition edges are iteration private.
661 * Here, we are only checking for the presence of validity
662 * edges, so we need to consider the conditional validity edges too.
663 * In particular, this function is used during the detection
664 * of strongly connected components and we cannot ignore
665 * conditional validity edges during this detection.
667 static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
668 struct isl_sched_node *src, struct isl_sched_node *dst)
670 isl_bool r;
672 r = graph_has_edge(graph, isl_edge_validity, src, dst);
673 if (r < 0 || r)
674 return r;
676 return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
679 /* Perform all the required memory allocations for a schedule graph "graph"
680 * with "n_node" nodes and "n_edge" edge and initialize the corresponding
681 * fields.
683 static isl_stat graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
684 int n_node, int n_edge)
686 int i;
688 graph->n = n_node;
689 graph->n_edge = n_edge;
690 graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
691 graph->sorted = isl_calloc_array(ctx, int, graph->n);
692 graph->region = isl_alloc_array(ctx,
693 struct isl_trivial_region, graph->n);
694 graph->edge = isl_calloc_array(ctx,
695 struct isl_sched_edge, graph->n_edge);
697 graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
698 graph->intra_hmap_param = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
699 graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
701 if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
702 !graph->sorted)
703 return isl_stat_error;
705 for(i = 0; i < graph->n; ++i)
706 graph->sorted[i] = i;
708 return isl_stat_ok;
711 /* Free the memory associated to node "node" in "graph".
712 * The "coincident" field is shared by nodes in a graph and its subgraph.
713 * It therefore only needs to be freed for the original dependence graph,
714 * i.e., one that is not the result of splitting.
716 static void clear_node(struct isl_sched_graph *graph,
717 struct isl_sched_node *node)
719 isl_space_free(node->space);
720 isl_set_free(node->hull);
721 isl_multi_aff_free(node->compress);
722 isl_pw_multi_aff_free(node->decompress);
723 isl_mat_free(node->sched);
724 isl_map_free(node->sched_map);
725 isl_mat_free(node->indep);
726 isl_mat_free(node->vmap);
727 if (graph->root == graph)
728 free(node->coincident);
729 isl_multi_val_free(node->sizes);
730 isl_basic_set_free(node->bounds);
731 isl_vec_free(node->max);
734 static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
736 int i;
738 isl_map_to_basic_set_free(graph->intra_hmap);
739 isl_map_to_basic_set_free(graph->intra_hmap_param);
740 isl_map_to_basic_set_free(graph->inter_hmap);
742 if (graph->node)
743 for (i = 0; i < graph->n; ++i)
744 clear_node(graph, &graph->node[i]);
745 free(graph->node);
746 free(graph->sorted);
747 if (graph->edge)
748 for (i = 0; i < graph->n_edge; ++i) {
749 isl_map_free(graph->edge[i].map);
750 isl_union_map_free(graph->edge[i].tagged_condition);
751 isl_union_map_free(graph->edge[i].tagged_validity);
753 free(graph->edge);
754 free(graph->region);
755 for (i = 0; i <= isl_edge_last; ++i)
756 isl_hash_table_free(ctx, graph->edge_table[i]);
757 isl_hash_table_free(ctx, graph->node_table);
758 isl_basic_set_free(graph->lp);
761 /* For each "set" on which this function is called, increment
762 * graph->n by one and update graph->maxvar.
764 static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
766 struct isl_sched_graph *graph = user;
767 isl_size nvar = isl_set_dim(set, isl_dim_set);
769 graph->n++;
770 if (nvar > graph->maxvar)
771 graph->maxvar = nvar;
773 isl_set_free(set);
775 if (nvar < 0)
776 return isl_stat_error;
777 return isl_stat_ok;
780 /* Compute the number of rows that should be allocated for the schedule.
781 * In particular, we need one row for each variable or one row
782 * for each basic map in the dependences.
783 * Note that it is practically impossible to exhaust both
784 * the number of dependences and the number of variables.
786 static isl_stat compute_max_row(struct isl_sched_graph *graph,
787 __isl_keep isl_schedule_constraints *sc)
789 int n_edge;
790 isl_stat r;
791 isl_union_set *domain;
793 graph->n = 0;
794 graph->maxvar = 0;
795 domain = isl_schedule_constraints_get_domain(sc);
796 r = isl_union_set_foreach_set(domain, &init_n_maxvar, graph);
797 isl_union_set_free(domain);
798 if (r < 0)
799 return isl_stat_error;
800 n_edge = isl_schedule_constraints_n_basic_map(sc);
801 if (n_edge < 0)
802 return isl_stat_error;
803 graph->max_row = n_edge + graph->maxvar;
805 return isl_stat_ok;
808 /* Does "bset" have any defining equalities for its set variables?
810 static isl_bool has_any_defining_equality(__isl_keep isl_basic_set *bset)
812 int i;
813 isl_size n;
815 n = isl_basic_set_dim(bset, isl_dim_set);
816 if (n < 0)
817 return isl_bool_error;
819 for (i = 0; i < n; ++i) {
820 isl_bool has;
822 has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
823 NULL);
824 if (has < 0 || has)
825 return has;
828 return isl_bool_false;
831 /* Set the entries of node->max to the value of the schedule_max_coefficient
832 * option, if set.
834 static isl_stat set_max_coefficient(isl_ctx *ctx, struct isl_sched_node *node)
836 int max;
838 max = isl_options_get_schedule_max_coefficient(ctx);
839 if (max == -1)
840 return isl_stat_ok;
842 node->max = isl_vec_alloc(ctx, node->nvar);
843 node->max = isl_vec_set_si(node->max, max);
844 if (!node->max)
845 return isl_stat_error;
847 return isl_stat_ok;
850 /* Set the entries of node->max to the minimum of the schedule_max_coefficient
851 * option (if set) and half of the minimum of the sizes in the other
852 * dimensions. Round up when computing the half such that
853 * if the minimum of the sizes is one, half of the size is taken to be one
854 * rather than zero.
855 * If the global minimum is unbounded (i.e., if both
856 * the schedule_max_coefficient is not set and the sizes in the other
857 * dimensions are unbounded), then store a negative value.
858 * If the schedule coefficient is close to the size of the instance set
859 * in another dimension, then the schedule may represent a loop
860 * coalescing transformation (especially if the coefficient
861 * in that other dimension is one). Forcing the coefficient to be
862 * smaller than or equal to half the minimal size should avoid this
863 * situation.
865 static isl_stat compute_max_coefficient(isl_ctx *ctx,
866 struct isl_sched_node *node)
868 int max;
869 int i, j;
870 isl_vec *v;
872 max = isl_options_get_schedule_max_coefficient(ctx);
873 v = isl_vec_alloc(ctx, node->nvar);
874 if (!v)
875 return isl_stat_error;
877 for (i = 0; i < node->nvar; ++i) {
878 isl_int_set_si(v->el[i], max);
879 isl_int_mul_si(v->el[i], v->el[i], 2);
882 for (i = 0; i < node->nvar; ++i) {
883 isl_val *size;
885 size = isl_multi_val_get_val(node->sizes, i);
886 if (!size)
887 goto error;
888 if (!isl_val_is_int(size)) {
889 isl_val_free(size);
890 continue;
892 for (j = 0; j < node->nvar; ++j) {
893 if (j == i)
894 continue;
895 if (isl_int_is_neg(v->el[j]) ||
896 isl_int_gt(v->el[j], size->n))
897 isl_int_set(v->el[j], size->n);
899 isl_val_free(size);
902 for (i = 0; i < node->nvar; ++i)
903 isl_int_cdiv_q_ui(v->el[i], v->el[i], 2);
905 node->max = v;
906 return isl_stat_ok;
907 error:
908 isl_vec_free(v);
909 return isl_stat_error;
912 /* Construct an identifier for node "node", which will represent "set".
913 * The name of the identifier is either "compressed" or
914 * "compressed_<name>", with <name> the name of the space of "set".
915 * The user pointer of the identifier points to "node".
917 static __isl_give isl_id *construct_compressed_id(__isl_keep isl_set *set,
918 struct isl_sched_node *node)
920 isl_bool has_name;
921 isl_ctx *ctx;
922 isl_id *id;
923 isl_printer *p;
924 const char *name;
925 char *id_name;
927 has_name = isl_set_has_tuple_name(set);
928 if (has_name < 0)
929 return NULL;
931 ctx = isl_set_get_ctx(set);
932 if (!has_name)
933 return isl_id_alloc(ctx, "compressed", node);
935 p = isl_printer_to_str(ctx);
936 name = isl_set_get_tuple_name(set);
937 p = isl_printer_print_str(p, "compressed_");
938 p = isl_printer_print_str(p, name);
939 id_name = isl_printer_get_str(p);
940 isl_printer_free(p);
942 id = isl_id_alloc(ctx, id_name, node);
943 free(id_name);
945 return id;
948 /* Construct a map that isolates the variable in position "pos" in "set".
950 * That is, construct
952 * [i_0, ..., i_pos-1, i_pos+1, ...] -> [i_pos]
954 static __isl_give isl_map *isolate(__isl_take isl_set *set, int pos)
956 isl_map *map;
958 map = isl_set_project_onto_map(set, isl_dim_set, pos, 1);
959 map = isl_map_project_out(map, isl_dim_in, pos, 1);
960 return map;
963 /* Compute and return the size of "set" in dimension "dim".
964 * The size is taken to be the difference in values for that variable
965 * for fixed values of the other variables.
966 * This assumes that "set" is convex.
967 * In particular, the variable is first isolated from the other variables
968 * in the range of a map
970 * [i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
972 * and then duplicated
974 * [i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
976 * The shared variables are then projected out and the maximal value
977 * of i_dim' - i_dim is computed.
979 static __isl_give isl_val *compute_size(__isl_take isl_set *set, int dim)
981 isl_map *map;
982 isl_local_space *ls;
983 isl_aff *obj;
984 isl_val *v;
986 map = isolate(set, dim);
987 map = isl_map_range_product(map, isl_map_copy(map));
988 map = isl_set_unwrap(isl_map_range(map));
989 set = isl_map_deltas(map);
990 ls = isl_local_space_from_space(isl_set_get_space(set));
991 obj = isl_aff_var_on_domain(ls, isl_dim_set, 0);
992 v = isl_set_max_val(set, obj);
993 isl_aff_free(obj);
994 isl_set_free(set);
996 return v;
999 /* Perform a compression on "node" where "hull" represents the constraints
1000 * that were used to derive the compression, while "compress" and
1001 * "decompress" map the original space to the compressed space and
1002 * vice versa.
1004 * If "node" was not compressed already, then simply store
1005 * the compression information.
1006 * Otherwise the "original" space is actually the result
1007 * of a previous compression, which is then combined
1008 * with the present compression.
1010 * The dimensionality of the compressed domain is also adjusted.
1011 * Other information, such as the sizes and the maximal coefficient values,
1012 * has not been computed yet and therefore does not need to be adjusted.
1014 static isl_stat compress_node(struct isl_sched_node *node,
1015 __isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
1016 __isl_take isl_pw_multi_aff *decompress)
1018 node->nvar = isl_multi_aff_dim(compress, isl_dim_out);
1019 if (!node->compressed) {
1020 node->compressed = 1;
1021 node->hull = hull;
1022 node->compress = compress;
1023 node->decompress = decompress;
1024 } else {
1025 hull = isl_set_preimage_multi_aff(hull,
1026 isl_multi_aff_copy(node->compress));
1027 node->hull = isl_set_intersect(node->hull, hull);
1028 node->compress = isl_multi_aff_pullback_multi_aff(
1029 compress, node->compress);
1030 node->decompress = isl_pw_multi_aff_pullback_pw_multi_aff(
1031 node->decompress, decompress);
1034 if (!node->hull || !node->compress || !node->decompress)
1035 return isl_stat_error;
1037 return isl_stat_ok;
1040 /* Given that dimension "pos" in "set" has a fixed value
1041 * in terms of the other dimensions, (further) compress "node"
1042 * by projecting out this dimension.
1043 * "set" may be the result of a previous compression.
1044 * "uncompressed" is the original domain (without compression).
1046 * The compression function simply projects out the dimension.
1047 * The decompression function adds back the dimension
1048 * in the right position as an expression of the other dimensions
1049 * derived from "set".
1050 * As in extract_node, the compressed space has an identifier
1051 * that references "node" such that each compressed space is unique and
1052 * such that the node can be recovered from the compressed space.
1054 * The constraint removed through the compression is added to the "hull"
1055 * such that only edges that relate to the original domains
1056 * are taken into account.
1057 * In particular, it is obtained by composing compression and decompression and
1058 * taking the relation among the variables in the range.
1060 static isl_stat project_out_fixed(struct isl_sched_node *node,
1061 __isl_keep isl_set *uncompressed, __isl_take isl_set *set, int pos)
1063 isl_id *id;
1064 isl_space *space;
1065 isl_set *domain;
1066 isl_map *map;
1067 isl_multi_aff *compress;
1068 isl_pw_multi_aff *decompress, *pma;
1069 isl_multi_pw_aff *mpa;
1070 isl_set *hull;
1072 map = isolate(isl_set_copy(set), pos);
1073 pma = isl_pw_multi_aff_from_map(map);
1074 domain = isl_pw_multi_aff_domain(isl_pw_multi_aff_copy(pma));
1075 pma = isl_pw_multi_aff_gist(pma, domain);
1076 space = isl_pw_multi_aff_get_domain_space(pma);
1077 mpa = isl_multi_pw_aff_identity(isl_space_map_from_set(space));
1078 mpa = isl_multi_pw_aff_range_splice(mpa, pos,
1079 isl_multi_pw_aff_from_pw_multi_aff(pma));
1080 decompress = isl_pw_multi_aff_from_multi_pw_aff(mpa);
1081 space = isl_set_get_space(set);
1082 compress = isl_multi_aff_project_out_map(space, isl_dim_set, pos, 1);
1083 id = construct_compressed_id(uncompressed, node);
1084 compress = isl_multi_aff_set_tuple_id(compress, isl_dim_out, id);
1085 space = isl_space_reverse(isl_multi_aff_get_space(compress));
1086 decompress = isl_pw_multi_aff_reset_space(decompress, space);
1087 pma = isl_pw_multi_aff_pullback_multi_aff(
1088 isl_pw_multi_aff_copy(decompress), isl_multi_aff_copy(compress));
1089 hull = isl_map_range(isl_map_from_pw_multi_aff(pma));
1091 isl_set_free(set);
1093 return compress_node(node, hull, compress, decompress);
1096 /* Compute the size of the compressed domain in each dimension and
1097 * store the results in node->sizes.
1098 * "uncompressed" is the original domain (without compression).
1100 * First compress the domain if needed and then compute the size
1101 * in each direction.
1102 * If the domain is not convex, then the sizes are computed
1103 * on a convex superset in order to avoid picking up sizes
1104 * that are valid for the individual disjuncts, but not for
1105 * the domain as a whole.
1107 * If any of the sizes turns out to be zero, then this means
1108 * that this dimension has a fixed value in terms of
1109 * the other dimensions. Perform an (extra) compression
1110 * to remove this dimension.
1112 static isl_stat compute_sizes(struct isl_sched_node *node,
1113 __isl_keep isl_set *uncompressed)
1115 int j;
1116 isl_size n;
1117 isl_multi_val *mv;
1118 isl_set *set = isl_set_copy(uncompressed);
1120 if (node->compressed)
1121 set = isl_set_preimage_pw_multi_aff(set,
1122 isl_pw_multi_aff_copy(node->decompress));
1123 set = isl_set_from_basic_set(isl_set_simple_hull(set));
1124 mv = isl_multi_val_zero(isl_set_get_space(set));
1125 n = isl_set_dim(set, isl_dim_set);
1126 if (n < 0)
1127 mv = isl_multi_val_free(mv);
1128 for (j = 0; j < n; ++j) {
1129 isl_bool is_zero;
1130 isl_val *v;
1132 v = compute_size(isl_set_copy(set), j);
1133 is_zero = isl_val_is_zero(v);
1134 mv = isl_multi_val_set_val(mv, j, v);
1135 if (is_zero >= 0 && is_zero) {
1136 isl_multi_val_free(mv);
1137 if (project_out_fixed(node, uncompressed, set, j) < 0)
1138 return isl_stat_error;
1139 return compute_sizes(node, uncompressed);
1142 node->sizes = mv;
1143 isl_set_free(set);
1144 if (!node->sizes)
1145 return isl_stat_error;
1146 return isl_stat_ok;
1149 /* Compute the size of the instance set "set" of "node", after compression,
1150 * as well as bounds on the corresponding coefficients, if needed.
1152 * The sizes are needed when the schedule_treat_coalescing option is set.
1153 * The bounds are needed when the schedule_treat_coalescing option or
1154 * the schedule_max_coefficient option is set.
1156 * If the schedule_treat_coalescing option is not set, then at most
1157 * the bounds need to be set and this is done in set_max_coefficient.
1158 * Otherwise, compute the size of the compressed domain
1159 * in each direction and store the results in node->size.
1160 * Finally, set the bounds on the coefficients based on the sizes
1161 * and the schedule_max_coefficient option in compute_max_coefficient.
1163 static isl_stat compute_sizes_and_max(isl_ctx *ctx, struct isl_sched_node *node,
1164 __isl_take isl_set *set)
1166 isl_stat r;
1168 if (!isl_options_get_schedule_treat_coalescing(ctx)) {
1169 isl_set_free(set);
1170 return set_max_coefficient(ctx, node);
1173 r = compute_sizes(node, set);
1174 isl_set_free(set);
1175 if (r < 0)
1176 return isl_stat_error;
1177 return compute_max_coefficient(ctx, node);
1180 /* Add a new node to the graph representing the given instance set.
1181 * "nvar" is the (possibly compressed) number of variables and
1182 * may be smaller than then number of set variables in "set"
1183 * if "compressed" is set.
1184 * If "compressed" is set, then "hull" represents the constraints
1185 * that were used to derive the compression, while "compress" and
1186 * "decompress" map the original space to the compressed space and
1187 * vice versa.
1188 * If "compressed" is not set, then "hull", "compress" and "decompress"
1189 * should be NULL.
1191 * Compute the size of the instance set and bounds on the coefficients,
1192 * if needed.
1194 static isl_stat add_node(struct isl_sched_graph *graph,
1195 __isl_take isl_set *set, int nvar, int compressed,
1196 __isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
1197 __isl_take isl_pw_multi_aff *decompress)
1199 isl_size nparam;
1200 isl_ctx *ctx;
1201 isl_mat *sched;
1202 isl_space *space;
1203 int *coincident;
1204 struct isl_sched_node *node;
1206 nparam = isl_set_dim(set, isl_dim_param);
1207 if (nparam < 0)
1208 goto error;
1210 ctx = isl_set_get_ctx(set);
1211 if (!ctx->opt->schedule_parametric)
1212 nparam = 0;
1213 sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
1214 node = &graph->node[graph->n];
1215 graph->n++;
1216 space = isl_set_get_space(set);
1217 node->space = space;
1218 node->nvar = nvar;
1219 node->nparam = nparam;
1220 node->sched = sched;
1221 node->sched_map = NULL;
1222 coincident = isl_calloc_array(ctx, int, graph->max_row);
1223 node->coincident = coincident;
1224 node->compressed = compressed;
1225 node->hull = hull;
1226 node->compress = compress;
1227 node->decompress = decompress;
1228 if (compute_sizes_and_max(ctx, node, set) < 0)
1229 return isl_stat_error;
1231 if (!space || !sched || (graph->max_row && !coincident))
1232 return isl_stat_error;
1233 if (compressed && (!hull || !compress || !decompress))
1234 return isl_stat_error;
1236 return isl_stat_ok;
1237 error:
1238 isl_set_free(set);
1239 isl_set_free(hull);
1240 isl_multi_aff_free(compress);
1241 isl_pw_multi_aff_free(decompress);
1242 return isl_stat_error;
1245 /* Add a new node to the graph representing the given set.
1247 * If any of the set variables is defined by an equality, then
1248 * we perform variable compression such that we can perform
1249 * the scheduling on the compressed domain.
1250 * In this case, an identifier is used that references the new node
1251 * such that each compressed space is unique and
1252 * such that the node can be recovered from the compressed space.
1254 static isl_stat extract_node(__isl_take isl_set *set, void *user)
1256 isl_size nvar;
1257 isl_bool has_equality;
1258 isl_id *id;
1259 isl_basic_set *hull;
1260 isl_set *hull_set;
1261 isl_morph *morph;
1262 isl_multi_aff *compress, *decompress_ma;
1263 isl_pw_multi_aff *decompress;
1264 struct isl_sched_graph *graph = user;
1266 hull = isl_set_affine_hull(isl_set_copy(set));
1267 hull = isl_basic_set_remove_divs(hull);
1268 nvar = isl_set_dim(set, isl_dim_set);
1269 has_equality = has_any_defining_equality(hull);
1271 if (nvar < 0 || has_equality < 0)
1272 goto error;
1273 if (!has_equality) {
1274 isl_basic_set_free(hull);
1275 return add_node(graph, set, nvar, 0, NULL, NULL, NULL);
1278 id = construct_compressed_id(set, &graph->node[graph->n]);
1279 morph = isl_basic_set_variable_compression_with_id(hull, id);
1280 isl_id_free(id);
1281 nvar = isl_morph_ran_dim(morph, isl_dim_set);
1282 if (nvar < 0)
1283 set = isl_set_free(set);
1284 compress = isl_morph_get_var_multi_aff(morph);
1285 morph = isl_morph_inverse(morph);
1286 decompress_ma = isl_morph_get_var_multi_aff(morph);
1287 decompress = isl_pw_multi_aff_from_multi_aff(decompress_ma);
1288 isl_morph_free(morph);
1290 hull_set = isl_set_from_basic_set(hull);
1291 return add_node(graph, set, nvar, 1, hull_set, compress, decompress);
1292 error:
1293 isl_basic_set_free(hull);
1294 isl_set_free(set);
1295 return isl_stat_error;
1298 struct isl_extract_edge_data {
1299 enum isl_edge_type type;
1300 struct isl_sched_graph *graph;
1303 /* Merge edge2 into edge1, freeing the contents of edge2.
1304 * Return 0 on success and -1 on failure.
1306 * edge1 and edge2 are assumed to have the same value for the map field.
1308 static int merge_edge(struct isl_sched_edge *edge1,
1309 struct isl_sched_edge *edge2)
1311 edge1->types |= edge2->types;
1312 isl_map_free(edge2->map);
1314 if (is_condition(edge2)) {
1315 if (!edge1->tagged_condition)
1316 edge1->tagged_condition = edge2->tagged_condition;
1317 else
1318 edge1->tagged_condition =
1319 isl_union_map_union(edge1->tagged_condition,
1320 edge2->tagged_condition);
1323 if (is_conditional_validity(edge2)) {
1324 if (!edge1->tagged_validity)
1325 edge1->tagged_validity = edge2->tagged_validity;
1326 else
1327 edge1->tagged_validity =
1328 isl_union_map_union(edge1->tagged_validity,
1329 edge2->tagged_validity);
1332 if (is_condition(edge2) && !edge1->tagged_condition)
1333 return -1;
1334 if (is_conditional_validity(edge2) && !edge1->tagged_validity)
1335 return -1;
1337 return 0;
1340 /* Insert dummy tags in domain and range of "map".
1342 * In particular, if "map" is of the form
1344 * A -> B
1346 * then return
1348 * [A -> dummy_tag] -> [B -> dummy_tag]
1350 * where the dummy_tags are identical and equal to any dummy tags
1351 * introduced by any other call to this function.
1353 static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
1355 static char dummy;
1356 isl_ctx *ctx;
1357 isl_id *id;
1358 isl_space *space;
1359 isl_set *domain, *range;
1361 ctx = isl_map_get_ctx(map);
1363 id = isl_id_alloc(ctx, NULL, &dummy);
1364 space = isl_space_params(isl_map_get_space(map));
1365 space = isl_space_set_from_params(space);
1366 space = isl_space_set_tuple_id(space, isl_dim_set, id);
1367 space = isl_space_map_from_set(space);
1369 domain = isl_map_wrap(map);
1370 range = isl_map_wrap(isl_map_universe(space));
1371 map = isl_map_from_domain_and_range(domain, range);
1372 map = isl_map_zip(map);
1374 return map;
1377 /* Given that at least one of "src" or "dst" is compressed, return
1378 * a map between the spaces of these nodes restricted to the affine
1379 * hull that was used in the compression.
1381 static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
1382 struct isl_sched_node *dst)
1384 isl_set *dom, *ran;
1386 if (src->compressed)
1387 dom = isl_set_copy(src->hull);
1388 else
1389 dom = isl_set_universe(isl_space_copy(src->space));
1390 if (dst->compressed)
1391 ran = isl_set_copy(dst->hull);
1392 else
1393 ran = isl_set_universe(isl_space_copy(dst->space));
1395 return isl_map_from_domain_and_range(dom, ran);
1398 /* Intersect the domains of the nested relations in domain and range
1399 * of "tagged" with "map".
1401 static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
1402 __isl_keep isl_map *map)
1404 isl_set *set;
1406 tagged = isl_map_zip(tagged);
1407 set = isl_map_wrap(isl_map_copy(map));
1408 tagged = isl_map_intersect_domain(tagged, set);
1409 tagged = isl_map_zip(tagged);
1410 return tagged;
1413 /* Return a pointer to the node that lives in the domain space of "map",
1414 * an invalid node if there is no such node, or NULL in case of error.
1416 static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
1417 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1419 struct isl_sched_node *node;
1420 isl_space *space;
1422 space = isl_space_domain(isl_map_get_space(map));
1423 node = graph_find_node(ctx, graph, space);
1424 isl_space_free(space);
1426 return node;
1429 /* Return a pointer to the node that lives in the range space of "map",
1430 * an invalid node if there is no such node, or NULL in case of error.
1432 static struct isl_sched_node *find_range_node(isl_ctx *ctx,
1433 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1435 struct isl_sched_node *node;
1436 isl_space *space;
1438 space = isl_space_range(isl_map_get_space(map));
1439 node = graph_find_node(ctx, graph, space);
1440 isl_space_free(space);
1442 return node;
1445 /* Refrain from adding a new edge based on "map".
1446 * Instead, just free the map.
1447 * "tagged" is either a copy of "map" with additional tags or NULL.
1449 static isl_stat skip_edge(__isl_take isl_map *map, __isl_take isl_map *tagged)
1451 isl_map_free(map);
1452 isl_map_free(tagged);
1454 return isl_stat_ok;
1457 /* Add a new edge to the graph based on the given map
1458 * and add it to data->graph->edge_table[data->type].
1459 * If a dependence relation of a given type happens to be identical
1460 * to one of the dependence relations of a type that was added before,
1461 * then we don't create a new edge, but instead mark the original edge
1462 * as also representing a dependence of the current type.
1464 * Edges of type isl_edge_condition or isl_edge_conditional_validity
1465 * may be specified as "tagged" dependence relations. That is, "map"
1466 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1467 * the dependence on iterations and a and b are tags.
1468 * edge->map is set to the relation containing the elements i -> j,
1469 * while edge->tagged_condition and edge->tagged_validity contain
1470 * the union of all the "map" relations
1471 * for which extract_edge is called that result in the same edge->map.
1473 * If the source or the destination node is compressed, then
1474 * intersect both "map" and "tagged" with the constraints that
1475 * were used to construct the compression.
1476 * This ensures that there are no schedule constraints defined
1477 * outside of these domains, while the scheduler no longer has
1478 * any control over those outside parts.
1480 static isl_stat extract_edge(__isl_take isl_map *map, void *user)
1482 isl_bool empty;
1483 isl_ctx *ctx = isl_map_get_ctx(map);
1484 struct isl_extract_edge_data *data = user;
1485 struct isl_sched_graph *graph = data->graph;
1486 struct isl_sched_node *src, *dst;
1487 struct isl_sched_edge *edge;
1488 isl_map *tagged = NULL;
1490 if (data->type == isl_edge_condition ||
1491 data->type == isl_edge_conditional_validity) {
1492 if (isl_map_can_zip(map)) {
1493 tagged = isl_map_copy(map);
1494 map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
1495 } else {
1496 tagged = insert_dummy_tags(isl_map_copy(map));
1500 src = find_domain_node(ctx, graph, map);
1501 dst = find_range_node(ctx, graph, map);
1503 if (!src || !dst)
1504 goto error;
1505 if (!is_node(graph, src) || !is_node(graph, dst))
1506 return skip_edge(map, tagged);
1508 if (src->compressed || dst->compressed) {
1509 isl_map *hull;
1510 hull = extract_hull(src, dst);
1511 if (tagged)
1512 tagged = map_intersect_domains(tagged, hull);
1513 map = isl_map_intersect(map, hull);
1516 empty = isl_map_plain_is_empty(map);
1517 if (empty < 0)
1518 goto error;
1519 if (empty)
1520 return skip_edge(map, tagged);
1522 graph->edge[graph->n_edge].src = src;
1523 graph->edge[graph->n_edge].dst = dst;
1524 graph->edge[graph->n_edge].map = map;
1525 graph->edge[graph->n_edge].types = 0;
1526 graph->edge[graph->n_edge].tagged_condition = NULL;
1527 graph->edge[graph->n_edge].tagged_validity = NULL;
1528 set_type(&graph->edge[graph->n_edge], data->type);
1529 if (data->type == isl_edge_condition)
1530 graph->edge[graph->n_edge].tagged_condition =
1531 isl_union_map_from_map(tagged);
1532 if (data->type == isl_edge_conditional_validity)
1533 graph->edge[graph->n_edge].tagged_validity =
1534 isl_union_map_from_map(tagged);
1536 edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
1537 if (!edge) {
1538 graph->n_edge++;
1539 return isl_stat_error;
1541 if (edge == &graph->edge[graph->n_edge])
1542 return graph_edge_table_add(ctx, graph, data->type,
1543 &graph->edge[graph->n_edge++]);
1545 if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
1546 return isl_stat_error;
1548 return graph_edge_table_add(ctx, graph, data->type, edge);
1549 error:
1550 isl_map_free(map);
1551 isl_map_free(tagged);
1552 return isl_stat_error;
1555 /* Initialize the schedule graph "graph" from the schedule constraints "sc".
1557 * The context is included in the domain before the nodes of
1558 * the graphs are extracted in order to be able to exploit
1559 * any possible additional equalities.
1560 * Note that this intersection is only performed locally here.
1562 static isl_stat graph_init(struct isl_sched_graph *graph,
1563 __isl_keep isl_schedule_constraints *sc)
1565 isl_ctx *ctx;
1566 isl_union_set *domain;
1567 isl_union_map *c;
1568 struct isl_extract_edge_data data;
1569 enum isl_edge_type i;
1570 isl_stat r;
1571 isl_size n;
1573 if (!sc)
1574 return isl_stat_error;
1576 ctx = isl_schedule_constraints_get_ctx(sc);
1578 domain = isl_schedule_constraints_get_domain(sc);
1579 n = isl_union_set_n_set(domain);
1580 graph->n = n;
1581 isl_union_set_free(domain);
1582 if (n < 0)
1583 return isl_stat_error;
1585 n = isl_schedule_constraints_n_map(sc);
1586 if (n < 0 || graph_alloc(ctx, graph, graph->n, n) < 0)
1587 return isl_stat_error;
1589 if (compute_max_row(graph, sc) < 0)
1590 return isl_stat_error;
1591 graph->root = graph;
1592 graph->n = 0;
1593 domain = isl_schedule_constraints_get_domain(sc);
1594 domain = isl_union_set_intersect_params(domain,
1595 isl_schedule_constraints_get_context(sc));
1596 r = isl_union_set_foreach_set(domain, &extract_node, graph);
1597 isl_union_set_free(domain);
1598 if (r < 0)
1599 return isl_stat_error;
1600 if (graph_init_table(ctx, graph) < 0)
1601 return isl_stat_error;
1602 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1603 isl_size n;
1605 c = isl_schedule_constraints_get(sc, i);
1606 n = isl_union_map_n_map(c);
1607 graph->max_edge[i] = n;
1608 isl_union_map_free(c);
1609 if (n < 0)
1610 return isl_stat_error;
1612 if (graph_init_edge_tables(ctx, graph) < 0)
1613 return isl_stat_error;
1614 graph->n_edge = 0;
1615 data.graph = graph;
1616 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1617 isl_stat r;
1619 data.type = i;
1620 c = isl_schedule_constraints_get(sc, i);
1621 r = isl_union_map_foreach_map(c, &extract_edge, &data);
1622 isl_union_map_free(c);
1623 if (r < 0)
1624 return isl_stat_error;
1627 return isl_stat_ok;
1630 /* Check whether there is any dependence from node[j] to node[i]
1631 * or from node[i] to node[j].
1633 static isl_bool node_follows_weak(int i, int j, void *user)
1635 isl_bool f;
1636 struct isl_sched_graph *graph = user;
1638 f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
1639 if (f < 0 || f)
1640 return f;
1641 return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
1644 /* Check whether there is a (conditional) validity dependence from node[j]
1645 * to node[i], forcing node[i] to follow node[j].
1647 static isl_bool node_follows_strong(int i, int j, void *user)
1649 struct isl_sched_graph *graph = user;
1651 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
1654 /* Use Tarjan's algorithm for computing the strongly connected components
1655 * in the dependence graph only considering those edges defined by "follows".
1657 static isl_stat detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
1658 isl_bool (*follows)(int i, int j, void *user))
1660 int i, n;
1661 struct isl_tarjan_graph *g = NULL;
1663 g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
1664 if (!g)
1665 return isl_stat_error;
1667 graph->scc = 0;
1668 i = 0;
1669 n = graph->n;
1670 while (n) {
1671 while (g->order[i] != -1) {
1672 graph->node[g->order[i]].scc = graph->scc;
1673 --n;
1674 ++i;
1676 ++i;
1677 graph->scc++;
1680 isl_tarjan_graph_free(g);
1682 return isl_stat_ok;
1685 /* Apply Tarjan's algorithm to detect the strongly connected components
1686 * in the dependence graph.
1687 * Only consider the (conditional) validity dependences and clear "weak".
1689 static isl_stat detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1691 graph->weak = 0;
1692 return detect_ccs(ctx, graph, &node_follows_strong);
1695 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1696 * in the dependence graph.
1697 * Consider all dependences and set "weak".
1699 static isl_stat detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1701 graph->weak = 1;
1702 return detect_ccs(ctx, graph, &node_follows_weak);
1705 static int cmp_scc(const void *a, const void *b, void *data)
1707 struct isl_sched_graph *graph = data;
1708 const int *i1 = a;
1709 const int *i2 = b;
1711 return graph->node[*i1].scc - graph->node[*i2].scc;
1714 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1716 static int sort_sccs(struct isl_sched_graph *graph)
1718 return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
1721 /* Return a non-parametric set in the compressed space of "node" that is
1722 * bounded by the size in each direction
1724 * { [x] : -S_i <= x_i <= S_i }
1726 * If S_i is infinity in direction i, then there are no constraints
1727 * in that direction.
1729 * Cache the result in node->bounds.
1731 static __isl_give isl_basic_set *get_size_bounds(struct isl_sched_node *node)
1733 isl_space *space;
1734 isl_basic_set *bounds;
1735 int i;
1737 if (node->bounds)
1738 return isl_basic_set_copy(node->bounds);
1740 if (node->compressed)
1741 space = isl_pw_multi_aff_get_domain_space(node->decompress);
1742 else
1743 space = isl_space_copy(node->space);
1744 space = isl_space_drop_all_params(space);
1745 bounds = isl_basic_set_universe(space);
1747 for (i = 0; i < node->nvar; ++i) {
1748 isl_val *size;
1750 size = isl_multi_val_get_val(node->sizes, i);
1751 if (!size)
1752 return isl_basic_set_free(bounds);
1753 if (!isl_val_is_int(size)) {
1754 isl_val_free(size);
1755 continue;
1757 bounds = isl_basic_set_upper_bound_val(bounds, isl_dim_set, i,
1758 isl_val_copy(size));
1759 bounds = isl_basic_set_lower_bound_val(bounds, isl_dim_set, i,
1760 isl_val_neg(size));
1763 node->bounds = isl_basic_set_copy(bounds);
1764 return bounds;
1767 /* Compress the dependence relation "map", if needed, i.e.,
1768 * when the source node "src" and/or the destination node "dst"
1769 * has been compressed.
1771 static __isl_give isl_map *compress(__isl_take isl_map *map,
1772 struct isl_sched_node *src, struct isl_sched_node *dst)
1774 if (src->compressed)
1775 map = isl_map_preimage_domain_pw_multi_aff(map,
1776 isl_pw_multi_aff_copy(src->decompress));
1777 if (dst->compressed)
1778 map = isl_map_preimage_range_pw_multi_aff(map,
1779 isl_pw_multi_aff_copy(dst->decompress));
1780 return map;
1783 /* Drop some constraints from "delta" that could be exploited
1784 * to construct loop coalescing schedules.
1785 * In particular, drop those constraint that bound the difference
1786 * to the size of the domain.
1787 * First project out the parameters to improve the effectiveness.
1789 static __isl_give isl_set *drop_coalescing_constraints(
1790 __isl_take isl_set *delta, struct isl_sched_node *node)
1792 isl_size nparam;
1793 isl_basic_set *bounds;
1795 nparam = isl_set_dim(delta, isl_dim_param);
1796 if (nparam < 0)
1797 return isl_set_free(delta);
1799 bounds = get_size_bounds(node);
1801 delta = isl_set_project_out(delta, isl_dim_param, 0, nparam);
1802 delta = isl_set_remove_divs(delta);
1803 delta = isl_set_plain_gist_basic_set(delta, bounds);
1804 return delta;
1807 /* Given a dependence relation R from "node" to itself,
1808 * construct the set of coefficients of valid constraints for elements
1809 * in that dependence relation.
1810 * In particular, the result contains tuples of coefficients
1811 * c_0, c_n, c_x such that
1813 * c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1815 * or, equivalently,
1817 * c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1819 * We choose here to compute the dual of delta R.
1820 * Alternatively, we could have computed the dual of R, resulting
1821 * in a set of tuples c_0, c_n, c_x, c_y, and then
1822 * plugged in (c_0, c_n, c_x, -c_x).
1824 * If "need_param" is set, then the resulting coefficients effectively
1825 * include coefficients for the parameters c_n. Otherwise, they may
1826 * have been projected out already.
1827 * Since the constraints may be different for these two cases,
1828 * they are stored in separate caches.
1829 * In particular, if no parameter coefficients are required and
1830 * the schedule_treat_coalescing option is set, then the parameters
1831 * are projected out and some constraints that could be exploited
1832 * to construct coalescing schedules are removed before the dual
1833 * is computed.
1835 * If "node" has been compressed, then the dependence relation
1836 * is also compressed before the set of coefficients is computed.
1838 static __isl_give isl_basic_set *intra_coefficients(
1839 struct isl_sched_graph *graph, struct isl_sched_node *node,
1840 __isl_take isl_map *map, int need_param)
1842 isl_ctx *ctx;
1843 isl_set *delta;
1844 isl_map *key;
1845 isl_basic_set *coef;
1846 isl_maybe_isl_basic_set m;
1847 isl_map_to_basic_set **hmap = &graph->intra_hmap;
1848 int treat;
1850 if (!map)
1851 return NULL;
1853 ctx = isl_map_get_ctx(map);
1854 treat = !need_param && isl_options_get_schedule_treat_coalescing(ctx);
1855 if (!treat)
1856 hmap = &graph->intra_hmap_param;
1857 m = isl_map_to_basic_set_try_get(*hmap, map);
1858 if (m.valid < 0 || m.valid) {
1859 isl_map_free(map);
1860 return m.value;
1863 key = isl_map_copy(map);
1864 map = compress(map, node, node);
1865 delta = isl_map_deltas(map);
1866 if (treat)
1867 delta = drop_coalescing_constraints(delta, node);
1868 delta = isl_set_remove_divs(delta);
1869 coef = isl_set_coefficients(delta);
1870 *hmap = isl_map_to_basic_set_set(*hmap, key, isl_basic_set_copy(coef));
1872 return coef;
1875 /* Given a dependence relation R, construct the set of coefficients
1876 * of valid constraints for elements in that dependence relation.
1877 * In particular, the result contains tuples of coefficients
1878 * c_0, c_n, c_x, c_y such that
1880 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1882 * If the source or destination nodes of "edge" have been compressed,
1883 * then the dependence relation is also compressed before
1884 * the set of coefficients is computed.
1886 static __isl_give isl_basic_set *inter_coefficients(
1887 struct isl_sched_graph *graph, struct isl_sched_edge *edge,
1888 __isl_take isl_map *map)
1890 isl_set *set;
1891 isl_map *key;
1892 isl_basic_set *coef;
1893 isl_maybe_isl_basic_set m;
1895 m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
1896 if (m.valid < 0 || m.valid) {
1897 isl_map_free(map);
1898 return m.value;
1901 key = isl_map_copy(map);
1902 map = compress(map, edge->src, edge->dst);
1903 set = isl_map_wrap(isl_map_remove_divs(map));
1904 coef = isl_set_coefficients(set);
1905 graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
1906 isl_basic_set_copy(coef));
1908 return coef;
1911 /* Return the position of the coefficients of the variables in
1912 * the coefficients constraints "coef".
1914 * The space of "coef" is of the form
1916 * { coefficients[[cst, params] -> S] }
1918 * Return the position of S.
1920 static isl_size coef_var_offset(__isl_keep isl_basic_set *coef)
1922 isl_size offset;
1923 isl_space *space;
1925 space = isl_space_unwrap(isl_basic_set_get_space(coef));
1926 offset = isl_space_dim(space, isl_dim_in);
1927 isl_space_free(space);
1929 return offset;
1932 /* Return the offset of the coefficient of the constant term of "node"
1933 * within the (I)LP.
1935 * Within each node, the coefficients have the following order:
1936 * - positive and negative parts of c_i_x
1937 * - c_i_n (if parametric)
1938 * - c_i_0
1940 static int node_cst_coef_offset(struct isl_sched_node *node)
1942 return node->start + 2 * node->nvar + node->nparam;
1945 /* Return the offset of the coefficients of the parameters of "node"
1946 * within the (I)LP.
1948 * Within each node, the coefficients have the following order:
1949 * - positive and negative parts of c_i_x
1950 * - c_i_n (if parametric)
1951 * - c_i_0
1953 static int node_par_coef_offset(struct isl_sched_node *node)
1955 return node->start + 2 * node->nvar;
1958 /* Return the offset of the coefficients of the variables of "node"
1959 * within the (I)LP.
1961 * Within each node, the coefficients have the following order:
1962 * - positive and negative parts of c_i_x
1963 * - c_i_n (if parametric)
1964 * - c_i_0
1966 static int node_var_coef_offset(struct isl_sched_node *node)
1968 return node->start;
1971 /* Return the position of the pair of variables encoding
1972 * coefficient "i" of "node".
1974 * The order of these variable pairs is the opposite of
1975 * that of the coefficients, with 2 variables per coefficient.
1977 static int node_var_coef_pos(struct isl_sched_node *node, int i)
1979 return node_var_coef_offset(node) + 2 * (node->nvar - 1 - i);
1982 /* Construct an isl_dim_map for mapping constraints on coefficients
1983 * for "node" to the corresponding positions in graph->lp.
1984 * "offset" is the offset of the coefficients for the variables
1985 * in the input constraints.
1986 * "s" is the sign of the mapping.
1988 * The input constraints are given in terms of the coefficients
1989 * (c_0, c_x) or (c_0, c_n, c_x).
1990 * The mapping produced by this function essentially plugs in
1991 * (0, c_i_x^+ - c_i_x^-) if s = 1 and
1992 * (0, -c_i_x^+ + c_i_x^-) if s = -1 or
1993 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
1994 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
1995 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1996 * Furthermore, the order of these pairs is the opposite of that
1997 * of the corresponding coefficients.
1999 * The caller can extend the mapping to also map the other coefficients
2000 * (and therefore not plug in 0).
2002 static __isl_give isl_dim_map *intra_dim_map(isl_ctx *ctx,
2003 struct isl_sched_graph *graph, struct isl_sched_node *node,
2004 int offset, int s)
2006 int pos;
2007 isl_size total;
2008 isl_dim_map *dim_map;
2010 total = isl_basic_set_dim(graph->lp, isl_dim_all);
2011 if (!node || total < 0)
2012 return NULL;
2014 pos = node_var_coef_pos(node, 0);
2015 dim_map = isl_dim_map_alloc(ctx, total);
2016 isl_dim_map_range(dim_map, pos, -2, offset, 1, node->nvar, -s);
2017 isl_dim_map_range(dim_map, pos + 1, -2, offset, 1, node->nvar, s);
2019 return dim_map;
2022 /* Construct an isl_dim_map for mapping constraints on coefficients
2023 * for "src" (node i) and "dst" (node j) to the corresponding positions
2024 * in graph->lp.
2025 * "offset" is the offset of the coefficients for the variables of "src"
2026 * in the input constraints.
2027 * "s" is the sign of the mapping.
2029 * The input constraints are given in terms of the coefficients
2030 * (c_0, c_n, c_x, c_y).
2031 * The mapping produced by this function essentially plugs in
2032 * (c_j_0 - c_i_0, c_j_n - c_i_n,
2033 * -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-) if s = 1 and
2034 * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
2035 * c_i_x^+ - c_i_x^-, -(c_j_x^+ - c_j_x^-)) if s = -1.
2036 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
2037 * Furthermore, the order of these pairs is the opposite of that
2038 * of the corresponding coefficients.
2040 * The caller can further extend the mapping.
2042 static __isl_give isl_dim_map *inter_dim_map(isl_ctx *ctx,
2043 struct isl_sched_graph *graph, struct isl_sched_node *src,
2044 struct isl_sched_node *dst, int offset, int s)
2046 int pos;
2047 isl_size total;
2048 isl_dim_map *dim_map;
2050 total = isl_basic_set_dim(graph->lp, isl_dim_all);
2051 if (!src || !dst || total < 0)
2052 return NULL;
2054 dim_map = isl_dim_map_alloc(ctx, total);
2056 pos = node_cst_coef_offset(dst);
2057 isl_dim_map_range(dim_map, pos, 0, 0, 0, 1, s);
2058 pos = node_par_coef_offset(dst);
2059 isl_dim_map_range(dim_map, pos, 1, 1, 1, dst->nparam, s);
2060 pos = node_var_coef_pos(dst, 0);
2061 isl_dim_map_range(dim_map, pos, -2, offset + src->nvar, 1,
2062 dst->nvar, -s);
2063 isl_dim_map_range(dim_map, pos + 1, -2, offset + src->nvar, 1,
2064 dst->nvar, s);
2066 pos = node_cst_coef_offset(src);
2067 isl_dim_map_range(dim_map, pos, 0, 0, 0, 1, -s);
2068 pos = node_par_coef_offset(src);
2069 isl_dim_map_range(dim_map, pos, 1, 1, 1, src->nparam, -s);
2070 pos = node_var_coef_pos(src, 0);
2071 isl_dim_map_range(dim_map, pos, -2, offset, 1, src->nvar, s);
2072 isl_dim_map_range(dim_map, pos + 1, -2, offset, 1, src->nvar, -s);
2074 return dim_map;
2077 /* Add the constraints from "src" to "dst" using "dim_map",
2078 * after making sure there is enough room in "dst" for the extra constraints.
2080 static __isl_give isl_basic_set *add_constraints_dim_map(
2081 __isl_take isl_basic_set *dst, __isl_take isl_basic_set *src,
2082 __isl_take isl_dim_map *dim_map)
2084 isl_size n_eq, n_ineq;
2086 n_eq = isl_basic_set_n_equality(src);
2087 n_ineq = isl_basic_set_n_inequality(src);
2088 if (n_eq < 0 || n_ineq < 0)
2089 dst = isl_basic_set_free(dst);
2090 dst = isl_basic_set_extend_constraints(dst, n_eq, n_ineq);
2091 dst = isl_basic_set_add_constraints_dim_map(dst, src, dim_map);
2092 return dst;
2095 /* Add constraints to graph->lp that force validity for the given
2096 * dependence from a node i to itself.
2097 * That is, add constraints that enforce
2099 * (c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
2100 * = c_i_x (y - x) >= 0
2102 * for each (x,y) in R.
2103 * We obtain general constraints on coefficients (c_0, c_x)
2104 * of valid constraints for (y - x) and then plug in (0, c_i_x^+ - c_i_x^-),
2105 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
2106 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
2107 * Note that the result of intra_coefficients may also contain
2108 * parameter coefficients c_n, in which case 0 is plugged in for them as well.
2110 static isl_stat add_intra_validity_constraints(struct isl_sched_graph *graph,
2111 struct isl_sched_edge *edge)
2113 isl_size offset;
2114 isl_map *map = isl_map_copy(edge->map);
2115 isl_ctx *ctx = isl_map_get_ctx(map);
2116 isl_dim_map *dim_map;
2117 isl_basic_set *coef;
2118 struct isl_sched_node *node = edge->src;
2120 coef = intra_coefficients(graph, node, map, 0);
2122 offset = coef_var_offset(coef);
2123 if (offset < 0)
2124 coef = isl_basic_set_free(coef);
2125 if (!coef)
2126 return isl_stat_error;
2128 dim_map = intra_dim_map(ctx, graph, node, offset, 1);
2129 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2131 return isl_stat_ok;
2134 /* Add constraints to graph->lp that force validity for the given
2135 * dependence from node i to node j.
2136 * That is, add constraints that enforce
2138 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
2140 * for each (x,y) in R.
2141 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2142 * of valid constraints for R and then plug in
2143 * (c_j_0 - c_i_0, c_j_n - c_i_n, -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-),
2144 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
2145 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
2147 static isl_stat add_inter_validity_constraints(struct isl_sched_graph *graph,
2148 struct isl_sched_edge *edge)
2150 isl_size offset;
2151 isl_map *map;
2152 isl_ctx *ctx;
2153 isl_dim_map *dim_map;
2154 isl_basic_set *coef;
2155 struct isl_sched_node *src = edge->src;
2156 struct isl_sched_node *dst = edge->dst;
2158 if (!graph->lp)
2159 return isl_stat_error;
2161 map = isl_map_copy(edge->map);
2162 ctx = isl_map_get_ctx(map);
2163 coef = inter_coefficients(graph, edge, map);
2165 offset = coef_var_offset(coef);
2166 if (offset < 0)
2167 coef = isl_basic_set_free(coef);
2168 if (!coef)
2169 return isl_stat_error;
2171 dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
2173 edge->start = graph->lp->n_ineq;
2174 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2175 if (!graph->lp)
2176 return isl_stat_error;
2177 edge->end = graph->lp->n_ineq;
2179 return isl_stat_ok;
2182 /* Add constraints to graph->lp that bound the dependence distance for the given
2183 * dependence from a node i to itself.
2184 * If s = 1, we add the constraint
2186 * c_i_x (y - x) <= m_0 + m_n n
2188 * or
2190 * -c_i_x (y - x) + m_0 + m_n n >= 0
2192 * for each (x,y) in R.
2193 * If s = -1, we add the constraint
2195 * -c_i_x (y - x) <= m_0 + m_n n
2197 * or
2199 * c_i_x (y - x) + m_0 + m_n n >= 0
2201 * for each (x,y) in R.
2202 * We obtain general constraints on coefficients (c_0, c_n, c_x)
2203 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
2204 * with each coefficient (except m_0) represented as a pair of non-negative
2205 * coefficients.
2208 * If "local" is set, then we add constraints
2210 * c_i_x (y - x) <= 0
2212 * or
2214 * -c_i_x (y - x) <= 0
2216 * instead, forcing the dependence distance to be (less than or) equal to 0.
2217 * That is, we plug in (0, 0, -s * c_i_x),
2218 * intra_coefficients is not required to have c_n in its result when
2219 * "local" is set. If they are missing, then (0, -s * c_i_x) is plugged in.
2220 * Note that dependences marked local are treated as validity constraints
2221 * by add_all_validity_constraints and therefore also have
2222 * their distances bounded by 0 from below.
2224 static isl_stat add_intra_proximity_constraints(struct isl_sched_graph *graph,
2225 struct isl_sched_edge *edge, int s, int local)
2227 isl_size offset;
2228 isl_size nparam;
2229 isl_map *map = isl_map_copy(edge->map);
2230 isl_ctx *ctx = isl_map_get_ctx(map);
2231 isl_dim_map *dim_map;
2232 isl_basic_set *coef;
2233 struct isl_sched_node *node = edge->src;
2235 coef = intra_coefficients(graph, node, map, !local);
2236 nparam = isl_space_dim(node->space, isl_dim_param);
2238 offset = coef_var_offset(coef);
2239 if (nparam < 0 || offset < 0)
2240 coef = isl_basic_set_free(coef);
2241 if (!coef)
2242 return isl_stat_error;
2244 dim_map = intra_dim_map(ctx, graph, node, offset, -s);
2246 if (!local) {
2247 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2248 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2249 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2251 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2253 return isl_stat_ok;
2256 /* Add constraints to graph->lp that bound the dependence distance for the given
2257 * dependence from node i to node j.
2258 * If s = 1, we add the constraint
2260 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
2261 * <= m_0 + m_n n
2263 * or
2265 * -(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
2266 * m_0 + m_n n >= 0
2268 * for each (x,y) in R.
2269 * If s = -1, we add the constraint
2271 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
2272 * <= m_0 + m_n n
2274 * or
2276 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
2277 * m_0 + m_n n >= 0
2279 * for each (x,y) in R.
2280 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2281 * of valid constraints for R and then plug in
2282 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
2283 * s*c_i_x, -s*c_j_x)
2284 * with each coefficient (except m_0, c_*_0 and c_*_n)
2285 * represented as a pair of non-negative coefficients.
2288 * If "local" is set (and s = 1), then we add constraints
2290 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
2292 * or
2294 * -((c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x)) >= 0
2296 * instead, forcing the dependence distance to be (less than or) equal to 0.
2297 * That is, we plug in
2298 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, s*c_i_x, -s*c_j_x).
2299 * Note that dependences marked local are treated as validity constraints
2300 * by add_all_validity_constraints and therefore also have
2301 * their distances bounded by 0 from below.
2303 static isl_stat add_inter_proximity_constraints(struct isl_sched_graph *graph,
2304 struct isl_sched_edge *edge, int s, int local)
2306 isl_size offset;
2307 isl_size nparam;
2308 isl_map *map = isl_map_copy(edge->map);
2309 isl_ctx *ctx = isl_map_get_ctx(map);
2310 isl_dim_map *dim_map;
2311 isl_basic_set *coef;
2312 struct isl_sched_node *src = edge->src;
2313 struct isl_sched_node *dst = edge->dst;
2315 coef = inter_coefficients(graph, edge, map);
2316 nparam = isl_space_dim(src->space, isl_dim_param);
2318 offset = coef_var_offset(coef);
2319 if (nparam < 0 || offset < 0)
2320 coef = isl_basic_set_free(coef);
2321 if (!coef)
2322 return isl_stat_error;
2324 dim_map = inter_dim_map(ctx, graph, src, dst, offset, -s);
2326 if (!local) {
2327 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2328 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2329 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2332 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2334 return isl_stat_ok;
2337 /* Should the distance over "edge" be forced to zero?
2338 * That is, is it marked as a local edge?
2339 * If "use_coincidence" is set, then coincidence edges are treated
2340 * as local edges.
2342 static int force_zero(struct isl_sched_edge *edge, int use_coincidence)
2344 return is_local(edge) || (use_coincidence && is_coincidence(edge));
2347 /* Add all validity constraints to graph->lp.
2349 * An edge that is forced to be local needs to have its dependence
2350 * distances equal to zero. We take care of bounding them by 0 from below
2351 * here. add_all_proximity_constraints takes care of bounding them by 0
2352 * from above.
2354 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2355 * Otherwise, we ignore them.
2357 static int add_all_validity_constraints(struct isl_sched_graph *graph,
2358 int use_coincidence)
2360 int i;
2362 for (i = 0; i < graph->n_edge; ++i) {
2363 struct isl_sched_edge *edge = &graph->edge[i];
2364 int zero;
2366 zero = force_zero(edge, use_coincidence);
2367 if (!is_validity(edge) && !zero)
2368 continue;
2369 if (edge->src != edge->dst)
2370 continue;
2371 if (add_intra_validity_constraints(graph, edge) < 0)
2372 return -1;
2375 for (i = 0; i < graph->n_edge; ++i) {
2376 struct isl_sched_edge *edge = &graph->edge[i];
2377 int zero;
2379 zero = force_zero(edge, use_coincidence);
2380 if (!is_validity(edge) && !zero)
2381 continue;
2382 if (edge->src == edge->dst)
2383 continue;
2384 if (add_inter_validity_constraints(graph, edge) < 0)
2385 return -1;
2388 return 0;
2391 /* Add constraints to graph->lp that bound the dependence distance
2392 * for all dependence relations.
2393 * If a given proximity dependence is identical to a validity
2394 * dependence, then the dependence distance is already bounded
2395 * from below (by zero), so we only need to bound the distance
2396 * from above. (This includes the case of "local" dependences
2397 * which are treated as validity dependence by add_all_validity_constraints.)
2398 * Otherwise, we need to bound the distance both from above and from below.
2400 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2401 * Otherwise, we ignore them.
2403 static int add_all_proximity_constraints(struct isl_sched_graph *graph,
2404 int use_coincidence)
2406 int i;
2408 for (i = 0; i < graph->n_edge; ++i) {
2409 struct isl_sched_edge *edge = &graph->edge[i];
2410 int zero;
2412 zero = force_zero(edge, use_coincidence);
2413 if (!is_proximity(edge) && !zero)
2414 continue;
2415 if (edge->src == edge->dst &&
2416 add_intra_proximity_constraints(graph, edge, 1, zero) < 0)
2417 return -1;
2418 if (edge->src != edge->dst &&
2419 add_inter_proximity_constraints(graph, edge, 1, zero) < 0)
2420 return -1;
2421 if (is_validity(edge) || zero)
2422 continue;
2423 if (edge->src == edge->dst &&
2424 add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
2425 return -1;
2426 if (edge->src != edge->dst &&
2427 add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
2428 return -1;
2431 return 0;
2434 /* Normalize the rows of "indep" such that all rows are lexicographically
2435 * positive and such that each row contains as many final zeros as possible,
2436 * given the choice for the previous rows.
2437 * Do this by performing elementary row operations.
2439 static __isl_give isl_mat *normalize_independent(__isl_take isl_mat *indep)
2441 indep = isl_mat_reverse_gauss(indep);
2442 indep = isl_mat_lexnonneg_rows(indep);
2443 return indep;
2446 /* Extract the linear part of the current schedule for node "node".
2448 static __isl_give isl_mat *extract_linear_schedule(struct isl_sched_node *node)
2450 isl_size n_row = isl_mat_rows(node->sched);
2452 if (n_row < 0)
2453 return NULL;
2454 return isl_mat_sub_alloc(node->sched, 0, n_row,
2455 1 + node->nparam, node->nvar);
2458 /* Compute a basis for the rows in the linear part of the schedule
2459 * and extend this basis to a full basis. The remaining rows
2460 * can then be used to force linear independence from the rows
2461 * in the schedule.
2463 * In particular, given the schedule rows S, we compute
2465 * S = H Q
2466 * S U = H
2468 * with H the Hermite normal form of S. That is, all but the
2469 * first rank columns of H are zero and so each row in S is
2470 * a linear combination of the first rank rows of Q.
2471 * The matrix Q can be used as a variable transformation
2472 * that isolates the directions of S in the first rank rows.
2473 * Transposing S U = H yields
2475 * U^T S^T = H^T
2477 * with all but the first rank rows of H^T zero.
2478 * The last rows of U^T are therefore linear combinations
2479 * of schedule coefficients that are all zero on schedule
2480 * coefficients that are linearly dependent on the rows of S.
2481 * At least one of these combinations is non-zero on
2482 * linearly independent schedule coefficients.
2483 * The rows are normalized to involve as few of the last
2484 * coefficients as possible and to have a positive initial value.
2486 static isl_stat node_update_vmap(struct isl_sched_node *node)
2488 isl_mat *H, *U, *Q;
2490 H = extract_linear_schedule(node);
2492 H = isl_mat_left_hermite(H, 0, &U, &Q);
2493 isl_mat_free(node->indep);
2494 isl_mat_free(node->vmap);
2495 node->vmap = Q;
2496 node->indep = isl_mat_transpose(U);
2497 node->rank = isl_mat_initial_non_zero_cols(H);
2498 node->indep = isl_mat_drop_rows(node->indep, 0, node->rank);
2499 node->indep = normalize_independent(node->indep);
2500 isl_mat_free(H);
2502 if (!node->indep || !node->vmap || node->rank < 0)
2503 return isl_stat_error;
2504 return isl_stat_ok;
2507 /* Is "edge" marked as a validity or a conditional validity edge?
2509 static int is_any_validity(struct isl_sched_edge *edge)
2511 return is_validity(edge) || is_conditional_validity(edge);
2514 /* How many times should we count the constraints in "edge"?
2516 * We count as follows
2517 * validity -> 1 (>= 0)
2518 * validity+proximity -> 2 (>= 0 and upper bound)
2519 * proximity -> 2 (lower and upper bound)
2520 * local(+any) -> 2 (>= 0 and <= 0)
2522 * If an edge is only marked conditional_validity then it counts
2523 * as zero since it is only checked afterwards.
2525 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2526 * Otherwise, we ignore them.
2528 static int edge_multiplicity(struct isl_sched_edge *edge, int use_coincidence)
2530 if (is_proximity(edge) || force_zero(edge, use_coincidence))
2531 return 2;
2532 if (is_validity(edge))
2533 return 1;
2534 return 0;
2537 /* How many times should the constraints in "edge" be counted
2538 * as a parametric intra-node constraint?
2540 * Only proximity edges that are not forced zero need
2541 * coefficient constraints that include coefficients for parameters.
2542 * If the edge is also a validity edge, then only
2543 * an upper bound is introduced. Otherwise, both lower and upper bounds
2544 * are introduced.
2546 static int parametric_intra_edge_multiplicity(struct isl_sched_edge *edge,
2547 int use_coincidence)
2549 if (edge->src != edge->dst)
2550 return 0;
2551 if (!is_proximity(edge))
2552 return 0;
2553 if (force_zero(edge, use_coincidence))
2554 return 0;
2555 if (is_validity(edge))
2556 return 1;
2557 else
2558 return 2;
2561 /* Add "f" times the number of equality and inequality constraints of "bset"
2562 * to "n_eq" and "n_ineq" and free "bset".
2564 static isl_stat update_count(__isl_take isl_basic_set *bset,
2565 int f, int *n_eq, int *n_ineq)
2567 isl_size eq, ineq;
2569 eq = isl_basic_set_n_equality(bset);
2570 ineq = isl_basic_set_n_inequality(bset);
2571 isl_basic_set_free(bset);
2573 if (eq < 0 || ineq < 0)
2574 return isl_stat_error;
2576 *n_eq += eq;
2577 *n_ineq += ineq;
2579 return isl_stat_ok;
2582 /* Count the number of equality and inequality constraints
2583 * that will be added for the given map.
2585 * The edges that require parameter coefficients are counted separately.
2587 * "use_coincidence" is set if we should take into account coincidence edges.
2589 static isl_stat count_map_constraints(struct isl_sched_graph *graph,
2590 struct isl_sched_edge *edge, __isl_take isl_map *map,
2591 int *n_eq, int *n_ineq, int use_coincidence)
2593 isl_map *copy;
2594 isl_basic_set *coef;
2595 int f = edge_multiplicity(edge, use_coincidence);
2596 int fp = parametric_intra_edge_multiplicity(edge, use_coincidence);
2598 if (f == 0) {
2599 isl_map_free(map);
2600 return isl_stat_ok;
2603 if (edge->src != edge->dst) {
2604 coef = inter_coefficients(graph, edge, map);
2605 return update_count(coef, f, n_eq, n_ineq);
2608 if (fp > 0) {
2609 copy = isl_map_copy(map);
2610 coef = intra_coefficients(graph, edge->src, copy, 1);
2611 if (update_count(coef, fp, n_eq, n_ineq) < 0)
2612 goto error;
2615 if (f > fp) {
2616 copy = isl_map_copy(map);
2617 coef = intra_coefficients(graph, edge->src, copy, 0);
2618 if (update_count(coef, f - fp, n_eq, n_ineq) < 0)
2619 goto error;
2622 isl_map_free(map);
2623 return isl_stat_ok;
2624 error:
2625 isl_map_free(map);
2626 return isl_stat_error;
2629 /* Count the number of equality and inequality constraints
2630 * that will be added to the main lp problem.
2631 * We count as follows
2632 * validity -> 1 (>= 0)
2633 * validity+proximity -> 2 (>= 0 and upper bound)
2634 * proximity -> 2 (lower and upper bound)
2635 * local(+any) -> 2 (>= 0 and <= 0)
2637 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2638 * Otherwise, we ignore them.
2640 static int count_constraints(struct isl_sched_graph *graph,
2641 int *n_eq, int *n_ineq, int use_coincidence)
2643 int i;
2645 *n_eq = *n_ineq = 0;
2646 for (i = 0; i < graph->n_edge; ++i) {
2647 struct isl_sched_edge *edge = &graph->edge[i];
2648 isl_map *map = isl_map_copy(edge->map);
2650 if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
2651 use_coincidence) < 0)
2652 return -1;
2655 return 0;
2658 /* Count the number of constraints that will be added by
2659 * add_bound_constant_constraints to bound the values of the constant terms
2660 * and increment *n_eq and *n_ineq accordingly.
2662 * In practice, add_bound_constant_constraints only adds inequalities.
2664 static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
2665 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2667 if (isl_options_get_schedule_max_constant_term(ctx) == -1)
2668 return isl_stat_ok;
2670 *n_ineq += graph->n;
2672 return isl_stat_ok;
2675 /* Add constraints to bound the values of the constant terms in the schedule,
2676 * if requested by the user.
2678 * The maximal value of the constant terms is defined by the option
2679 * "schedule_max_constant_term".
2681 static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
2682 struct isl_sched_graph *graph)
2684 int i, k;
2685 int max;
2686 isl_size total;
2688 max = isl_options_get_schedule_max_constant_term(ctx);
2689 if (max == -1)
2690 return isl_stat_ok;
2692 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2693 if (total < 0)
2694 return isl_stat_error;
2696 for (i = 0; i < graph->n; ++i) {
2697 struct isl_sched_node *node = &graph->node[i];
2698 int pos;
2700 k = isl_basic_set_alloc_inequality(graph->lp);
2701 if (k < 0)
2702 return isl_stat_error;
2703 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2704 pos = node_cst_coef_offset(node);
2705 isl_int_set_si(graph->lp->ineq[k][1 + pos], -1);
2706 isl_int_set_si(graph->lp->ineq[k][0], max);
2709 return isl_stat_ok;
2712 /* Count the number of constraints that will be added by
2713 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
2714 * accordingly.
2716 * In practice, add_bound_coefficient_constraints only adds inequalities.
2718 static int count_bound_coefficient_constraints(isl_ctx *ctx,
2719 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2721 int i;
2723 if (isl_options_get_schedule_max_coefficient(ctx) == -1 &&
2724 !isl_options_get_schedule_treat_coalescing(ctx))
2725 return 0;
2727 for (i = 0; i < graph->n; ++i)
2728 *n_ineq += graph->node[i].nparam + 2 * graph->node[i].nvar;
2730 return 0;
2733 /* Add constraints to graph->lp that bound the values of
2734 * the parameter schedule coefficients of "node" to "max" and
2735 * the variable schedule coefficients to the corresponding entry
2736 * in node->max.
2737 * In either case, a negative value means that no bound needs to be imposed.
2739 * For parameter coefficients, this amounts to adding a constraint
2741 * c_n <= max
2743 * i.e.,
2745 * -c_n + max >= 0
2747 * The variables coefficients are, however, not represented directly.
2748 * Instead, the variable coefficients c_x are written as differences
2749 * c_x = c_x^+ - c_x^-.
2750 * That is,
2752 * -max_i <= c_x_i <= max_i
2754 * is encoded as
2756 * -max_i <= c_x_i^+ - c_x_i^- <= max_i
2758 * or
2760 * -(c_x_i^+ - c_x_i^-) + max_i >= 0
2761 * c_x_i^+ - c_x_i^- + max_i >= 0
2763 static isl_stat node_add_coefficient_constraints(isl_ctx *ctx,
2764 struct isl_sched_graph *graph, struct isl_sched_node *node, int max)
2766 int i, j, k;
2767 isl_size total;
2768 isl_vec *ineq;
2770 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2771 if (total < 0)
2772 return isl_stat_error;
2774 for (j = 0; j < node->nparam; ++j) {
2775 int dim;
2777 if (max < 0)
2778 continue;
2780 k = isl_basic_set_alloc_inequality(graph->lp);
2781 if (k < 0)
2782 return isl_stat_error;
2783 dim = 1 + node_par_coef_offset(node) + j;
2784 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2785 isl_int_set_si(graph->lp->ineq[k][dim], -1);
2786 isl_int_set_si(graph->lp->ineq[k][0], max);
2789 ineq = isl_vec_alloc(ctx, 1 + total);
2790 ineq = isl_vec_clr(ineq);
2791 if (!ineq)
2792 return isl_stat_error;
2793 for (i = 0; i < node->nvar; ++i) {
2794 int pos = 1 + node_var_coef_pos(node, i);
2796 if (isl_int_is_neg(node->max->el[i]))
2797 continue;
2799 isl_int_set_si(ineq->el[pos], 1);
2800 isl_int_set_si(ineq->el[pos + 1], -1);
2801 isl_int_set(ineq->el[0], node->max->el[i]);
2803 k = isl_basic_set_alloc_inequality(graph->lp);
2804 if (k < 0)
2805 goto error;
2806 isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
2808 isl_seq_neg(ineq->el + pos, ineq->el + pos, 2);
2809 k = isl_basic_set_alloc_inequality(graph->lp);
2810 if (k < 0)
2811 goto error;
2812 isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
2814 isl_seq_clr(ineq->el + pos, 2);
2816 isl_vec_free(ineq);
2818 return isl_stat_ok;
2819 error:
2820 isl_vec_free(ineq);
2821 return isl_stat_error;
2824 /* Add constraints that bound the values of the variable and parameter
2825 * coefficients of the schedule.
2827 * The maximal value of the coefficients is defined by the option
2828 * 'schedule_max_coefficient' and the entries in node->max.
2829 * These latter entries are only set if either the schedule_max_coefficient
2830 * option or the schedule_treat_coalescing option is set.
2832 static isl_stat add_bound_coefficient_constraints(isl_ctx *ctx,
2833 struct isl_sched_graph *graph)
2835 int i;
2836 int max;
2838 max = isl_options_get_schedule_max_coefficient(ctx);
2840 if (max == -1 && !isl_options_get_schedule_treat_coalescing(ctx))
2841 return isl_stat_ok;
2843 for (i = 0; i < graph->n; ++i) {
2844 struct isl_sched_node *node = &graph->node[i];
2846 if (node_add_coefficient_constraints(ctx, graph, node, max) < 0)
2847 return isl_stat_error;
2850 return isl_stat_ok;
2853 /* Add a constraint to graph->lp that equates the value at position
2854 * "sum_pos" to the sum of the "n" values starting at "first".
2856 static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
2857 int sum_pos, int first, int n)
2859 int i, k;
2860 isl_size total;
2862 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2863 if (total < 0)
2864 return isl_stat_error;
2866 k = isl_basic_set_alloc_equality(graph->lp);
2867 if (k < 0)
2868 return isl_stat_error;
2869 isl_seq_clr(graph->lp->eq[k], 1 + total);
2870 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2871 for (i = 0; i < n; ++i)
2872 isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);
2874 return isl_stat_ok;
2877 /* Add a constraint to graph->lp that equates the value at position
2878 * "sum_pos" to the sum of the parameter coefficients of all nodes.
2880 static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
2881 int sum_pos)
2883 int i, j, k;
2884 isl_size total;
2886 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2887 if (total < 0)
2888 return isl_stat_error;
2890 k = isl_basic_set_alloc_equality(graph->lp);
2891 if (k < 0)
2892 return isl_stat_error;
2893 isl_seq_clr(graph->lp->eq[k], 1 + total);
2894 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2895 for (i = 0; i < graph->n; ++i) {
2896 int pos = 1 + node_par_coef_offset(&graph->node[i]);
2898 for (j = 0; j < graph->node[i].nparam; ++j)
2899 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2902 return isl_stat_ok;
2905 /* Add a constraint to graph->lp that equates the value at position
2906 * "sum_pos" to the sum of the variable coefficients of all nodes.
2908 static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
2909 int sum_pos)
2911 int i, j, k;
2912 isl_size total;
2914 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2915 if (total < 0)
2916 return isl_stat_error;
2918 k = isl_basic_set_alloc_equality(graph->lp);
2919 if (k < 0)
2920 return isl_stat_error;
2921 isl_seq_clr(graph->lp->eq[k], 1 + total);
2922 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2923 for (i = 0; i < graph->n; ++i) {
2924 struct isl_sched_node *node = &graph->node[i];
2925 int pos = 1 + node_var_coef_offset(node);
2927 for (j = 0; j < 2 * node->nvar; ++j)
2928 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2931 return isl_stat_ok;
2934 /* Construct an ILP problem for finding schedule coefficients
2935 * that result in non-negative, but small dependence distances
2936 * over all dependences.
2937 * In particular, the dependence distances over proximity edges
2938 * are bounded by m_0 + m_n n and we compute schedule coefficients
2939 * with small values (preferably zero) of m_n and m_0.
2941 * All variables of the ILP are non-negative. The actual coefficients
2942 * may be negative, so each coefficient is represented as the difference
2943 * of two non-negative variables. The negative part always appears
2944 * immediately before the positive part.
2945 * Other than that, the variables have the following order
2947 * - sum of positive and negative parts of m_n coefficients
2948 * - m_0
2949 * - sum of all c_n coefficients
2950 * (unconstrained when computing non-parametric schedules)
2951 * - sum of positive and negative parts of all c_x coefficients
2952 * - positive and negative parts of m_n coefficients
2953 * - for each node
2954 * - positive and negative parts of c_i_x, in opposite order
2955 * - c_i_n (if parametric)
2956 * - c_i_0
2958 * The constraints are those from the edges plus two or three equalities
2959 * to express the sums.
2961 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2962 * Otherwise, we ignore them.
2964 static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
2965 int use_coincidence)
2967 int i;
2968 isl_size nparam;
2969 unsigned total;
2970 isl_space *space;
2971 int parametric;
2972 int param_pos;
2973 int n_eq, n_ineq;
2975 parametric = ctx->opt->schedule_parametric;
2976 nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
2977 if (nparam < 0)
2978 return isl_stat_error;
2979 param_pos = 4;
2980 total = param_pos + 2 * nparam;
2981 for (i = 0; i < graph->n; ++i) {
2982 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
2983 if (node_update_vmap(node) < 0)
2984 return isl_stat_error;
2985 node->start = total;
2986 total += 1 + node->nparam + 2 * node->nvar;
2989 if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
2990 return isl_stat_error;
2991 if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2992 return isl_stat_error;
2993 if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2994 return isl_stat_error;
2996 space = isl_space_set_alloc(ctx, 0, total);
2997 isl_basic_set_free(graph->lp);
2998 n_eq += 2 + parametric;
3000 graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
3002 if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
3003 return isl_stat_error;
3004 if (parametric && add_param_sum_constraint(graph, 2) < 0)
3005 return isl_stat_error;
3006 if (add_var_sum_constraint(graph, 3) < 0)
3007 return isl_stat_error;
3008 if (add_bound_constant_constraints(ctx, graph) < 0)
3009 return isl_stat_error;
3010 if (add_bound_coefficient_constraints(ctx, graph) < 0)
3011 return isl_stat_error;
3012 if (add_all_validity_constraints(graph, use_coincidence) < 0)
3013 return isl_stat_error;
3014 if (add_all_proximity_constraints(graph, use_coincidence) < 0)
3015 return isl_stat_error;
3017 return isl_stat_ok;
3020 /* Analyze the conflicting constraint found by
3021 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
3022 * constraint of one of the edges between distinct nodes, living, moreover
3023 * in distinct SCCs, then record the source and sink SCC as this may
3024 * be a good place to cut between SCCs.
3026 static int check_conflict(int con, void *user)
3028 int i;
3029 struct isl_sched_graph *graph = user;
3031 if (graph->src_scc >= 0)
3032 return 0;
3034 con -= graph->lp->n_eq;
3036 if (con >= graph->lp->n_ineq)
3037 return 0;
3039 for (i = 0; i < graph->n_edge; ++i) {
3040 if (!is_validity(&graph->edge[i]))
3041 continue;
3042 if (graph->edge[i].src == graph->edge[i].dst)
3043 continue;
3044 if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
3045 continue;
3046 if (graph->edge[i].start > con)
3047 continue;
3048 if (graph->edge[i].end <= con)
3049 continue;
3050 graph->src_scc = graph->edge[i].src->scc;
3051 graph->dst_scc = graph->edge[i].dst->scc;
3054 return 0;
3057 /* Check whether the next schedule row of the given node needs to be
3058 * non-trivial. Lower-dimensional domains may have some trivial rows,
3059 * but as soon as the number of remaining required non-trivial rows
3060 * is as large as the number or remaining rows to be computed,
3061 * all remaining rows need to be non-trivial.
3063 static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
3065 return node->nvar - node->rank >= graph->maxvar - graph->n_row;
3068 /* Construct a non-triviality region with triviality directions
3069 * corresponding to the rows of "indep".
3070 * The rows of "indep" are expressed in terms of the schedule coefficients c_i,
3071 * while the triviality directions are expressed in terms of
3072 * pairs of non-negative variables c^+_i - c^-_i, with c^-_i appearing
3073 * before c^+_i. Furthermore,
3074 * the pairs of non-negative variables representing the coefficients
3075 * are stored in the opposite order.
3077 static __isl_give isl_mat *construct_trivial(__isl_keep isl_mat *indep)
3079 isl_ctx *ctx;
3080 isl_mat *mat;
3081 int i, j;
3082 isl_size n, n_var;
3084 n = isl_mat_rows(indep);
3085 n_var = isl_mat_cols(indep);
3086 if (n < 0 || n_var < 0)
3087 return NULL;
3089 ctx = isl_mat_get_ctx(indep);
3090 mat = isl_mat_alloc(ctx, n, 2 * n_var);
3091 if (!mat)
3092 return NULL;
3093 for (i = 0; i < n; ++i) {
3094 for (j = 0; j < n_var; ++j) {
3095 int nj = n_var - 1 - j;
3096 isl_int_neg(mat->row[i][2 * nj], indep->row[i][j]);
3097 isl_int_set(mat->row[i][2 * nj + 1], indep->row[i][j]);
3101 return mat;
3104 /* Solve the ILP problem constructed in setup_lp.
3105 * For each node such that all the remaining rows of its schedule
3106 * need to be non-trivial, we construct a non-triviality region.
3107 * This region imposes that the next row is independent of previous rows.
3108 * In particular, the non-triviality region enforces that at least
3109 * one of the linear combinations in the rows of node->indep is non-zero.
3111 static __isl_give isl_vec *solve_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
3113 int i;
3114 isl_vec *sol;
3115 isl_basic_set *lp;
3117 for (i = 0; i < graph->n; ++i) {
3118 struct isl_sched_node *node = &graph->node[i];
3119 isl_mat *trivial;
3121 graph->region[i].pos = node_var_coef_offset(node);
3122 if (needs_row(graph, node))
3123 trivial = construct_trivial(node->indep);
3124 else
3125 trivial = isl_mat_zero(ctx, 0, 0);
3126 graph->region[i].trivial = trivial;
3128 lp = isl_basic_set_copy(graph->lp);
3129 sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
3130 graph->region, &check_conflict, graph);
3131 for (i = 0; i < graph->n; ++i)
3132 isl_mat_free(graph->region[i].trivial);
3133 return sol;
3136 /* Extract the coefficients for the variables of "node" from "sol".
3138 * Each schedule coefficient c_i_x is represented as the difference
3139 * between two non-negative variables c_i_x^+ - c_i_x^-.
3140 * The c_i_x^- appear before their c_i_x^+ counterpart.
3141 * Furthermore, the order of these pairs is the opposite of that
3142 * of the corresponding coefficients.
3144 * Return c_i_x = c_i_x^+ - c_i_x^-
3146 static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
3147 __isl_keep isl_vec *sol)
3149 int i;
3150 int pos;
3151 isl_vec *csol;
3153 if (!sol)
3154 return NULL;
3155 csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
3156 if (!csol)
3157 return NULL;
3159 pos = 1 + node_var_coef_offset(node);
3160 for (i = 0; i < node->nvar; ++i)
3161 isl_int_sub(csol->el[node->nvar - 1 - i],
3162 sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);
3164 return csol;
3167 /* Update the schedules of all nodes based on the given solution
3168 * of the LP problem.
3169 * The new row is added to the current band.
3170 * All possibly negative coefficients are encoded as a difference
3171 * of two non-negative variables, so we need to perform the subtraction
3172 * here.
3174 * If coincident is set, then the caller guarantees that the new
3175 * row satisfies the coincidence constraints.
3177 static int update_schedule(struct isl_sched_graph *graph,
3178 __isl_take isl_vec *sol, int coincident)
3180 int i, j;
3181 isl_vec *csol = NULL;
3183 if (!sol)
3184 goto error;
3185 if (sol->size == 0)
3186 isl_die(sol->ctx, isl_error_internal,
3187 "no solution found", goto error);
3188 if (graph->n_total_row >= graph->max_row)
3189 isl_die(sol->ctx, isl_error_internal,
3190 "too many schedule rows", goto error);
3192 for (i = 0; i < graph->n; ++i) {
3193 struct isl_sched_node *node = &graph->node[i];
3194 int pos;
3195 isl_size row = isl_mat_rows(node->sched);
3197 isl_vec_free(csol);
3198 csol = extract_var_coef(node, sol);
3199 if (row < 0 || !csol)
3200 goto error;
3202 isl_map_free(node->sched_map);
3203 node->sched_map = NULL;
3204 node->sched = isl_mat_add_rows(node->sched, 1);
3205 if (!node->sched)
3206 goto error;
3207 pos = node_cst_coef_offset(node);
3208 node->sched = isl_mat_set_element(node->sched,
3209 row, 0, sol->el[1 + pos]);
3210 pos = node_par_coef_offset(node);
3211 for (j = 0; j < node->nparam; ++j)
3212 node->sched = isl_mat_set_element(node->sched,
3213 row, 1 + j, sol->el[1 + pos + j]);
3214 for (j = 0; j < node->nvar; ++j)
3215 node->sched = isl_mat_set_element(node->sched,
3216 row, 1 + node->nparam + j, csol->el[j]);
3217 node->coincident[graph->n_total_row] = coincident;
3219 isl_vec_free(sol);
3220 isl_vec_free(csol);
3222 graph->n_row++;
3223 graph->n_total_row++;
3225 return 0;
3226 error:
3227 isl_vec_free(sol);
3228 isl_vec_free(csol);
3229 return -1;
3232 /* Convert row "row" of node->sched into an isl_aff living in "ls"
3233 * and return this isl_aff.
3235 static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
3236 struct isl_sched_node *node, int row)
3238 int j;
3239 isl_int v;
3240 isl_aff *aff;
3242 isl_int_init(v);
3244 aff = isl_aff_zero_on_domain(ls);
3245 if (isl_mat_get_element(node->sched, row, 0, &v) < 0)
3246 goto error;
3247 aff = isl_aff_set_constant(aff, v);
3248 for (j = 0; j < node->nparam; ++j) {
3249 if (isl_mat_get_element(node->sched, row, 1 + j, &v) < 0)
3250 goto error;
3251 aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
3253 for (j = 0; j < node->nvar; ++j) {
3254 if (isl_mat_get_element(node->sched, row,
3255 1 + node->nparam + j, &v) < 0)
3256 goto error;
3257 aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
3260 isl_int_clear(v);
3262 return aff;
3263 error:
3264 isl_int_clear(v);
3265 isl_aff_free(aff);
3266 return NULL;
3269 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
3270 * and return this multi_aff.
3272 * The result is defined over the uncompressed node domain.
3274 static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
3275 struct isl_sched_node *node, int first, int n)
3277 int i;
3278 isl_space *space;
3279 isl_local_space *ls;
3280 isl_aff *aff;
3281 isl_multi_aff *ma;
3282 isl_size nrow;
3284 if (!node)
3285 return NULL;
3286 nrow = isl_mat_rows(node->sched);
3287 if (nrow < 0)
3288 return NULL;
3289 if (node->compressed)
3290 space = isl_pw_multi_aff_get_domain_space(node->decompress);
3291 else
3292 space = isl_space_copy(node->space);
3293 ls = isl_local_space_from_space(isl_space_copy(space));
3294 space = isl_space_from_domain(space);
3295 space = isl_space_add_dims(space, isl_dim_out, n);
3296 ma = isl_multi_aff_zero(space);
3298 for (i = first; i < first + n; ++i) {
3299 aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
3300 ma = isl_multi_aff_set_aff(ma, i - first, aff);
3303 isl_local_space_free(ls);
3305 if (node->compressed)
3306 ma = isl_multi_aff_pullback_multi_aff(ma,
3307 isl_multi_aff_copy(node->compress));
3309 return ma;
3312 /* Convert node->sched into a multi_aff and return this multi_aff.
3314 * The result is defined over the uncompressed node domain.
3316 static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
3317 struct isl_sched_node *node)
3319 isl_size nrow;
3321 nrow = isl_mat_rows(node->sched);
3322 if (nrow < 0)
3323 return NULL;
3324 return node_extract_partial_schedule_multi_aff(node, 0, nrow);
3327 /* Convert node->sched into a map and return this map.
3329 * The result is cached in node->sched_map, which needs to be released
3330 * whenever node->sched is updated.
3331 * It is defined over the uncompressed node domain.
3333 static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
3335 if (!node->sched_map) {
3336 isl_multi_aff *ma;
3338 ma = node_extract_schedule_multi_aff(node);
3339 node->sched_map = isl_map_from_multi_aff(ma);
3342 return isl_map_copy(node->sched_map);
3345 /* Construct a map that can be used to update a dependence relation
3346 * based on the current schedule.
3347 * That is, construct a map expressing that source and sink
3348 * are executed within the same iteration of the current schedule.
3349 * This map can then be intersected with the dependence relation.
3350 * This is not the most efficient way, but this shouldn't be a critical
3351 * operation.
3353 static __isl_give isl_map *specializer(struct isl_sched_node *src,
3354 struct isl_sched_node *dst)
3356 isl_map *src_sched, *dst_sched;
3358 src_sched = node_extract_schedule(src);
3359 dst_sched = node_extract_schedule(dst);
3360 return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
3363 /* Intersect the domains of the nested relations in domain and range
3364 * of "umap" with "map".
3366 static __isl_give isl_union_map *intersect_domains(
3367 __isl_take isl_union_map *umap, __isl_keep isl_map *map)
3369 isl_union_set *uset;
3371 umap = isl_union_map_zip(umap);
3372 uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
3373 umap = isl_union_map_intersect_domain(umap, uset);
3374 umap = isl_union_map_zip(umap);
3375 return umap;
3378 /* Update the dependence relation of the given edge based
3379 * on the current schedule.
3380 * If the dependence is carried completely by the current schedule, then
3381 * it is removed from the edge_tables. It is kept in the list of edges
3382 * as otherwise all edge_tables would have to be recomputed.
3384 * If the edge is of a type that can appear multiple times
3385 * between the same pair of nodes, then it is added to
3386 * the edge table (again). This prevents the situation
3387 * where none of these edges is referenced from the edge table
3388 * because the one that was referenced turned out to be empty and
3389 * was therefore removed from the table.
3391 static isl_stat update_edge(isl_ctx *ctx, struct isl_sched_graph *graph,
3392 struct isl_sched_edge *edge)
3394 int empty;
3395 isl_map *id;
3397 id = specializer(edge->src, edge->dst);
3398 edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
3399 if (!edge->map)
3400 goto error;
3402 if (edge->tagged_condition) {
3403 edge->tagged_condition =
3404 intersect_domains(edge->tagged_condition, id);
3405 if (!edge->tagged_condition)
3406 goto error;
3408 if (edge->tagged_validity) {
3409 edge->tagged_validity =
3410 intersect_domains(edge->tagged_validity, id);
3411 if (!edge->tagged_validity)
3412 goto error;
3415 empty = isl_map_plain_is_empty(edge->map);
3416 if (empty < 0)
3417 goto error;
3418 if (empty) {
3419 if (graph_remove_edge(graph, edge) < 0)
3420 goto error;
3421 } else if (is_multi_edge_type(edge)) {
3422 if (graph_edge_tables_add(ctx, graph, edge) < 0)
3423 goto error;
3426 isl_map_free(id);
3427 return isl_stat_ok;
3428 error:
3429 isl_map_free(id);
3430 return isl_stat_error;
3433 /* Does the domain of "umap" intersect "uset"?
3435 static int domain_intersects(__isl_keep isl_union_map *umap,
3436 __isl_keep isl_union_set *uset)
3438 int empty;
3440 umap = isl_union_map_copy(umap);
3441 umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
3442 empty = isl_union_map_is_empty(umap);
3443 isl_union_map_free(umap);
3445 return empty < 0 ? -1 : !empty;
3448 /* Does the range of "umap" intersect "uset"?
3450 static int range_intersects(__isl_keep isl_union_map *umap,
3451 __isl_keep isl_union_set *uset)
3453 int empty;
3455 umap = isl_union_map_copy(umap);
3456 umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
3457 empty = isl_union_map_is_empty(umap);
3458 isl_union_map_free(umap);
3460 return empty < 0 ? -1 : !empty;
3463 /* Are the condition dependences of "edge" local with respect to
3464 * the current schedule?
3466 * That is, are domain and range of the condition dependences mapped
3467 * to the same point?
3469 * In other words, is the condition false?
3471 static int is_condition_false(struct isl_sched_edge *edge)
3473 isl_union_map *umap;
3474 isl_map *map, *sched, *test;
3475 int empty, local;
3477 empty = isl_union_map_is_empty(edge->tagged_condition);
3478 if (empty < 0 || empty)
3479 return empty;
3481 umap = isl_union_map_copy(edge->tagged_condition);
3482 umap = isl_union_map_zip(umap);
3483 umap = isl_union_set_unwrap(isl_union_map_domain(umap));
3484 map = isl_map_from_union_map(umap);
3486 sched = node_extract_schedule(edge->src);
3487 map = isl_map_apply_domain(map, sched);
3488 sched = node_extract_schedule(edge->dst);
3489 map = isl_map_apply_range(map, sched);
3491 test = isl_map_identity(isl_map_get_space(map));
3492 local = isl_map_is_subset(map, test);
3493 isl_map_free(map);
3494 isl_map_free(test);
3496 return local;
3499 /* For each conditional validity constraint that is adjacent
3500 * to a condition with domain in condition_source or range in condition_sink,
3501 * turn it into an unconditional validity constraint.
3503 static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
3504 __isl_take isl_union_set *condition_source,
3505 __isl_take isl_union_set *condition_sink)
3507 int i;
3509 condition_source = isl_union_set_coalesce(condition_source);
3510 condition_sink = isl_union_set_coalesce(condition_sink);
3512 for (i = 0; i < graph->n_edge; ++i) {
3513 int adjacent;
3514 isl_union_map *validity;
3516 if (!is_conditional_validity(&graph->edge[i]))
3517 continue;
3518 if (is_validity(&graph->edge[i]))
3519 continue;
3521 validity = graph->edge[i].tagged_validity;
3522 adjacent = domain_intersects(validity, condition_sink);
3523 if (adjacent >= 0 && !adjacent)
3524 adjacent = range_intersects(validity, condition_source);
3525 if (adjacent < 0)
3526 goto error;
3527 if (!adjacent)
3528 continue;
3530 set_validity(&graph->edge[i]);
3533 isl_union_set_free(condition_source);
3534 isl_union_set_free(condition_sink);
3535 return 0;
3536 error:
3537 isl_union_set_free(condition_source);
3538 isl_union_set_free(condition_sink);
3539 return -1;
3542 /* Update the dependence relations of all edges based on the current schedule
3543 * and enforce conditional validity constraints that are adjacent
3544 * to satisfied condition constraints.
3546 * First check if any of the condition constraints are satisfied
3547 * (i.e., not local to the outer schedule) and keep track of
3548 * their domain and range.
3549 * Then update all dependence relations (which removes the non-local
3550 * constraints).
3551 * Finally, if any condition constraints turned out to be satisfied,
3552 * then turn all adjacent conditional validity constraints into
3553 * unconditional validity constraints.
3555 static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
3557 int i;
3558 int any = 0;
3559 isl_union_set *source, *sink;
3561 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3562 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3563 for (i = 0; i < graph->n_edge; ++i) {
3564 int local;
3565 isl_union_set *uset;
3566 isl_union_map *umap;
3568 if (!is_condition(&graph->edge[i]))
3569 continue;
3570 if (is_local(&graph->edge[i]))
3571 continue;
3572 local = is_condition_false(&graph->edge[i]);
3573 if (local < 0)
3574 goto error;
3575 if (local)
3576 continue;
3578 any = 1;
3580 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3581 uset = isl_union_map_domain(umap);
3582 source = isl_union_set_union(source, uset);
3584 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3585 uset = isl_union_map_range(umap);
3586 sink = isl_union_set_union(sink, uset);
3589 for (i = 0; i < graph->n_edge; ++i) {
3590 if (update_edge(ctx, graph, &graph->edge[i]) < 0)
3591 goto error;
3594 if (any)
3595 return unconditionalize_adjacent_validity(graph, source, sink);
3597 isl_union_set_free(source);
3598 isl_union_set_free(sink);
3599 return 0;
3600 error:
3601 isl_union_set_free(source);
3602 isl_union_set_free(sink);
3603 return -1;
3606 static void next_band(struct isl_sched_graph *graph)
3608 graph->band_start = graph->n_total_row;
3611 /* Return the union of the universe domains of the nodes in "graph"
3612 * that satisfy "pred".
3614 static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
3615 struct isl_sched_graph *graph,
3616 int (*pred)(struct isl_sched_node *node, int data), int data)
3618 int i;
3619 isl_set *set;
3620 isl_union_set *dom;
3622 for (i = 0; i < graph->n; ++i)
3623 if (pred(&graph->node[i], data))
3624 break;
3626 if (i >= graph->n)
3627 isl_die(ctx, isl_error_internal,
3628 "empty component", return NULL);
3630 set = isl_set_universe(isl_space_copy(graph->node[i].space));
3631 dom = isl_union_set_from_set(set);
3633 for (i = i + 1; i < graph->n; ++i) {
3634 if (!pred(&graph->node[i], data))
3635 continue;
3636 set = isl_set_universe(isl_space_copy(graph->node[i].space));
3637 dom = isl_union_set_union(dom, isl_union_set_from_set(set));
3640 return dom;
3643 /* Return a list of unions of universe domains, where each element
3644 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
3646 static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
3647 struct isl_sched_graph *graph)
3649 int i;
3650 isl_union_set_list *filters;
3652 filters = isl_union_set_list_alloc(ctx, graph->scc);
3653 for (i = 0; i < graph->scc; ++i) {
3654 isl_union_set *dom;
3656 dom = isl_sched_graph_domain(ctx, graph,
3657 &isl_sched_node_scc_exactly, i);
3658 filters = isl_union_set_list_add(filters, dom);
3661 return filters;
3664 /* Return a list of two unions of universe domains, one for the SCCs up
3665 * to and including graph->src_scc and another for the other SCCs.
3667 static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
3668 struct isl_sched_graph *graph)
3670 isl_union_set *dom;
3671 isl_union_set_list *filters;
3673 filters = isl_union_set_list_alloc(ctx, 2);
3674 dom = isl_sched_graph_domain(ctx, graph,
3675 &node_scc_at_most, graph->src_scc);
3676 filters = isl_union_set_list_add(filters, dom);
3677 dom = isl_sched_graph_domain(ctx, graph,
3678 &node_scc_at_least, graph->src_scc + 1);
3679 filters = isl_union_set_list_add(filters, dom);
3681 return filters;
3684 /* Copy nodes that satisfy node_pred from the src dependence graph
3685 * to the dst dependence graph.
3687 static isl_stat copy_nodes(struct isl_sched_graph *dst,
3688 struct isl_sched_graph *src,
3689 int (*node_pred)(struct isl_sched_node *node, int data), int data)
3691 int i;
3693 dst->n = 0;
3694 for (i = 0; i < src->n; ++i) {
3695 int j;
3697 if (!node_pred(&src->node[i], data))
3698 continue;
3700 j = dst->n;
3701 dst->node[j].space = isl_space_copy(src->node[i].space);
3702 dst->node[j].compressed = src->node[i].compressed;
3703 dst->node[j].hull = isl_set_copy(src->node[i].hull);
3704 dst->node[j].compress =
3705 isl_multi_aff_copy(src->node[i].compress);
3706 dst->node[j].decompress =
3707 isl_pw_multi_aff_copy(src->node[i].decompress);
3708 dst->node[j].nvar = src->node[i].nvar;
3709 dst->node[j].nparam = src->node[i].nparam;
3710 dst->node[j].sched = isl_mat_copy(src->node[i].sched);
3711 dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
3712 dst->node[j].coincident = src->node[i].coincident;
3713 dst->node[j].sizes = isl_multi_val_copy(src->node[i].sizes);
3714 dst->node[j].bounds = isl_basic_set_copy(src->node[i].bounds);
3715 dst->node[j].max = isl_vec_copy(src->node[i].max);
3716 dst->n++;
3718 if (!dst->node[j].space || !dst->node[j].sched)
3719 return isl_stat_error;
3720 if (dst->node[j].compressed &&
3721 (!dst->node[j].hull || !dst->node[j].compress ||
3722 !dst->node[j].decompress))
3723 return isl_stat_error;
3726 return isl_stat_ok;
3729 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3730 * to the dst dependence graph.
3731 * If the source or destination node of the edge is not in the destination
3732 * graph, then it must be a backward proximity edge and it should simply
3733 * be ignored.
3735 static isl_stat copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
3736 struct isl_sched_graph *src,
3737 int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
3739 int i;
3741 dst->n_edge = 0;
3742 for (i = 0; i < src->n_edge; ++i) {
3743 struct isl_sched_edge *edge = &src->edge[i];
3744 isl_map *map;
3745 isl_union_map *tagged_condition;
3746 isl_union_map *tagged_validity;
3747 struct isl_sched_node *dst_src, *dst_dst;
3749 if (!edge_pred(edge, data))
3750 continue;
3752 if (isl_map_plain_is_empty(edge->map))
3753 continue;
3755 dst_src = graph_find_node(ctx, dst, edge->src->space);
3756 dst_dst = graph_find_node(ctx, dst, edge->dst->space);
3757 if (!dst_src || !dst_dst)
3758 return isl_stat_error;
3759 if (!is_node(dst, dst_src) || !is_node(dst, dst_dst)) {
3760 if (is_validity(edge) || is_conditional_validity(edge))
3761 isl_die(ctx, isl_error_internal,
3762 "backward (conditional) validity edge",
3763 return isl_stat_error);
3764 continue;
3767 map = isl_map_copy(edge->map);
3768 tagged_condition = isl_union_map_copy(edge->tagged_condition);
3769 tagged_validity = isl_union_map_copy(edge->tagged_validity);
3771 dst->edge[dst->n_edge].src = dst_src;
3772 dst->edge[dst->n_edge].dst = dst_dst;
3773 dst->edge[dst->n_edge].map = map;
3774 dst->edge[dst->n_edge].tagged_condition = tagged_condition;
3775 dst->edge[dst->n_edge].tagged_validity = tagged_validity;
3776 dst->edge[dst->n_edge].types = edge->types;
3777 dst->n_edge++;
3779 if (edge->tagged_condition && !tagged_condition)
3780 return isl_stat_error;
3781 if (edge->tagged_validity && !tagged_validity)
3782 return isl_stat_error;
3784 if (graph_edge_tables_add(ctx, dst,
3785 &dst->edge[dst->n_edge - 1]) < 0)
3786 return isl_stat_error;
3789 return isl_stat_ok;
3792 /* Compute the maximal number of variables over all nodes.
3793 * This is the maximal number of linearly independent schedule
3794 * rows that we need to compute.
3795 * Just in case we end up in a part of the dependence graph
3796 * with only lower-dimensional domains, we make sure we will
3797 * compute the required amount of extra linearly independent rows.
3799 static isl_stat compute_maxvar(struct isl_sched_graph *graph)
3801 int i;
3803 graph->maxvar = 0;
3804 for (i = 0; i < graph->n; ++i) {
3805 struct isl_sched_node *node = &graph->node[i];
3806 int nvar;
3808 if (node_update_vmap(node) < 0)
3809 return isl_stat_error;
3810 nvar = node->nvar + graph->n_row - node->rank;
3811 if (nvar > graph->maxvar)
3812 graph->maxvar = nvar;
3815 return isl_stat_ok;
3818 /* Extract the subgraph of "graph" that consists of the nodes satisfying
3819 * "node_pred" and the edges satisfying "edge_pred" and store
3820 * the result in "sub".
3822 static isl_stat extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
3823 int (*node_pred)(struct isl_sched_node *node, int data),
3824 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3825 int data, struct isl_sched_graph *sub)
3827 int i, n = 0, n_edge = 0;
3828 int t;
3830 for (i = 0; i < graph->n; ++i)
3831 if (node_pred(&graph->node[i], data))
3832 ++n;
3833 for (i = 0; i < graph->n_edge; ++i)
3834 if (edge_pred(&graph->edge[i], data))
3835 ++n_edge;
3836 if (graph_alloc(ctx, sub, n, n_edge) < 0)
3837 return isl_stat_error;
3838 sub->root = graph->root;
3839 if (copy_nodes(sub, graph, node_pred, data) < 0)
3840 return isl_stat_error;
3841 if (graph_init_table(ctx, sub) < 0)
3842 return isl_stat_error;
3843 for (t = 0; t <= isl_edge_last; ++t)
3844 sub->max_edge[t] = graph->max_edge[t];
3845 if (graph_init_edge_tables(ctx, sub) < 0)
3846 return isl_stat_error;
3847 if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
3848 return isl_stat_error;
3849 sub->n_row = graph->n_row;
3850 sub->max_row = graph->max_row;
3851 sub->n_total_row = graph->n_total_row;
3852 sub->band_start = graph->band_start;
3854 return isl_stat_ok;
3857 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
3858 struct isl_sched_graph *graph);
3859 static __isl_give isl_schedule_node *compute_schedule_wcc(
3860 isl_schedule_node *node, struct isl_sched_graph *graph);
3862 /* Compute a schedule for a subgraph of "graph". In particular, for
3863 * the graph composed of nodes that satisfy node_pred and edges that
3864 * that satisfy edge_pred.
3865 * If the subgraph is known to consist of a single component, then wcc should
3866 * be set and then we call compute_schedule_wcc on the constructed subgraph.
3867 * Otherwise, we call compute_schedule, which will check whether the subgraph
3868 * is connected.
3870 * The schedule is inserted at "node" and the updated schedule node
3871 * is returned.
3873 static __isl_give isl_schedule_node *compute_sub_schedule(
3874 __isl_take isl_schedule_node *node, isl_ctx *ctx,
3875 struct isl_sched_graph *graph,
3876 int (*node_pred)(struct isl_sched_node *node, int data),
3877 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3878 int data, int wcc)
3880 struct isl_sched_graph split = { 0 };
3882 if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
3883 &split) < 0)
3884 goto error;
3886 if (wcc)
3887 node = compute_schedule_wcc(node, &split);
3888 else
3889 node = compute_schedule(node, &split);
3891 graph_free(ctx, &split);
3892 return node;
3893 error:
3894 graph_free(ctx, &split);
3895 return isl_schedule_node_free(node);
3898 static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
3900 return edge->src->scc == scc && edge->dst->scc == scc;
3903 static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
3905 return edge->dst->scc <= scc;
3908 static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
3910 return edge->src->scc >= scc;
3913 /* Reset the current band by dropping all its schedule rows.
3915 static isl_stat reset_band(struct isl_sched_graph *graph)
3917 int i;
3918 int drop;
3920 drop = graph->n_total_row - graph->band_start;
3921 graph->n_total_row -= drop;
3922 graph->n_row -= drop;
3924 for (i = 0; i < graph->n; ++i) {
3925 struct isl_sched_node *node = &graph->node[i];
3927 isl_map_free(node->sched_map);
3928 node->sched_map = NULL;
3930 node->sched = isl_mat_drop_rows(node->sched,
3931 graph->band_start, drop);
3933 if (!node->sched)
3934 return isl_stat_error;
3937 return isl_stat_ok;
3940 /* Split the current graph into two parts and compute a schedule for each
3941 * part individually. In particular, one part consists of all SCCs up
3942 * to and including graph->src_scc, while the other part contains the other
3943 * SCCs. The split is enforced by a sequence node inserted at position "node"
3944 * in the schedule tree. Return the updated schedule node.
3945 * If either of these two parts consists of a sequence, then it is spliced
3946 * into the sequence containing the two parts.
3948 * The current band is reset. It would be possible to reuse
3949 * the previously computed rows as the first rows in the next
3950 * band, but recomputing them may result in better rows as we are looking
3951 * at a smaller part of the dependence graph.
3953 static __isl_give isl_schedule_node *compute_split_schedule(
3954 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3956 isl_ctx *ctx;
3957 isl_union_set_list *filters;
3959 if (!node)
3960 return NULL;
3962 if (reset_band(graph) < 0)
3963 return isl_schedule_node_free(node);
3965 next_band(graph);
3967 ctx = isl_schedule_node_get_ctx(node);
3968 filters = extract_split(ctx, graph);
3969 node = isl_schedule_node_insert_sequence(node, filters);
3970 node = isl_schedule_node_grandchild(node, 1, 0);
3972 node = compute_sub_schedule(node, ctx, graph,
3973 &node_scc_at_least, &edge_src_scc_at_least,
3974 graph->src_scc + 1, 0);
3975 node = isl_schedule_node_grandparent(node);
3976 node = isl_schedule_node_grandchild(node, 0, 0);
3977 node = compute_sub_schedule(node, ctx, graph,
3978 &node_scc_at_most, &edge_dst_scc_at_most,
3979 graph->src_scc, 0);
3980 node = isl_schedule_node_grandparent(node);
3982 node = isl_schedule_node_sequence_splice_children(node);
3984 return node;
3987 /* Insert a band node at position "node" in the schedule tree corresponding
3988 * to the current band in "graph". Mark the band node permutable
3989 * if "permutable" is set.
3990 * The partial schedules and the coincidence property are extracted
3991 * from the graph nodes.
3992 * Return the updated schedule node.
3994 static __isl_give isl_schedule_node *insert_current_band(
3995 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3996 int permutable)
3998 int i;
3999 int start, end, n;
4000 isl_multi_aff *ma;
4001 isl_multi_pw_aff *mpa;
4002 isl_multi_union_pw_aff *mupa;
4004 if (!node)
4005 return NULL;
4007 if (graph->n < 1)
4008 isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
4009 "graph should have at least one node",
4010 return isl_schedule_node_free(node));
4012 start = graph->band_start;
4013 end = graph->n_total_row;
4014 n = end - start;
4016 ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
4017 mpa = isl_multi_pw_aff_from_multi_aff(ma);
4018 mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
4020 for (i = 1; i < graph->n; ++i) {
4021 isl_multi_union_pw_aff *mupa_i;
4023 ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
4024 start, n);
4025 mpa = isl_multi_pw_aff_from_multi_aff(ma);
4026 mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
4027 mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
4029 node = isl_schedule_node_insert_partial_schedule(node, mupa);
4031 for (i = 0; i < n; ++i)
4032 node = isl_schedule_node_band_member_set_coincident(node, i,
4033 graph->node[0].coincident[start + i]);
4034 node = isl_schedule_node_band_set_permutable(node, permutable);
4036 return node;
4039 /* Update the dependence relations based on the current schedule,
4040 * add the current band to "node" and then continue with the computation
4041 * of the next band.
4042 * Return the updated schedule node.
4044 static __isl_give isl_schedule_node *compute_next_band(
4045 __isl_take isl_schedule_node *node,
4046 struct isl_sched_graph *graph, int permutable)
4048 isl_ctx *ctx;
4050 if (!node)
4051 return NULL;
4053 ctx = isl_schedule_node_get_ctx(node);
4054 if (update_edges(ctx, graph) < 0)
4055 return isl_schedule_node_free(node);
4056 node = insert_current_band(node, graph, permutable);
4057 next_band(graph);
4059 node = isl_schedule_node_child(node, 0);
4060 node = compute_schedule(node, graph);
4061 node = isl_schedule_node_parent(node);
4063 return node;
4066 /* Add the constraints "coef" derived from an edge from "node" to itself
4067 * to graph->lp in order to respect the dependences and to try and carry them.
4068 * "pos" is the sequence number of the edge that needs to be carried.
4069 * "coef" represents general constraints on coefficients (c_0, c_x)
4070 * of valid constraints for (y - x) with x and y instances of the node.
4072 * The constraints added to graph->lp need to enforce
4074 * (c_j_0 + c_j_x y) - (c_j_0 + c_j_x x)
4075 * = c_j_x (y - x) >= e_i
4077 * for each (x,y) in the dependence relation of the edge.
4078 * That is, (-e_i, c_j_x) needs to be plugged in for (c_0, c_x),
4079 * taking into account that each coefficient in c_j_x is represented
4080 * as a pair of non-negative coefficients.
4082 static isl_stat add_intra_constraints(struct isl_sched_graph *graph,
4083 struct isl_sched_node *node, __isl_take isl_basic_set *coef, int pos)
4085 isl_size offset;
4086 isl_ctx *ctx;
4087 isl_dim_map *dim_map;
4089 offset = coef_var_offset(coef);
4090 if (offset < 0)
4091 coef = isl_basic_set_free(coef);
4092 if (!coef)
4093 return isl_stat_error;
4095 ctx = isl_basic_set_get_ctx(coef);
4096 dim_map = intra_dim_map(ctx, graph, node, offset, 1);
4097 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
4098 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
4100 return isl_stat_ok;
4103 /* Add the constraints "coef" derived from an edge from "src" to "dst"
4104 * to graph->lp in order to respect the dependences and to try and carry them.
4105 * "pos" is the sequence number of the edge that needs to be carried or
4106 * -1 if no attempt should be made to carry the dependences.
4107 * "coef" represents general constraints on coefficients (c_0, c_n, c_x, c_y)
4108 * of valid constraints for (x, y) with x and y instances of "src" and "dst".
4110 * The constraints added to graph->lp need to enforce
4112 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
4114 * for each (x,y) in the dependence relation of the edge or
4116 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= 0
4118 * if pos is -1.
4119 * That is,
4120 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
4121 * or
4122 * (c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
4123 * needs to be plugged in for (c_0, c_n, c_x, c_y),
4124 * taking into account that each coefficient in c_j_x and c_k_x is represented
4125 * as a pair of non-negative coefficients.
4127 static isl_stat add_inter_constraints(struct isl_sched_graph *graph,
4128 struct isl_sched_node *src, struct isl_sched_node *dst,
4129 __isl_take isl_basic_set *coef, int pos)
4131 isl_size offset;
4132 isl_ctx *ctx;
4133 isl_dim_map *dim_map;
4135 offset = coef_var_offset(coef);
4136 if (offset < 0)
4137 coef = isl_basic_set_free(coef);
4138 if (!coef)
4139 return isl_stat_error;
4141 ctx = isl_basic_set_get_ctx(coef);
4142 dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
4143 if (pos >= 0)
4144 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
4145 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
4147 return isl_stat_ok;
4150 /* Data structure for keeping track of the data needed
4151 * to exploit non-trivial lineality spaces.
4153 * "any_non_trivial" is true if there are any non-trivial lineality spaces.
4154 * If "any_non_trivial" is not true, then "equivalent" and "mask" may be NULL.
4155 * "equivalent" connects instances to other instances on the same line(s).
4156 * "mask" contains the domain spaces of "equivalent".
4157 * Any instance set not in "mask" does not have a non-trivial lineality space.
4159 struct isl_exploit_lineality_data {
4160 isl_bool any_non_trivial;
4161 isl_union_map *equivalent;
4162 isl_union_set *mask;
4165 /* Data structure collecting information used during the construction
4166 * of an LP for carrying dependences.
4168 * "intra" is a sequence of coefficient constraints for intra-node edges.
4169 * "inter" is a sequence of coefficient constraints for inter-node edges.
4170 * "lineality" contains data used to exploit non-trivial lineality spaces.
4172 struct isl_carry {
4173 isl_basic_set_list *intra;
4174 isl_basic_set_list *inter;
4175 struct isl_exploit_lineality_data lineality;
4178 /* Free all the data stored in "carry".
4180 static void isl_carry_clear(struct isl_carry *carry)
4182 isl_basic_set_list_free(carry->intra);
4183 isl_basic_set_list_free(carry->inter);
4184 isl_union_map_free(carry->lineality.equivalent);
4185 isl_union_set_free(carry->lineality.mask);
4188 /* Return a pointer to the node in "graph" that lives in "space".
4189 * If the requested node has been compressed, then "space"
4190 * corresponds to the compressed space.
4191 * The graph is assumed to have such a node.
4192 * Return NULL in case of error.
4194 * First try and see if "space" is the space of an uncompressed node.
4195 * If so, return that node.
4196 * Otherwise, "space" was constructed by construct_compressed_id and
4197 * contains a user pointer pointing to the node in the tuple id.
4198 * However, this node belongs to the original dependence graph.
4199 * If "graph" is a subgraph of this original dependence graph,
4200 * then the node with the same space still needs to be looked up
4201 * in the current graph.
4203 static struct isl_sched_node *graph_find_compressed_node(isl_ctx *ctx,
4204 struct isl_sched_graph *graph, __isl_keep isl_space *space)
4206 isl_id *id;
4207 struct isl_sched_node *node;
4209 if (!space)
4210 return NULL;
4212 node = graph_find_node(ctx, graph, space);
4213 if (!node)
4214 return NULL;
4215 if (is_node(graph, node))
4216 return node;
4218 id = isl_space_get_tuple_id(space, isl_dim_set);
4219 node = isl_id_get_user(id);
4220 isl_id_free(id);
4222 if (!node)
4223 return NULL;
4225 if (!is_node(graph->root, node))
4226 isl_die(ctx, isl_error_internal,
4227 "space points to invalid node", return NULL);
4228 if (graph != graph->root)
4229 node = graph_find_node(ctx, graph, node->space);
4230 if (!is_node(graph, node))
4231 isl_die(ctx, isl_error_internal,
4232 "unable to find node", return NULL);
4234 return node;
4237 /* Internal data structure for add_all_constraints.
4239 * "graph" is the schedule constraint graph for which an LP problem
4240 * is being constructed.
4241 * "carry_inter" indicates whether inter-node edges should be carried.
4242 * "pos" is the position of the next edge that needs to be carried.
4244 struct isl_add_all_constraints_data {
4245 isl_ctx *ctx;
4246 struct isl_sched_graph *graph;
4247 int carry_inter;
4248 int pos;
4251 /* Add the constraints "coef" derived from an edge from a node to itself
4252 * to data->graph->lp in order to respect the dependences and
4253 * to try and carry them.
4255 * The space of "coef" is of the form
4257 * coefficients[[c_cst] -> S[c_x]]
4259 * with S[c_x] the (compressed) space of the node.
4260 * Extract the node from the space and call add_intra_constraints.
4262 static isl_stat lp_add_intra(__isl_take isl_basic_set *coef, void *user)
4264 struct isl_add_all_constraints_data *data = user;
4265 isl_space *space;
4266 struct isl_sched_node *node;
4268 space = isl_basic_set_get_space(coef);
4269 space = isl_space_range(isl_space_unwrap(space));
4270 node = graph_find_compressed_node(data->ctx, data->graph, space);
4271 isl_space_free(space);
4272 return add_intra_constraints(data->graph, node, coef, data->pos++);
4275 /* Add the constraints "coef" derived from an edge from a node j
4276 * to a node k to data->graph->lp in order to respect the dependences and
4277 * to try and carry them (provided data->carry_inter is set).
4279 * The space of "coef" is of the form
4281 * coefficients[[c_cst, c_n] -> [S_j[c_x] -> S_k[c_y]]]
4283 * with S_j[c_x] and S_k[c_y] the (compressed) spaces of the nodes.
4284 * Extract the nodes from the space and call add_inter_constraints.
4286 static isl_stat lp_add_inter(__isl_take isl_basic_set *coef, void *user)
4288 struct isl_add_all_constraints_data *data = user;
4289 isl_space *space, *dom;
4290 struct isl_sched_node *src, *dst;
4291 int pos;
4293 space = isl_basic_set_get_space(coef);
4294 space = isl_space_unwrap(isl_space_range(isl_space_unwrap(space)));
4295 dom = isl_space_domain(isl_space_copy(space));
4296 src = graph_find_compressed_node(data->ctx, data->graph, dom);
4297 isl_space_free(dom);
4298 space = isl_space_range(space);
4299 dst = graph_find_compressed_node(data->ctx, data->graph, space);
4300 isl_space_free(space);
4302 pos = data->carry_inter ? data->pos++ : -1;
4303 return add_inter_constraints(data->graph, src, dst, coef, pos);
4306 /* Add constraints to graph->lp that force all (conditional) validity
4307 * dependences to be respected and attempt to carry them.
4308 * "intra" is the sequence of coefficient constraints for intra-node edges.
4309 * "inter" is the sequence of coefficient constraints for inter-node edges.
4310 * "carry_inter" indicates whether inter-node edges should be carried or
4311 * only respected.
4313 static isl_stat add_all_constraints(isl_ctx *ctx, struct isl_sched_graph *graph,
4314 __isl_keep isl_basic_set_list *intra,
4315 __isl_keep isl_basic_set_list *inter, int carry_inter)
4317 struct isl_add_all_constraints_data data = { ctx, graph, carry_inter };
4319 data.pos = 0;
4320 if (isl_basic_set_list_foreach(intra, &lp_add_intra, &data) < 0)
4321 return isl_stat_error;
4322 if (isl_basic_set_list_foreach(inter, &lp_add_inter, &data) < 0)
4323 return isl_stat_error;
4324 return isl_stat_ok;
4327 /* Internal data structure for count_all_constraints
4328 * for keeping track of the number of equality and inequality constraints.
4330 struct isl_sched_count {
4331 int n_eq;
4332 int n_ineq;
4335 /* Add the number of equality and inequality constraints of "bset"
4336 * to data->n_eq and data->n_ineq.
4338 static isl_stat bset_update_count(__isl_take isl_basic_set *bset, void *user)
4340 struct isl_sched_count *data = user;
4342 return update_count(bset, 1, &data->n_eq, &data->n_ineq);
4345 /* Count the number of equality and inequality constraints
4346 * that will be added to the carry_lp problem.
4347 * We count each edge exactly once.
4348 * "intra" is the sequence of coefficient constraints for intra-node edges.
4349 * "inter" is the sequence of coefficient constraints for inter-node edges.
4351 static isl_stat count_all_constraints(__isl_keep isl_basic_set_list *intra,
4352 __isl_keep isl_basic_set_list *inter, int *n_eq, int *n_ineq)
4354 struct isl_sched_count data;
4356 data.n_eq = data.n_ineq = 0;
4357 if (isl_basic_set_list_foreach(inter, &bset_update_count, &data) < 0)
4358 return isl_stat_error;
4359 if (isl_basic_set_list_foreach(intra, &bset_update_count, &data) < 0)
4360 return isl_stat_error;
4362 *n_eq = data.n_eq;
4363 *n_ineq = data.n_ineq;
4365 return isl_stat_ok;
4368 /* Construct an LP problem for finding schedule coefficients
4369 * such that the schedule carries as many validity dependences as possible.
4370 * In particular, for each dependence i, we bound the dependence distance
4371 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
4372 * of all e_i's. Dependences with e_i = 0 in the solution are simply
4373 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
4374 * "intra" is the sequence of coefficient constraints for intra-node edges.
4375 * "inter" is the sequence of coefficient constraints for inter-node edges.
4376 * "n_edge" is the total number of edges.
4377 * "carry_inter" indicates whether inter-node edges should be carried or
4378 * only respected. That is, if "carry_inter" is not set, then
4379 * no e_i variables are introduced for the inter-node edges.
4381 * All variables of the LP are non-negative. The actual coefficients
4382 * may be negative, so each coefficient is represented as the difference
4383 * of two non-negative variables. The negative part always appears
4384 * immediately before the positive part.
4385 * Other than that, the variables have the following order
4387 * - sum of (1 - e_i) over all edges
4388 * - sum of all c_n coefficients
4389 * (unconstrained when computing non-parametric schedules)
4390 * - sum of positive and negative parts of all c_x coefficients
4391 * - for each edge
4392 * - e_i
4393 * - for each node
4394 * - positive and negative parts of c_i_x, in opposite order
4395 * - c_i_n (if parametric)
4396 * - c_i_0
4398 * The constraints are those from the (validity) edges plus three equalities
4399 * to express the sums and n_edge inequalities to express e_i <= 1.
4401 static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
4402 int n_edge, __isl_keep isl_basic_set_list *intra,
4403 __isl_keep isl_basic_set_list *inter, int carry_inter)
4405 int i;
4406 int k;
4407 isl_space *space;
4408 unsigned total;
4409 int n_eq, n_ineq;
4411 total = 3 + n_edge;
4412 for (i = 0; i < graph->n; ++i) {
4413 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
4414 node->start = total;
4415 total += 1 + node->nparam + 2 * node->nvar;
4418 if (count_all_constraints(intra, inter, &n_eq, &n_ineq) < 0)
4419 return isl_stat_error;
4421 space = isl_space_set_alloc(ctx, 0, total);
4422 isl_basic_set_free(graph->lp);
4423 n_eq += 3;
4424 n_ineq += n_edge;
4425 graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
4426 graph->lp = isl_basic_set_set_rational(graph->lp);
4428 k = isl_basic_set_alloc_equality(graph->lp);
4429 if (k < 0)
4430 return isl_stat_error;
4431 isl_seq_clr(graph->lp->eq[k], 1 + total);
4432 isl_int_set_si(graph->lp->eq[k][0], -n_edge);
4433 isl_int_set_si(graph->lp->eq[k][1], 1);
4434 for (i = 0; i < n_edge; ++i)
4435 isl_int_set_si(graph->lp->eq[k][4 + i], 1);
4437 if (add_param_sum_constraint(graph, 1) < 0)
4438 return isl_stat_error;
4439 if (add_var_sum_constraint(graph, 2) < 0)
4440 return isl_stat_error;
4442 for (i = 0; i < n_edge; ++i) {
4443 k = isl_basic_set_alloc_inequality(graph->lp);
4444 if (k < 0)
4445 return isl_stat_error;
4446 isl_seq_clr(graph->lp->ineq[k], 1 + total);
4447 isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
4448 isl_int_set_si(graph->lp->ineq[k][0], 1);
4451 if (add_all_constraints(ctx, graph, intra, inter, carry_inter) < 0)
4452 return isl_stat_error;
4454 return isl_stat_ok;
4457 static __isl_give isl_schedule_node *compute_component_schedule(
4458 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4459 int wcc);
4461 /* If the schedule_split_scaled option is set and if the linear
4462 * parts of the scheduling rows for all nodes in the graphs have
4463 * a non-trivial common divisor, then remove this
4464 * common divisor from the linear part.
4465 * Otherwise, insert a band node directly and continue with
4466 * the construction of the schedule.
4468 * If a non-trivial common divisor is found, then
4469 * the linear part is reduced and the remainder is ignored.
4470 * The pieces of the graph that are assigned different remainders
4471 * form (groups of) strongly connected components within
4472 * the scaled down band. If needed, they can therefore
4473 * be ordered along this remainder in a sequence node.
4474 * However, this ordering is not enforced here in order to allow
4475 * the scheduler to combine some of the strongly connected components.
4477 static __isl_give isl_schedule_node *split_scaled(
4478 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
4480 int i;
4481 int row;
4482 isl_ctx *ctx;
4483 isl_int gcd, gcd_i;
4484 isl_size n_row;
4486 if (!node)
4487 return NULL;
4489 ctx = isl_schedule_node_get_ctx(node);
4490 if (!ctx->opt->schedule_split_scaled)
4491 return compute_next_band(node, graph, 0);
4492 if (graph->n <= 1)
4493 return compute_next_band(node, graph, 0);
4494 n_row = isl_mat_rows(graph->node[0].sched);
4495 if (n_row < 0)
4496 return isl_schedule_node_free(node);
4498 isl_int_init(gcd);
4499 isl_int_init(gcd_i);
4501 isl_int_set_si(gcd, 0);
4503 row = n_row - 1;
4505 for (i = 0; i < graph->n; ++i) {
4506 struct isl_sched_node *node = &graph->node[i];
4507 isl_size cols = isl_mat_cols(node->sched);
4509 if (cols < 0)
4510 break;
4511 isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
4512 isl_int_gcd(gcd, gcd, gcd_i);
4515 isl_int_clear(gcd_i);
4516 if (i < graph->n)
4517 goto error;
4519 if (isl_int_cmp_si(gcd, 1) <= 0) {
4520 isl_int_clear(gcd);
4521 return compute_next_band(node, graph, 0);
4524 for (i = 0; i < graph->n; ++i) {
4525 struct isl_sched_node *node = &graph->node[i];
4527 isl_int_fdiv_q(node->sched->row[row][0],
4528 node->sched->row[row][0], gcd);
4529 isl_int_mul(node->sched->row[row][0],
4530 node->sched->row[row][0], gcd);
4531 node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
4532 if (!node->sched)
4533 goto error;
4536 isl_int_clear(gcd);
4538 return compute_next_band(node, graph, 0);
4539 error:
4540 isl_int_clear(gcd);
4541 return isl_schedule_node_free(node);
4544 /* Is the schedule row "sol" trivial on node "node"?
4545 * That is, is the solution zero on the dimensions linearly independent of
4546 * the previously found solutions?
4547 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
4549 * Each coefficient is represented as the difference between
4550 * two non-negative values in "sol".
4551 * We construct the schedule row s and check if it is linearly
4552 * independent of previously computed schedule rows
4553 * by computing T s, with T the linear combinations that are zero
4554 * on linearly dependent schedule rows.
4555 * If the result consists of all zeros, then the solution is trivial.
4557 static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
4559 int trivial;
4560 isl_vec *node_sol;
4562 if (!sol)
4563 return -1;
4564 if (node->nvar == node->rank)
4565 return 0;
4567 node_sol = extract_var_coef(node, sol);
4568 node_sol = isl_mat_vec_product(isl_mat_copy(node->indep), node_sol);
4569 if (!node_sol)
4570 return -1;
4572 trivial = isl_seq_first_non_zero(node_sol->el,
4573 node->nvar - node->rank) == -1;
4575 isl_vec_free(node_sol);
4577 return trivial;
4580 /* Is the schedule row "sol" trivial on any node where it should
4581 * not be trivial?
4582 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
4584 static int is_any_trivial(struct isl_sched_graph *graph,
4585 __isl_keep isl_vec *sol)
4587 int i;
4589 for (i = 0; i < graph->n; ++i) {
4590 struct isl_sched_node *node = &graph->node[i];
4591 int trivial;
4593 if (!needs_row(graph, node))
4594 continue;
4595 trivial = is_trivial(node, sol);
4596 if (trivial < 0 || trivial)
4597 return trivial;
4600 return 0;
4603 /* Does the schedule represented by "sol" perform loop coalescing on "node"?
4604 * If so, return the position of the coalesced dimension.
4605 * Otherwise, return node->nvar or -1 on error.
4607 * In particular, look for pairs of coefficients c_i and c_j such that
4608 * |c_j/c_i| > ceil(size_i/2), i.e., |c_j| > |c_i * ceil(size_i/2)|.
4609 * If any such pair is found, then return i.
4610 * If size_i is infinity, then no check on c_i needs to be performed.
4612 static int find_node_coalescing(struct isl_sched_node *node,
4613 __isl_keep isl_vec *sol)
4615 int i, j;
4616 isl_int max;
4617 isl_vec *csol;
4619 if (node->nvar <= 1)
4620 return node->nvar;
4622 csol = extract_var_coef(node, sol);
4623 if (!csol)
4624 return -1;
4625 isl_int_init(max);
4626 for (i = 0; i < node->nvar; ++i) {
4627 isl_val *v;
4629 if (isl_int_is_zero(csol->el[i]))
4630 continue;
4631 v = isl_multi_val_get_val(node->sizes, i);
4632 if (!v)
4633 goto error;
4634 if (!isl_val_is_int(v)) {
4635 isl_val_free(v);
4636 continue;
4638 v = isl_val_div_ui(v, 2);
4639 v = isl_val_ceil(v);
4640 if (!v)
4641 goto error;
4642 isl_int_mul(max, v->n, csol->el[i]);
4643 isl_val_free(v);
4645 for (j = 0; j < node->nvar; ++j) {
4646 if (j == i)
4647 continue;
4648 if (isl_int_abs_gt(csol->el[j], max))
4649 break;
4651 if (j < node->nvar)
4652 break;
4655 isl_int_clear(max);
4656 isl_vec_free(csol);
4657 return i;
4658 error:
4659 isl_int_clear(max);
4660 isl_vec_free(csol);
4661 return -1;
4664 /* Force the schedule coefficient at position "pos" of "node" to be zero
4665 * in "tl".
4666 * The coefficient is encoded as the difference between two non-negative
4667 * variables. Force these two variables to have the same value.
4669 static __isl_give isl_tab_lexmin *zero_out_node_coef(
4670 __isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
4672 int dim;
4673 isl_ctx *ctx;
4674 isl_vec *eq;
4676 ctx = isl_space_get_ctx(node->space);
4677 dim = isl_tab_lexmin_dim(tl);
4678 if (dim < 0)
4679 return isl_tab_lexmin_free(tl);
4680 eq = isl_vec_alloc(ctx, 1 + dim);
4681 eq = isl_vec_clr(eq);
4682 if (!eq)
4683 return isl_tab_lexmin_free(tl);
4685 pos = 1 + node_var_coef_pos(node, pos);
4686 isl_int_set_si(eq->el[pos], 1);
4687 isl_int_set_si(eq->el[pos + 1], -1);
4688 tl = isl_tab_lexmin_add_eq(tl, eq->el);
4689 isl_vec_free(eq);
4691 return tl;
4694 /* Return the lexicographically smallest rational point in the basic set
4695 * from which "tl" was constructed, double checking that this input set
4696 * was not empty.
4698 static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
4700 isl_vec *sol;
4702 sol = isl_tab_lexmin_get_solution(tl);
4703 if (!sol)
4704 return NULL;
4705 if (sol->size == 0)
4706 isl_die(isl_vec_get_ctx(sol), isl_error_internal,
4707 "error in schedule construction",
4708 return isl_vec_free(sol));
4709 return sol;
4712 /* Does the solution "sol" of the LP problem constructed by setup_carry_lp
4713 * carry any of the "n_edge" groups of dependences?
4714 * The value in the first position is the sum of (1 - e_i) over all "n_edge"
4715 * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
4716 * by the edge are carried by the solution.
4717 * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
4718 * one of those is carried.
4720 * Note that despite the fact that the problem is solved using a rational
4721 * solver, the solution is guaranteed to be integral.
4722 * Specifically, the dependence distance lower bounds e_i (and therefore
4723 * also their sum) are integers. See Lemma 5 of [1].
4725 * Any potential denominator of the sum is cleared by this function.
4726 * The denominator is not relevant for any of the other elements
4727 * in the solution.
4729 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
4730 * Problem, Part II: Multi-Dimensional Time.
4731 * In Intl. Journal of Parallel Programming, 1992.
4733 static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
4735 isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
4736 isl_int_set_si(sol->el[0], 1);
4737 return isl_int_cmp_si(sol->el[1], n_edge) < 0;
4740 /* Return the lexicographically smallest rational point in "lp",
4741 * assuming that all variables are non-negative and performing some
4742 * additional sanity checks.
4743 * If "want_integral" is set, then compute the lexicographically smallest
4744 * integer point instead.
4745 * In particular, "lp" should not be empty by construction.
4746 * Double check that this is the case.
4747 * If dependences are not carried for any of the "n_edge" edges,
4748 * then return an empty vector.
4750 * If the schedule_treat_coalescing option is set and
4751 * if the computed schedule performs loop coalescing on a given node,
4752 * i.e., if it is of the form
4754 * c_i i + c_j j + ...
4756 * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
4757 * to cut out this solution. Repeat this process until no more loop
4758 * coalescing occurs or until no more dependences can be carried.
4759 * In the latter case, revert to the previously computed solution.
4761 * If the caller requests an integral solution and if coalescing should
4762 * be treated, then perform the coalescing treatment first as
4763 * an integral solution computed before coalescing treatment
4764 * would carry the same number of edges and would therefore probably
4765 * also be coalescing.
4767 * To allow the coalescing treatment to be performed first,
4768 * the initial solution is allowed to be rational and it is only
4769 * cut out (if needed) in the next iteration, if no coalescing measures
4770 * were taken.
4772 static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
4773 __isl_take isl_basic_set *lp, int n_edge, int want_integral)
4775 int i, pos, cut;
4776 isl_ctx *ctx;
4777 isl_tab_lexmin *tl;
4778 isl_vec *sol = NULL, *prev;
4779 int treat_coalescing;
4780 int try_again;
4782 if (!lp)
4783 return NULL;
4784 ctx = isl_basic_set_get_ctx(lp);
4785 treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
4786 tl = isl_tab_lexmin_from_basic_set(lp);
4788 cut = 0;
4789 do {
4790 int integral;
4792 try_again = 0;
4793 if (cut)
4794 tl = isl_tab_lexmin_cut_to_integer(tl);
4795 prev = sol;
4796 sol = non_empty_solution(tl);
4797 if (!sol)
4798 goto error;
4800 integral = isl_int_is_one(sol->el[0]);
4801 if (!carries_dependences(sol, n_edge)) {
4802 if (!prev)
4803 prev = isl_vec_alloc(ctx, 0);
4804 isl_vec_free(sol);
4805 sol = prev;
4806 break;
4808 prev = isl_vec_free(prev);
4809 cut = want_integral && !integral;
4810 if (cut)
4811 try_again = 1;
4812 if (!treat_coalescing)
4813 continue;
4814 for (i = 0; i < graph->n; ++i) {
4815 struct isl_sched_node *node = &graph->node[i];
4817 pos = find_node_coalescing(node, sol);
4818 if (pos < 0)
4819 goto error;
4820 if (pos < node->nvar)
4821 break;
4823 if (i < graph->n) {
4824 try_again = 1;
4825 tl = zero_out_node_coef(tl, &graph->node[i], pos);
4826 cut = 0;
4828 } while (try_again);
4830 isl_tab_lexmin_free(tl);
4832 return sol;
4833 error:
4834 isl_tab_lexmin_free(tl);
4835 isl_vec_free(prev);
4836 isl_vec_free(sol);
4837 return NULL;
4840 /* If "edge" is an edge from a node to itself, then add the corresponding
4841 * dependence relation to "umap".
4842 * If "node" has been compressed, then the dependence relation
4843 * is also compressed first.
4845 static __isl_give isl_union_map *add_intra(__isl_take isl_union_map *umap,
4846 struct isl_sched_edge *edge)
4848 isl_map *map;
4849 struct isl_sched_node *node = edge->src;
4851 if (edge->src != edge->dst)
4852 return umap;
4854 map = isl_map_copy(edge->map);
4855 map = compress(map, node, node);
4856 umap = isl_union_map_add_map(umap, map);
4857 return umap;
4860 /* If "edge" is an edge from a node to another node, then add the corresponding
4861 * dependence relation to "umap".
4862 * If the source or destination nodes of "edge" have been compressed,
4863 * then the dependence relation is also compressed first.
4865 static __isl_give isl_union_map *add_inter(__isl_take isl_union_map *umap,
4866 struct isl_sched_edge *edge)
4868 isl_map *map;
4870 if (edge->src == edge->dst)
4871 return umap;
4873 map = isl_map_copy(edge->map);
4874 map = compress(map, edge->src, edge->dst);
4875 umap = isl_union_map_add_map(umap, map);
4876 return umap;
4879 /* Internal data structure used by union_drop_coalescing_constraints
4880 * to collect bounds on all relevant statements.
4882 * "graph" is the schedule constraint graph for which an LP problem
4883 * is being constructed.
4884 * "bounds" collects the bounds.
4886 struct isl_collect_bounds_data {
4887 isl_ctx *ctx;
4888 struct isl_sched_graph *graph;
4889 isl_union_set *bounds;
4892 /* Add the size bounds for the node with instance deltas in "set"
4893 * to data->bounds.
4895 static isl_stat collect_bounds(__isl_take isl_set *set, void *user)
4897 struct isl_collect_bounds_data *data = user;
4898 struct isl_sched_node *node;
4899 isl_space *space;
4900 isl_set *bounds;
4902 space = isl_set_get_space(set);
4903 isl_set_free(set);
4905 node = graph_find_compressed_node(data->ctx, data->graph, space);
4906 isl_space_free(space);
4908 bounds = isl_set_from_basic_set(get_size_bounds(node));
4909 data->bounds = isl_union_set_add_set(data->bounds, bounds);
4911 return isl_stat_ok;
4914 /* Drop some constraints from "delta" that could be exploited
4915 * to construct loop coalescing schedules.
4916 * In particular, drop those constraint that bound the difference
4917 * to the size of the domain.
4918 * Do this for each set/node in "delta" separately.
4919 * The parameters are assumed to have been projected out by the caller.
4921 static __isl_give isl_union_set *union_drop_coalescing_constraints(isl_ctx *ctx,
4922 struct isl_sched_graph *graph, __isl_take isl_union_set *delta)
4924 struct isl_collect_bounds_data data = { ctx, graph };
4926 data.bounds = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4927 if (isl_union_set_foreach_set(delta, &collect_bounds, &data) < 0)
4928 data.bounds = isl_union_set_free(data.bounds);
4929 delta = isl_union_set_plain_gist(delta, data.bounds);
4931 return delta;
4934 /* Given a non-trivial lineality space "lineality", add the corresponding
4935 * universe set to data->mask and add a map from elements to
4936 * other elements along the lines in "lineality" to data->equivalent.
4937 * If this is the first time this function gets called
4938 * (data->any_non_trivial is still false), then set data->any_non_trivial and
4939 * initialize data->mask and data->equivalent.
4941 * In particular, if the lineality space is defined by equality constraints
4943 * E x = 0
4945 * then construct an affine mapping
4947 * f : x -> E x
4949 * and compute the equivalence relation of having the same image under f:
4951 * { x -> x' : E x = E x' }
4953 static isl_stat add_non_trivial_lineality(__isl_take isl_basic_set *lineality,
4954 struct isl_exploit_lineality_data *data)
4956 isl_mat *eq;
4957 isl_space *space;
4958 isl_set *univ;
4959 isl_multi_aff *ma;
4960 isl_multi_pw_aff *mpa;
4961 isl_map *map;
4962 isl_size n;
4964 if (isl_basic_set_check_no_locals(lineality) < 0)
4965 goto error;
4967 space = isl_basic_set_get_space(lineality);
4968 if (!data->any_non_trivial) {
4969 data->equivalent = isl_union_map_empty(isl_space_copy(space));
4970 data->mask = isl_union_set_empty(isl_space_copy(space));
4972 data->any_non_trivial = isl_bool_true;
4974 univ = isl_set_universe(isl_space_copy(space));
4975 data->mask = isl_union_set_add_set(data->mask, univ);
4977 eq = isl_basic_set_extract_equalities(lineality);
4978 n = isl_mat_rows(eq);
4979 if (n < 0)
4980 space = isl_space_free(space);
4981 eq = isl_mat_insert_zero_rows(eq, 0, 1);
4982 eq = isl_mat_set_element_si(eq, 0, 0, 1);
4983 space = isl_space_from_domain(space);
4984 space = isl_space_add_dims(space, isl_dim_out, n);
4985 ma = isl_multi_aff_from_aff_mat(space, eq);
4986 mpa = isl_multi_pw_aff_from_multi_aff(ma);
4987 map = isl_multi_pw_aff_eq_map(mpa, isl_multi_pw_aff_copy(mpa));
4988 data->equivalent = isl_union_map_add_map(data->equivalent, map);
4990 isl_basic_set_free(lineality);
4991 return isl_stat_ok;
4992 error:
4993 isl_basic_set_free(lineality);
4994 return isl_stat_error;
4997 /* Check if the lineality space "set" is non-trivial (i.e., is not just
4998 * the origin or, in other words, satisfies a number of equality constraints
4999 * that is smaller than the dimension of the set).
5000 * If so, extend data->mask and data->equivalent accordingly.
5002 * The input should not have any local variables already, but
5003 * isl_set_remove_divs is called to make sure it does not.
5005 static isl_stat add_lineality(__isl_take isl_set *set, void *user)
5007 struct isl_exploit_lineality_data *data = user;
5008 isl_basic_set *hull;
5009 isl_size dim;
5010 isl_size n_eq;
5012 set = isl_set_remove_divs(set);
5013 hull = isl_set_unshifted_simple_hull(set);
5014 dim = isl_basic_set_dim(hull, isl_dim_set);
5015 n_eq = isl_basic_set_n_equality(hull);
5016 if (dim < 0 || n_eq < 0)
5017 goto error;
5018 if (dim != n_eq)
5019 return add_non_trivial_lineality(hull, data);
5020 isl_basic_set_free(hull);
5021 return isl_stat_ok;
5022 error:
5023 isl_basic_set_free(hull);
5024 return isl_stat_error;
5027 /* Check if the difference set on intra-node schedule constraints "intra"
5028 * has any non-trivial lineality space.
5029 * If so, then extend the difference set to a difference set
5030 * on equivalent elements. That is, if "intra" is
5032 * { y - x : (x,y) \in V }
5034 * and elements are equivalent if they have the same image under f,
5035 * then return
5037 * { y' - x' : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
5039 * or, since f is linear,
5041 * { y' - x' : (x,y) \in V and f(y - x) = f(y' - x') }
5043 * The results of the search for non-trivial lineality spaces is stored
5044 * in "data".
5046 static __isl_give isl_union_set *exploit_intra_lineality(
5047 __isl_take isl_union_set *intra,
5048 struct isl_exploit_lineality_data *data)
5050 isl_union_set *lineality;
5051 isl_union_set *uset;
5053 data->any_non_trivial = isl_bool_false;
5054 lineality = isl_union_set_copy(intra);
5055 lineality = isl_union_set_combined_lineality_space(lineality);
5056 if (isl_union_set_foreach_set(lineality, &add_lineality, data) < 0)
5057 data->any_non_trivial = isl_bool_error;
5058 isl_union_set_free(lineality);
5060 if (data->any_non_trivial < 0)
5061 return isl_union_set_free(intra);
5062 if (!data->any_non_trivial)
5063 return intra;
5065 uset = isl_union_set_copy(intra);
5066 intra = isl_union_set_subtract(intra, isl_union_set_copy(data->mask));
5067 uset = isl_union_set_apply(uset, isl_union_map_copy(data->equivalent));
5068 intra = isl_union_set_union(intra, uset);
5070 intra = isl_union_set_remove_divs(intra);
5072 return intra;
5075 /* If the difference set on intra-node schedule constraints was found to have
5076 * any non-trivial lineality space by exploit_intra_lineality,
5077 * as recorded in "data", then extend the inter-node
5078 * schedule constraints "inter" to schedule constraints on equivalent elements.
5079 * That is, if "inter" is V and
5080 * elements are equivalent if they have the same image under f, then return
5082 * { (x', y') : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
5084 static __isl_give isl_union_map *exploit_inter_lineality(
5085 __isl_take isl_union_map *inter,
5086 struct isl_exploit_lineality_data *data)
5088 isl_union_map *umap;
5090 if (data->any_non_trivial < 0)
5091 return isl_union_map_free(inter);
5092 if (!data->any_non_trivial)
5093 return inter;
5095 umap = isl_union_map_copy(inter);
5096 inter = isl_union_map_subtract_range(inter,
5097 isl_union_set_copy(data->mask));
5098 umap = isl_union_map_apply_range(umap,
5099 isl_union_map_copy(data->equivalent));
5100 inter = isl_union_map_union(inter, umap);
5101 umap = isl_union_map_copy(inter);
5102 inter = isl_union_map_subtract_domain(inter,
5103 isl_union_set_copy(data->mask));
5104 umap = isl_union_map_apply_range(isl_union_map_copy(data->equivalent),
5105 umap);
5106 inter = isl_union_map_union(inter, umap);
5108 inter = isl_union_map_remove_divs(inter);
5110 return inter;
5113 /* For each (conditional) validity edge in "graph",
5114 * add the corresponding dependence relation using "add"
5115 * to a collection of dependence relations and return the result.
5116 * If "coincidence" is set, then coincidence edges are considered as well.
5118 static __isl_give isl_union_map *collect_validity(struct isl_sched_graph *graph,
5119 __isl_give isl_union_map *(*add)(__isl_take isl_union_map *umap,
5120 struct isl_sched_edge *edge), int coincidence)
5122 int i;
5123 isl_space *space;
5124 isl_union_map *umap;
5126 space = isl_space_copy(graph->node[0].space);
5127 umap = isl_union_map_empty(space);
5129 for (i = 0; i < graph->n_edge; ++i) {
5130 struct isl_sched_edge *edge = &graph->edge[i];
5132 if (!is_any_validity(edge) &&
5133 (!coincidence || !is_coincidence(edge)))
5134 continue;
5136 umap = add(umap, edge);
5139 return umap;
5142 /* For each dependence relation on a (conditional) validity edge
5143 * from a node to itself,
5144 * construct the set of coefficients of valid constraints for elements
5145 * in that dependence relation and collect the results.
5146 * If "coincidence" is set, then coincidence edges are considered as well.
5148 * In particular, for each dependence relation R, constraints
5149 * on coefficients (c_0, c_x) are constructed such that
5151 * c_0 + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
5153 * If the schedule_treat_coalescing option is set, then some constraints
5154 * that could be exploited to construct coalescing schedules
5155 * are removed before the dual is computed, but after the parameters
5156 * have been projected out.
5157 * The entire computation is essentially the same as that performed
5158 * by intra_coefficients, except that it operates on multiple
5159 * edges together and that the parameters are always projected out.
5161 * Additionally, exploit any non-trivial lineality space
5162 * in the difference set after removing coalescing constraints and
5163 * store the results of the non-trivial lineality space detection in "data".
5164 * The procedure is currently run unconditionally, but it is unlikely
5165 * to find any non-trivial lineality spaces if no coalescing constraints
5166 * have been removed.
5168 * Note that if a dependence relation is a union of basic maps,
5169 * then each basic map needs to be treated individually as it may only
5170 * be possible to carry the dependences expressed by some of those
5171 * basic maps and not all of them.
5172 * The collected validity constraints are therefore not coalesced and
5173 * it is assumed that they are not coalesced automatically.
5174 * Duplicate basic maps can be removed, however.
5175 * In particular, if the same basic map appears as a disjunct
5176 * in multiple edges, then it only needs to be carried once.
5178 static __isl_give isl_basic_set_list *collect_intra_validity(isl_ctx *ctx,
5179 struct isl_sched_graph *graph, int coincidence,
5180 struct isl_exploit_lineality_data *data)
5182 isl_union_map *intra;
5183 isl_union_set *delta;
5184 isl_basic_set_list *list;
5186 intra = collect_validity(graph, &add_intra, coincidence);
5187 delta = isl_union_map_deltas(intra);
5188 delta = isl_union_set_project_out_all_params(delta);
5189 delta = isl_union_set_remove_divs(delta);
5190 if (isl_options_get_schedule_treat_coalescing(ctx))
5191 delta = union_drop_coalescing_constraints(ctx, graph, delta);
5192 delta = exploit_intra_lineality(delta, data);
5193 list = isl_union_set_get_basic_set_list(delta);
5194 isl_union_set_free(delta);
5196 return isl_basic_set_list_coefficients(list);
5199 /* For each dependence relation on a (conditional) validity edge
5200 * from a node to some other node,
5201 * construct the set of coefficients of valid constraints for elements
5202 * in that dependence relation and collect the results.
5203 * If "coincidence" is set, then coincidence edges are considered as well.
5205 * In particular, for each dependence relation R, constraints
5206 * on coefficients (c_0, c_n, c_x, c_y) are constructed such that
5208 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
5210 * This computation is essentially the same as that performed
5211 * by inter_coefficients, except that it operates on multiple
5212 * edges together.
5214 * Additionally, exploit any non-trivial lineality space
5215 * that may have been discovered by collect_intra_validity
5216 * (as stored in "data").
5218 * Note that if a dependence relation is a union of basic maps,
5219 * then each basic map needs to be treated individually as it may only
5220 * be possible to carry the dependences expressed by some of those
5221 * basic maps and not all of them.
5222 * The collected validity constraints are therefore not coalesced and
5223 * it is assumed that they are not coalesced automatically.
5224 * Duplicate basic maps can be removed, however.
5225 * In particular, if the same basic map appears as a disjunct
5226 * in multiple edges, then it only needs to be carried once.
5228 static __isl_give isl_basic_set_list *collect_inter_validity(
5229 struct isl_sched_graph *graph, int coincidence,
5230 struct isl_exploit_lineality_data *data)
5232 isl_union_map *inter;
5233 isl_union_set *wrap;
5234 isl_basic_set_list *list;
5236 inter = collect_validity(graph, &add_inter, coincidence);
5237 inter = exploit_inter_lineality(inter, data);
5238 inter = isl_union_map_remove_divs(inter);
5239 wrap = isl_union_map_wrap(inter);
5240 list = isl_union_set_get_basic_set_list(wrap);
5241 isl_union_set_free(wrap);
5242 return isl_basic_set_list_coefficients(list);
5245 /* Construct an LP problem for finding schedule coefficients
5246 * such that the schedule carries as many of the "n_edge" groups of
5247 * dependences as possible based on the corresponding coefficient
5248 * constraints and return the lexicographically smallest non-trivial solution.
5249 * "intra" is the sequence of coefficient constraints for intra-node edges.
5250 * "inter" is the sequence of coefficient constraints for inter-node edges.
5251 * If "want_integral" is set, then compute an integral solution
5252 * for the coefficients rather than using the numerators
5253 * of a rational solution.
5254 * "carry_inter" indicates whether inter-node edges should be carried or
5255 * only respected.
5257 * If none of the "n_edge" groups can be carried
5258 * then return an empty vector.
5260 static __isl_give isl_vec *compute_carrying_sol_coef(isl_ctx *ctx,
5261 struct isl_sched_graph *graph, int n_edge,
5262 __isl_keep isl_basic_set_list *intra,
5263 __isl_keep isl_basic_set_list *inter, int want_integral,
5264 int carry_inter)
5266 isl_basic_set *lp;
5268 if (setup_carry_lp(ctx, graph, n_edge, intra, inter, carry_inter) < 0)
5269 return NULL;
5271 lp = isl_basic_set_copy(graph->lp);
5272 return non_neg_lexmin(graph, lp, n_edge, want_integral);
5275 /* Construct an LP problem for finding schedule coefficients
5276 * such that the schedule carries as many of the validity dependences
5277 * as possible and
5278 * return the lexicographically smallest non-trivial solution.
5279 * If "fallback" is set, then the carrying is performed as a fallback
5280 * for the Pluto-like scheduler.
5281 * If "coincidence" is set, then try and carry coincidence edges as well.
5283 * The variable "n_edge" stores the number of groups that should be carried.
5284 * If none of the "n_edge" groups can be carried
5285 * then return an empty vector.
5286 * If, moreover, "n_edge" is zero, then the LP problem does not even
5287 * need to be constructed.
5289 * If a fallback solution is being computed, then compute an integral solution
5290 * for the coefficients rather than using the numerators
5291 * of a rational solution.
5293 * If a fallback solution is being computed, if there are any intra-node
5294 * dependences, and if requested by the user, then first try
5295 * to only carry those intra-node dependences.
5296 * If this fails to carry any dependences, then try again
5297 * with the inter-node dependences included.
5299 static __isl_give isl_vec *compute_carrying_sol(isl_ctx *ctx,
5300 struct isl_sched_graph *graph, int fallback, int coincidence)
5302 isl_size n_intra, n_inter;
5303 int n_edge;
5304 struct isl_carry carry = { 0 };
5305 isl_vec *sol;
5307 carry.intra = collect_intra_validity(ctx, graph, coincidence,
5308 &carry.lineality);
5309 carry.inter = collect_inter_validity(graph, coincidence,
5310 &carry.lineality);
5311 n_intra = isl_basic_set_list_n_basic_set(carry.intra);
5312 n_inter = isl_basic_set_list_n_basic_set(carry.inter);
5313 if (n_intra < 0 || n_inter < 0)
5314 goto error;
5316 if (fallback && n_intra > 0 &&
5317 isl_options_get_schedule_carry_self_first(ctx)) {
5318 sol = compute_carrying_sol_coef(ctx, graph, n_intra,
5319 carry.intra, carry.inter, fallback, 0);
5320 if (!sol || sol->size != 0 || n_inter == 0) {
5321 isl_carry_clear(&carry);
5322 return sol;
5324 isl_vec_free(sol);
5327 n_edge = n_intra + n_inter;
5328 if (n_edge == 0) {
5329 isl_carry_clear(&carry);
5330 return isl_vec_alloc(ctx, 0);
5333 sol = compute_carrying_sol_coef(ctx, graph, n_edge,
5334 carry.intra, carry.inter, fallback, 1);
5335 isl_carry_clear(&carry);
5336 return sol;
5337 error:
5338 isl_carry_clear(&carry);
5339 return NULL;
5342 /* Construct a schedule row for each node such that as many validity dependences
5343 * as possible are carried and then continue with the next band.
5344 * If "fallback" is set, then the carrying is performed as a fallback
5345 * for the Pluto-like scheduler.
5346 * If "coincidence" is set, then try and carry coincidence edges as well.
5348 * If there are no validity dependences, then no dependence can be carried and
5349 * the procedure is guaranteed to fail. If there is more than one component,
5350 * then try computing a schedule on each component separately
5351 * to prevent or at least postpone this failure.
5353 * If a schedule row is computed, then check that dependences are carried
5354 * for at least one of the edges.
5356 * If the computed schedule row turns out to be trivial on one or
5357 * more nodes where it should not be trivial, then we throw it away
5358 * and try again on each component separately.
5360 * If there is only one component, then we accept the schedule row anyway,
5361 * but we do not consider it as a complete row and therefore do not
5362 * increment graph->n_row. Note that the ranks of the nodes that
5363 * do get a non-trivial schedule part will get updated regardless and
5364 * graph->maxvar is computed based on these ranks. The test for
5365 * whether more schedule rows are required in compute_schedule_wcc
5366 * is therefore not affected.
5368 * Insert a band corresponding to the schedule row at position "node"
5369 * of the schedule tree and continue with the construction of the schedule.
5370 * This insertion and the continued construction is performed by split_scaled
5371 * after optionally checking for non-trivial common divisors.
5373 static __isl_give isl_schedule_node *carry(__isl_take isl_schedule_node *node,
5374 struct isl_sched_graph *graph, int fallback, int coincidence)
5376 int trivial;
5377 isl_ctx *ctx;
5378 isl_vec *sol;
5380 if (!node)
5381 return NULL;
5383 ctx = isl_schedule_node_get_ctx(node);
5384 sol = compute_carrying_sol(ctx, graph, fallback, coincidence);
5385 if (!sol)
5386 return isl_schedule_node_free(node);
5387 if (sol->size == 0) {
5388 isl_vec_free(sol);
5389 if (graph->scc > 1)
5390 return compute_component_schedule(node, graph, 1);
5391 isl_die(ctx, isl_error_unknown, "unable to carry dependences",
5392 return isl_schedule_node_free(node));
5395 trivial = is_any_trivial(graph, sol);
5396 if (trivial < 0) {
5397 sol = isl_vec_free(sol);
5398 } else if (trivial && graph->scc > 1) {
5399 isl_vec_free(sol);
5400 return compute_component_schedule(node, graph, 1);
5403 if (update_schedule(graph, sol, 0) < 0)
5404 return isl_schedule_node_free(node);
5405 if (trivial)
5406 graph->n_row--;
5408 return split_scaled(node, graph);
5411 /* Construct a schedule row for each node such that as many validity dependences
5412 * as possible are carried and then continue with the next band.
5413 * Do so as a fallback for the Pluto-like scheduler.
5414 * If "coincidence" is set, then try and carry coincidence edges as well.
5416 static __isl_give isl_schedule_node *carry_fallback(
5417 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5418 int coincidence)
5420 return carry(node, graph, 1, coincidence);
5423 /* Construct a schedule row for each node such that as many validity dependences
5424 * as possible are carried and then continue with the next band.
5425 * Do so for the case where the Feautrier scheduler was selected
5426 * by the user.
5428 static __isl_give isl_schedule_node *carry_feautrier(
5429 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5431 return carry(node, graph, 0, 0);
5434 /* Construct a schedule row for each node such that as many validity dependences
5435 * as possible are carried and then continue with the next band.
5436 * Do so as a fallback for the Pluto-like scheduler.
5438 static __isl_give isl_schedule_node *carry_dependences(
5439 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5441 return carry_fallback(node, graph, 0);
5444 /* Construct a schedule row for each node such that as many validity or
5445 * coincidence dependences as possible are carried and
5446 * then continue with the next band.
5447 * Do so as a fallback for the Pluto-like scheduler.
5449 static __isl_give isl_schedule_node *carry_coincidence(
5450 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5452 return carry_fallback(node, graph, 1);
5455 /* Topologically sort statements mapped to the same schedule iteration
5456 * and add insert a sequence node in front of "node"
5457 * corresponding to this order.
5458 * If "initialized" is set, then it may be assumed that compute_maxvar
5459 * has been called on the current band. Otherwise, call
5460 * compute_maxvar if and before carry_dependences gets called.
5462 * If it turns out to be impossible to sort the statements apart,
5463 * because different dependences impose different orderings
5464 * on the statements, then we extend the schedule such that
5465 * it carries at least one more dependence.
5467 static __isl_give isl_schedule_node *sort_statements(
5468 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5469 int initialized)
5471 isl_ctx *ctx;
5472 isl_union_set_list *filters;
5474 if (!node)
5475 return NULL;
5477 ctx = isl_schedule_node_get_ctx(node);
5478 if (graph->n < 1)
5479 isl_die(ctx, isl_error_internal,
5480 "graph should have at least one node",
5481 return isl_schedule_node_free(node));
5483 if (graph->n == 1)
5484 return node;
5486 if (update_edges(ctx, graph) < 0)
5487 return isl_schedule_node_free(node);
5489 if (graph->n_edge == 0)
5490 return node;
5492 if (detect_sccs(ctx, graph) < 0)
5493 return isl_schedule_node_free(node);
5495 next_band(graph);
5496 if (graph->scc < graph->n) {
5497 if (!initialized && compute_maxvar(graph) < 0)
5498 return isl_schedule_node_free(node);
5499 return carry_dependences(node, graph);
5502 filters = extract_sccs(ctx, graph);
5503 node = isl_schedule_node_insert_sequence(node, filters);
5505 return node;
5508 /* Are there any (non-empty) (conditional) validity edges in the graph?
5510 static int has_validity_edges(struct isl_sched_graph *graph)
5512 int i;
5514 for (i = 0; i < graph->n_edge; ++i) {
5515 int empty;
5517 empty = isl_map_plain_is_empty(graph->edge[i].map);
5518 if (empty < 0)
5519 return -1;
5520 if (empty)
5521 continue;
5522 if (is_any_validity(&graph->edge[i]))
5523 return 1;
5526 return 0;
5529 /* Should we apply a Feautrier step?
5530 * That is, did the user request the Feautrier algorithm and are
5531 * there any validity dependences (left)?
5533 static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
5535 if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
5536 return 0;
5538 return has_validity_edges(graph);
5541 /* Compute a schedule for a connected dependence graph using Feautrier's
5542 * multi-dimensional scheduling algorithm and return the updated schedule node.
5544 * The original algorithm is described in [1].
5545 * The main idea is to minimize the number of scheduling dimensions, by
5546 * trying to satisfy as many dependences as possible per scheduling dimension.
5548 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
5549 * Problem, Part II: Multi-Dimensional Time.
5550 * In Intl. Journal of Parallel Programming, 1992.
5552 static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
5553 isl_schedule_node *node, struct isl_sched_graph *graph)
5555 return carry_feautrier(node, graph);
5558 /* Turn off the "local" bit on all (condition) edges.
5560 static void clear_local_edges(struct isl_sched_graph *graph)
5562 int i;
5564 for (i = 0; i < graph->n_edge; ++i)
5565 if (is_condition(&graph->edge[i]))
5566 clear_local(&graph->edge[i]);
5569 /* Does "graph" have both condition and conditional validity edges?
5571 static int need_condition_check(struct isl_sched_graph *graph)
5573 int i;
5574 int any_condition = 0;
5575 int any_conditional_validity = 0;
5577 for (i = 0; i < graph->n_edge; ++i) {
5578 if (is_condition(&graph->edge[i]))
5579 any_condition = 1;
5580 if (is_conditional_validity(&graph->edge[i]))
5581 any_conditional_validity = 1;
5584 return any_condition && any_conditional_validity;
5587 /* Does "graph" contain any coincidence edge?
5589 static int has_any_coincidence(struct isl_sched_graph *graph)
5591 int i;
5593 for (i = 0; i < graph->n_edge; ++i)
5594 if (is_coincidence(&graph->edge[i]))
5595 return 1;
5597 return 0;
5600 /* Extract the final schedule row as a map with the iteration domain
5601 * of "node" as domain.
5603 static __isl_give isl_map *final_row(struct isl_sched_node *node)
5605 isl_multi_aff *ma;
5606 isl_size n_row;
5608 n_row = isl_mat_rows(node->sched);
5609 if (n_row < 0)
5610 return NULL;
5611 ma = node_extract_partial_schedule_multi_aff(node, n_row - 1, 1);
5612 return isl_map_from_multi_aff(ma);
5615 /* Is the conditional validity dependence in the edge with index "edge_index"
5616 * violated by the latest (i.e., final) row of the schedule?
5617 * That is, is i scheduled after j
5618 * for any conditional validity dependence i -> j?
5620 static int is_violated(struct isl_sched_graph *graph, int edge_index)
5622 isl_map *src_sched, *dst_sched, *map;
5623 struct isl_sched_edge *edge = &graph->edge[edge_index];
5624 int empty;
5626 src_sched = final_row(edge->src);
5627 dst_sched = final_row(edge->dst);
5628 map = isl_map_copy(edge->map);
5629 map = isl_map_apply_domain(map, src_sched);
5630 map = isl_map_apply_range(map, dst_sched);
5631 map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
5632 empty = isl_map_is_empty(map);
5633 isl_map_free(map);
5635 if (empty < 0)
5636 return -1;
5638 return !empty;
5641 /* Does "graph" have any satisfied condition edges that
5642 * are adjacent to the conditional validity constraint with
5643 * domain "conditional_source" and range "conditional_sink"?
5645 * A satisfied condition is one that is not local.
5646 * If a condition was forced to be local already (i.e., marked as local)
5647 * then there is no need to check if it is in fact local.
5649 * Additionally, mark all adjacent condition edges found as local.
5651 static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
5652 __isl_keep isl_union_set *conditional_source,
5653 __isl_keep isl_union_set *conditional_sink)
5655 int i;
5656 int any = 0;
5658 for (i = 0; i < graph->n_edge; ++i) {
5659 int adjacent, local;
5660 isl_union_map *condition;
5662 if (!is_condition(&graph->edge[i]))
5663 continue;
5664 if (is_local(&graph->edge[i]))
5665 continue;
5667 condition = graph->edge[i].tagged_condition;
5668 adjacent = domain_intersects(condition, conditional_sink);
5669 if (adjacent >= 0 && !adjacent)
5670 adjacent = range_intersects(condition,
5671 conditional_source);
5672 if (adjacent < 0)
5673 return -1;
5674 if (!adjacent)
5675 continue;
5677 set_local(&graph->edge[i]);
5679 local = is_condition_false(&graph->edge[i]);
5680 if (local < 0)
5681 return -1;
5682 if (!local)
5683 any = 1;
5686 return any;
5689 /* Are there any violated conditional validity dependences with
5690 * adjacent condition dependences that are not local with respect
5691 * to the current schedule?
5692 * That is, is the conditional validity constraint violated?
5694 * Additionally, mark all those adjacent condition dependences as local.
5695 * We also mark those adjacent condition dependences that were not marked
5696 * as local before, but just happened to be local already. This ensures
5697 * that they remain local if the schedule is recomputed.
5699 * We first collect domain and range of all violated conditional validity
5700 * dependences and then check if there are any adjacent non-local
5701 * condition dependences.
5703 static int has_violated_conditional_constraint(isl_ctx *ctx,
5704 struct isl_sched_graph *graph)
5706 int i;
5707 int any = 0;
5708 isl_union_set *source, *sink;
5710 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
5711 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
5712 for (i = 0; i < graph->n_edge; ++i) {
5713 isl_union_set *uset;
5714 isl_union_map *umap;
5715 int violated;
5717 if (!is_conditional_validity(&graph->edge[i]))
5718 continue;
5720 violated = is_violated(graph, i);
5721 if (violated < 0)
5722 goto error;
5723 if (!violated)
5724 continue;
5726 any = 1;
5728 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
5729 uset = isl_union_map_domain(umap);
5730 source = isl_union_set_union(source, uset);
5731 source = isl_union_set_coalesce(source);
5733 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
5734 uset = isl_union_map_range(umap);
5735 sink = isl_union_set_union(sink, uset);
5736 sink = isl_union_set_coalesce(sink);
5739 if (any)
5740 any = has_adjacent_true_conditions(graph, source, sink);
5742 isl_union_set_free(source);
5743 isl_union_set_free(sink);
5744 return any;
5745 error:
5746 isl_union_set_free(source);
5747 isl_union_set_free(sink);
5748 return -1;
5751 /* Examine the current band (the rows between graph->band_start and
5752 * graph->n_total_row), deciding whether to drop it or add it to "node"
5753 * and then continue with the computation of the next band, if any.
5754 * If "initialized" is set, then it may be assumed that compute_maxvar
5755 * has been called on the current band. Otherwise, call
5756 * compute_maxvar if and before carry_dependences gets called.
5758 * The caller keeps looking for a new row as long as
5759 * graph->n_row < graph->maxvar. If the latest attempt to find
5760 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
5761 * then we either
5762 * - split between SCCs and start over (assuming we found an interesting
5763 * pair of SCCs between which to split)
5764 * - continue with the next band (assuming the current band has at least
5765 * one row)
5766 * - if there is more than one SCC left, then split along all SCCs
5767 * - if outer coincidence needs to be enforced, then try to carry as many
5768 * validity or coincidence dependences as possible and
5769 * continue with the next band
5770 * - try to carry as many validity dependences as possible and
5771 * continue with the next band
5772 * In each case, we first insert a band node in the schedule tree
5773 * if any rows have been computed.
5775 * If the caller managed to complete the schedule and the current band
5776 * is empty, then finish off by topologically
5777 * sorting the statements based on the remaining dependences.
5778 * If, on the other hand, the current band has at least one row,
5779 * then continue with the next band. Note that this next band
5780 * will necessarily be empty, but the graph may still be split up
5781 * into weakly connected components before arriving back here.
5783 static __isl_give isl_schedule_node *compute_schedule_finish_band(
5784 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5785 int initialized)
5787 int empty;
5789 if (!node)
5790 return NULL;
5792 empty = graph->n_total_row == graph->band_start;
5793 if (graph->n_row < graph->maxvar) {
5794 isl_ctx *ctx;
5796 ctx = isl_schedule_node_get_ctx(node);
5797 if (!ctx->opt->schedule_maximize_band_depth && !empty)
5798 return compute_next_band(node, graph, 1);
5799 if (graph->src_scc >= 0)
5800 return compute_split_schedule(node, graph);
5801 if (!empty)
5802 return compute_next_band(node, graph, 1);
5803 if (graph->scc > 1)
5804 return compute_component_schedule(node, graph, 1);
5805 if (!initialized && compute_maxvar(graph) < 0)
5806 return isl_schedule_node_free(node);
5807 if (isl_options_get_schedule_outer_coincidence(ctx))
5808 return carry_coincidence(node, graph);
5809 return carry_dependences(node, graph);
5812 if (!empty)
5813 return compute_next_band(node, graph, 1);
5814 return sort_statements(node, graph, initialized);
5817 /* Construct a band of schedule rows for a connected dependence graph.
5818 * The caller is responsible for determining the strongly connected
5819 * components and calling compute_maxvar first.
5821 * We try to find a sequence of as many schedule rows as possible that result
5822 * in non-negative dependence distances (independent of the previous rows
5823 * in the sequence, i.e., such that the sequence is tilable), with as
5824 * many of the initial rows as possible satisfying the coincidence constraints.
5825 * The computation stops if we can't find any more rows or if we have found
5826 * all the rows we wanted to find.
5828 * If ctx->opt->schedule_outer_coincidence is set, then we force the
5829 * outermost dimension to satisfy the coincidence constraints. If this
5830 * turns out to be impossible, we fall back on the general scheme above
5831 * and try to carry as many dependences as possible.
5833 * If "graph" contains both condition and conditional validity dependences,
5834 * then we need to check that that the conditional schedule constraint
5835 * is satisfied, i.e., there are no violated conditional validity dependences
5836 * that are adjacent to any non-local condition dependences.
5837 * If there are, then we mark all those adjacent condition dependences
5838 * as local and recompute the current band. Those dependences that
5839 * are marked local will then be forced to be local.
5840 * The initial computation is performed with no dependences marked as local.
5841 * If we are lucky, then there will be no violated conditional validity
5842 * dependences adjacent to any non-local condition dependences.
5843 * Otherwise, we mark some additional condition dependences as local and
5844 * recompute. We continue this process until there are no violations left or
5845 * until we are no longer able to compute a schedule.
5846 * Since there are only a finite number of dependences,
5847 * there will only be a finite number of iterations.
5849 static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
5850 struct isl_sched_graph *graph)
5852 int has_coincidence;
5853 int use_coincidence;
5854 int force_coincidence = 0;
5855 int check_conditional;
5857 if (sort_sccs(graph) < 0)
5858 return isl_stat_error;
5860 clear_local_edges(graph);
5861 check_conditional = need_condition_check(graph);
5862 has_coincidence = has_any_coincidence(graph);
5864 if (ctx->opt->schedule_outer_coincidence)
5865 force_coincidence = 1;
5867 use_coincidence = has_coincidence;
5868 while (graph->n_row < graph->maxvar) {
5869 isl_vec *sol;
5870 int violated;
5871 int coincident;
5873 graph->src_scc = -1;
5874 graph->dst_scc = -1;
5876 if (setup_lp(ctx, graph, use_coincidence) < 0)
5877 return isl_stat_error;
5878 sol = solve_lp(ctx, graph);
5879 if (!sol)
5880 return isl_stat_error;
5881 if (sol->size == 0) {
5882 int empty = graph->n_total_row == graph->band_start;
5884 isl_vec_free(sol);
5885 if (use_coincidence && (!force_coincidence || !empty)) {
5886 use_coincidence = 0;
5887 continue;
5889 return isl_stat_ok;
5891 coincident = !has_coincidence || use_coincidence;
5892 if (update_schedule(graph, sol, coincident) < 0)
5893 return isl_stat_error;
5895 if (!check_conditional)
5896 continue;
5897 violated = has_violated_conditional_constraint(ctx, graph);
5898 if (violated < 0)
5899 return isl_stat_error;
5900 if (!violated)
5901 continue;
5902 if (reset_band(graph) < 0)
5903 return isl_stat_error;
5904 use_coincidence = has_coincidence;
5907 return isl_stat_ok;
5910 /* Compute a schedule for a connected dependence graph by considering
5911 * the graph as a whole and return the updated schedule node.
5913 * The actual schedule rows of the current band are computed by
5914 * compute_schedule_wcc_band. compute_schedule_finish_band takes
5915 * care of integrating the band into "node" and continuing
5916 * the computation.
5918 static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
5919 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5921 isl_ctx *ctx;
5923 if (!node)
5924 return NULL;
5926 ctx = isl_schedule_node_get_ctx(node);
5927 if (compute_schedule_wcc_band(ctx, graph) < 0)
5928 return isl_schedule_node_free(node);
5930 return compute_schedule_finish_band(node, graph, 1);
5933 /* Clustering information used by compute_schedule_wcc_clustering.
5935 * "n" is the number of SCCs in the original dependence graph
5936 * "scc" is an array of "n" elements, each representing an SCC
5937 * of the original dependence graph. All entries in the same cluster
5938 * have the same number of schedule rows.
5939 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
5940 * where each cluster is represented by the index of the first SCC
5941 * in the cluster. Initially, each SCC belongs to a cluster containing
5942 * only that SCC.
5944 * "scc_in_merge" is used by merge_clusters_along_edge to keep
5945 * track of which SCCs need to be merged.
5947 * "cluster" contains the merged clusters of SCCs after the clustering
5948 * has completed.
5950 * "scc_node" is a temporary data structure used inside copy_partial.
5951 * For each SCC, it keeps track of the number of nodes in the SCC
5952 * that have already been copied.
5954 struct isl_clustering {
5955 int n;
5956 struct isl_sched_graph *scc;
5957 struct isl_sched_graph *cluster;
5958 int *scc_cluster;
5959 int *scc_node;
5960 int *scc_in_merge;
5963 /* Initialize the clustering data structure "c" from "graph".
5965 * In particular, allocate memory, extract the SCCs from "graph"
5966 * into c->scc, initialize scc_cluster and construct
5967 * a band of schedule rows for each SCC.
5968 * Within each SCC, there is only one SCC by definition.
5969 * Each SCC initially belongs to a cluster containing only that SCC.
5971 static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
5972 struct isl_sched_graph *graph)
5974 int i;
5976 c->n = graph->scc;
5977 c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
5978 c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
5979 c->scc_cluster = isl_calloc_array(ctx, int, c->n);
5980 c->scc_node = isl_calloc_array(ctx, int, c->n);
5981 c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
5982 if (!c->scc || !c->cluster ||
5983 !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
5984 return isl_stat_error;
5986 for (i = 0; i < c->n; ++i) {
5987 if (extract_sub_graph(ctx, graph, &isl_sched_node_scc_exactly,
5988 &edge_scc_exactly, i, &c->scc[i]) < 0)
5989 return isl_stat_error;
5990 c->scc[i].scc = 1;
5991 if (compute_maxvar(&c->scc[i]) < 0)
5992 return isl_stat_error;
5993 if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
5994 return isl_stat_error;
5995 c->scc_cluster[i] = i;
5998 return isl_stat_ok;
6001 /* Free all memory allocated for "c".
6003 static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
6005 int i;
6007 if (c->scc)
6008 for (i = 0; i < c->n; ++i)
6009 graph_free(ctx, &c->scc[i]);
6010 free(c->scc);
6011 if (c->cluster)
6012 for (i = 0; i < c->n; ++i)
6013 graph_free(ctx, &c->cluster[i]);
6014 free(c->cluster);
6015 free(c->scc_cluster);
6016 free(c->scc_node);
6017 free(c->scc_in_merge);
6020 /* Should we refrain from merging the cluster in "graph" with
6021 * any other cluster?
6022 * In particular, is its current schedule band empty and incomplete.
6024 static int bad_cluster(struct isl_sched_graph *graph)
6026 return graph->n_row < graph->maxvar &&
6027 graph->n_total_row == graph->band_start;
6030 /* Is "edge" a proximity edge with a non-empty dependence relation?
6032 static isl_bool is_non_empty_proximity(struct isl_sched_edge *edge)
6034 if (!is_proximity(edge))
6035 return isl_bool_false;
6036 return isl_bool_not(isl_map_plain_is_empty(edge->map));
6039 /* Return the index of an edge in "graph" that can be used to merge
6040 * two clusters in "c".
6041 * Return graph->n_edge if no such edge can be found.
6042 * Return -1 on error.
6044 * In particular, return a proximity edge between two clusters
6045 * that is not marked "no_merge" and such that neither of the
6046 * two clusters has an incomplete, empty band.
6048 * If there are multiple such edges, then try and find the most
6049 * appropriate edge to use for merging. In particular, pick the edge
6050 * with the greatest weight. If there are multiple of those,
6051 * then pick one with the shortest distance between
6052 * the two cluster representatives.
6054 static int find_proximity(struct isl_sched_graph *graph,
6055 struct isl_clustering *c)
6057 int i, best = graph->n_edge, best_dist, best_weight;
6059 for (i = 0; i < graph->n_edge; ++i) {
6060 struct isl_sched_edge *edge = &graph->edge[i];
6061 int dist, weight;
6062 isl_bool prox;
6064 prox = is_non_empty_proximity(edge);
6065 if (prox < 0)
6066 return -1;
6067 if (!prox)
6068 continue;
6069 if (edge->no_merge)
6070 continue;
6071 if (bad_cluster(&c->scc[edge->src->scc]) ||
6072 bad_cluster(&c->scc[edge->dst->scc]))
6073 continue;
6074 dist = c->scc_cluster[edge->dst->scc] -
6075 c->scc_cluster[edge->src->scc];
6076 if (dist == 0)
6077 continue;
6078 weight = edge->weight;
6079 if (best < graph->n_edge) {
6080 if (best_weight > weight)
6081 continue;
6082 if (best_weight == weight && best_dist <= dist)
6083 continue;
6085 best = i;
6086 best_dist = dist;
6087 best_weight = weight;
6090 return best;
6093 /* Internal data structure used in mark_merge_sccs.
6095 * "graph" is the dependence graph in which a strongly connected
6096 * component is constructed.
6097 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
6098 * "src" and "dst" are the indices of the nodes that are being merged.
6100 struct isl_mark_merge_sccs_data {
6101 struct isl_sched_graph *graph;
6102 int *scc_cluster;
6103 int src;
6104 int dst;
6107 /* Check whether the cluster containing node "i" depends on the cluster
6108 * containing node "j". If "i" and "j" belong to the same cluster,
6109 * then they are taken to depend on each other to ensure that
6110 * the resulting strongly connected component consists of complete
6111 * clusters. Furthermore, if "i" and "j" are the two nodes that
6112 * are being merged, then they are taken to depend on each other as well.
6113 * Otherwise, check if there is a (conditional) validity dependence
6114 * from node[j] to node[i], forcing node[i] to follow node[j].
6116 static isl_bool cluster_follows(int i, int j, void *user)
6118 struct isl_mark_merge_sccs_data *data = user;
6119 struct isl_sched_graph *graph = data->graph;
6120 int *scc_cluster = data->scc_cluster;
6122 if (data->src == i && data->dst == j)
6123 return isl_bool_true;
6124 if (data->src == j && data->dst == i)
6125 return isl_bool_true;
6126 if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
6127 return isl_bool_true;
6129 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
6132 /* Mark all SCCs that belong to either of the two clusters in "c"
6133 * connected by the edge in "graph" with index "edge", or to any
6134 * of the intermediate clusters.
6135 * The marking is recorded in c->scc_in_merge.
6137 * The given edge has been selected for merging two clusters,
6138 * meaning that there is at least a proximity edge between the two nodes.
6139 * However, there may also be (indirect) validity dependences
6140 * between the two nodes. When merging the two clusters, all clusters
6141 * containing one or more of the intermediate nodes along the
6142 * indirect validity dependences need to be merged in as well.
6144 * First collect all such nodes by computing the strongly connected
6145 * component (SCC) containing the two nodes connected by the edge, where
6146 * the two nodes are considered to depend on each other to make
6147 * sure they end up in the same SCC. Similarly, each node is considered
6148 * to depend on every other node in the same cluster to ensure
6149 * that the SCC consists of complete clusters.
6151 * Then the original SCCs that contain any of these nodes are marked
6152 * in c->scc_in_merge.
6154 static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
6155 int edge, struct isl_clustering *c)
6157 struct isl_mark_merge_sccs_data data;
6158 struct isl_tarjan_graph *g;
6159 int i;
6161 for (i = 0; i < c->n; ++i)
6162 c->scc_in_merge[i] = 0;
6164 data.graph = graph;
6165 data.scc_cluster = c->scc_cluster;
6166 data.src = graph->edge[edge].src - graph->node;
6167 data.dst = graph->edge[edge].dst - graph->node;
6169 g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
6170 &cluster_follows, &data);
6171 if (!g)
6172 goto error;
6174 i = g->op;
6175 if (i < 3)
6176 isl_die(ctx, isl_error_internal,
6177 "expecting at least two nodes in component",
6178 goto error);
6179 if (g->order[--i] != -1)
6180 isl_die(ctx, isl_error_internal,
6181 "expecting end of component marker", goto error);
6183 for (--i; i >= 0 && g->order[i] != -1; --i) {
6184 int scc = graph->node[g->order[i]].scc;
6185 c->scc_in_merge[scc] = 1;
6188 isl_tarjan_graph_free(g);
6189 return isl_stat_ok;
6190 error:
6191 isl_tarjan_graph_free(g);
6192 return isl_stat_error;
6195 /* Construct the identifier "cluster_i".
6197 static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
6199 char name[40];
6201 snprintf(name, sizeof(name), "cluster_%d", i);
6202 return isl_id_alloc(ctx, name, NULL);
6205 /* Construct the space of the cluster with index "i" containing
6206 * the strongly connected component "scc".
6208 * In particular, construct a space called cluster_i with dimension equal
6209 * to the number of schedule rows in the current band of "scc".
6211 static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
6213 int nvar;
6214 isl_space *space;
6215 isl_id *id;
6217 nvar = scc->n_total_row - scc->band_start;
6218 space = isl_space_copy(scc->node[0].space);
6219 space = isl_space_params(space);
6220 space = isl_space_set_from_params(space);
6221 space = isl_space_add_dims(space, isl_dim_set, nvar);
6222 id = cluster_id(isl_space_get_ctx(space), i);
6223 space = isl_space_set_tuple_id(space, isl_dim_set, id);
6225 return space;
6228 /* Collect the domain of the graph for merging clusters.
6230 * In particular, for each cluster with first SCC "i", construct
6231 * a set in the space called cluster_i with dimension equal
6232 * to the number of schedule rows in the current band of the cluster.
6234 static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
6235 struct isl_sched_graph *graph, struct isl_clustering *c)
6237 int i;
6238 isl_space *space;
6239 isl_union_set *domain;
6241 space = isl_space_params_alloc(ctx, 0);
6242 domain = isl_union_set_empty(space);
6244 for (i = 0; i < graph->scc; ++i) {
6245 isl_space *space;
6247 if (!c->scc_in_merge[i])
6248 continue;
6249 if (c->scc_cluster[i] != i)
6250 continue;
6251 space = cluster_space(&c->scc[i], i);
6252 domain = isl_union_set_add_set(domain, isl_set_universe(space));
6255 return domain;
6258 /* Construct a map from the original instances to the corresponding
6259 * cluster instance in the current bands of the clusters in "c".
6261 static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
6262 struct isl_sched_graph *graph, struct isl_clustering *c)
6264 int i, j;
6265 isl_space *space;
6266 isl_union_map *cluster_map;
6268 space = isl_space_params_alloc(ctx, 0);
6269 cluster_map = isl_union_map_empty(space);
6270 for (i = 0; i < graph->scc; ++i) {
6271 int start, n;
6272 isl_id *id;
6274 if (!c->scc_in_merge[i])
6275 continue;
6277 id = cluster_id(ctx, c->scc_cluster[i]);
6278 start = c->scc[i].band_start;
6279 n = c->scc[i].n_total_row - start;
6280 for (j = 0; j < c->scc[i].n; ++j) {
6281 isl_multi_aff *ma;
6282 isl_map *map;
6283 struct isl_sched_node *node = &c->scc[i].node[j];
6285 ma = node_extract_partial_schedule_multi_aff(node,
6286 start, n);
6287 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
6288 isl_id_copy(id));
6289 map = isl_map_from_multi_aff(ma);
6290 cluster_map = isl_union_map_add_map(cluster_map, map);
6292 isl_id_free(id);
6295 return cluster_map;
6298 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
6299 * that are not isl_edge_condition or isl_edge_conditional_validity.
6301 static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
6302 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
6303 __isl_take isl_schedule_constraints *sc)
6305 enum isl_edge_type t;
6307 if (!sc)
6308 return NULL;
6310 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
6311 if (t == isl_edge_condition ||
6312 t == isl_edge_conditional_validity)
6313 continue;
6314 if (!isl_sched_edge_has_type(edge, t))
6315 continue;
6316 sc = isl_schedule_constraints_add(sc, t,
6317 isl_union_map_copy(umap));
6320 return sc;
6323 /* Add schedule constraints of types isl_edge_condition and
6324 * isl_edge_conditional_validity to "sc" by applying "umap" to
6325 * the domains of the wrapped relations in domain and range
6326 * of the corresponding tagged constraints of "edge".
6328 static __isl_give isl_schedule_constraints *add_conditional_constraints(
6329 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
6330 __isl_take isl_schedule_constraints *sc)
6332 enum isl_edge_type t;
6333 isl_union_map *tagged;
6335 for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
6336 if (!isl_sched_edge_has_type(edge, t))
6337 continue;
6338 if (t == isl_edge_condition)
6339 tagged = isl_union_map_copy(edge->tagged_condition);
6340 else
6341 tagged = isl_union_map_copy(edge->tagged_validity);
6342 tagged = isl_union_map_zip(tagged);
6343 tagged = isl_union_map_apply_domain(tagged,
6344 isl_union_map_copy(umap));
6345 tagged = isl_union_map_zip(tagged);
6346 sc = isl_schedule_constraints_add(sc, t, tagged);
6347 if (!sc)
6348 return NULL;
6351 return sc;
6354 /* Given a mapping "cluster_map" from the original instances to
6355 * the cluster instances, add schedule constraints on the clusters
6356 * to "sc" corresponding to the original constraints represented by "edge".
6358 * For non-tagged dependence constraints, the cluster constraints
6359 * are obtained by applying "cluster_map" to the edge->map.
6361 * For tagged dependence constraints, "cluster_map" needs to be applied
6362 * to the domains of the wrapped relations in domain and range
6363 * of the tagged dependence constraints. Pick out the mappings
6364 * from these domains from "cluster_map" and construct their product.
6365 * This mapping can then be applied to the pair of domains.
6367 static __isl_give isl_schedule_constraints *collect_edge_constraints(
6368 struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
6369 __isl_take isl_schedule_constraints *sc)
6371 isl_union_map *umap;
6372 isl_space *space;
6373 isl_union_set *uset;
6374 isl_union_map *umap1, *umap2;
6376 if (!sc)
6377 return NULL;
6379 umap = isl_union_map_from_map(isl_map_copy(edge->map));
6380 umap = isl_union_map_apply_domain(umap,
6381 isl_union_map_copy(cluster_map));
6382 umap = isl_union_map_apply_range(umap,
6383 isl_union_map_copy(cluster_map));
6384 sc = add_non_conditional_constraints(edge, umap, sc);
6385 isl_union_map_free(umap);
6387 if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
6388 return sc;
6390 space = isl_space_domain(isl_map_get_space(edge->map));
6391 uset = isl_union_set_from_set(isl_set_universe(space));
6392 umap1 = isl_union_map_copy(cluster_map);
6393 umap1 = isl_union_map_intersect_domain(umap1, uset);
6394 space = isl_space_range(isl_map_get_space(edge->map));
6395 uset = isl_union_set_from_set(isl_set_universe(space));
6396 umap2 = isl_union_map_copy(cluster_map);
6397 umap2 = isl_union_map_intersect_domain(umap2, uset);
6398 umap = isl_union_map_product(umap1, umap2);
6400 sc = add_conditional_constraints(edge, umap, sc);
6402 isl_union_map_free(umap);
6403 return sc;
6406 /* Given a mapping "cluster_map" from the original instances to
6407 * the cluster instances, add schedule constraints on the clusters
6408 * to "sc" corresponding to all edges in "graph" between nodes that
6409 * belong to SCCs that are marked for merging in "scc_in_merge".
6411 static __isl_give isl_schedule_constraints *collect_constraints(
6412 struct isl_sched_graph *graph, int *scc_in_merge,
6413 __isl_keep isl_union_map *cluster_map,
6414 __isl_take isl_schedule_constraints *sc)
6416 int i;
6418 for (i = 0; i < graph->n_edge; ++i) {
6419 struct isl_sched_edge *edge = &graph->edge[i];
6421 if (!scc_in_merge[edge->src->scc])
6422 continue;
6423 if (!scc_in_merge[edge->dst->scc])
6424 continue;
6425 sc = collect_edge_constraints(edge, cluster_map, sc);
6428 return sc;
6431 /* Construct a dependence graph for scheduling clusters with respect
6432 * to each other and store the result in "merge_graph".
6433 * In particular, the nodes of the graph correspond to the schedule
6434 * dimensions of the current bands of those clusters that have been
6435 * marked for merging in "c".
6437 * First construct an isl_schedule_constraints object for this domain
6438 * by transforming the edges in "graph" to the domain.
6439 * Then initialize a dependence graph for scheduling from these
6440 * constraints.
6442 static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
6443 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
6445 isl_union_set *domain;
6446 isl_union_map *cluster_map;
6447 isl_schedule_constraints *sc;
6448 isl_stat r;
6450 domain = collect_domain(ctx, graph, c);
6451 sc = isl_schedule_constraints_on_domain(domain);
6452 if (!sc)
6453 return isl_stat_error;
6454 cluster_map = collect_cluster_map(ctx, graph, c);
6455 sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
6456 isl_union_map_free(cluster_map);
6458 r = graph_init(merge_graph, sc);
6460 isl_schedule_constraints_free(sc);
6462 return r;
6465 /* Compute the maximal number of remaining schedule rows that still need
6466 * to be computed for the nodes that belong to clusters with the maximal
6467 * dimension for the current band (i.e., the band that is to be merged).
6468 * Only clusters that are about to be merged are considered.
6469 * "maxvar" is the maximal dimension for the current band.
6470 * "c" contains information about the clusters.
6472 * Return the maximal number of remaining schedule rows or
6473 * isl_size_error on error.
6475 static isl_size compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
6477 int i, j;
6478 int max_slack;
6480 max_slack = 0;
6481 for (i = 0; i < c->n; ++i) {
6482 int nvar;
6483 struct isl_sched_graph *scc;
6485 if (!c->scc_in_merge[i])
6486 continue;
6487 scc = &c->scc[i];
6488 nvar = scc->n_total_row - scc->band_start;
6489 if (nvar != maxvar)
6490 continue;
6491 for (j = 0; j < scc->n; ++j) {
6492 struct isl_sched_node *node = &scc->node[j];
6493 int slack;
6495 if (node_update_vmap(node) < 0)
6496 return isl_size_error;
6497 slack = node->nvar - node->rank;
6498 if (slack > max_slack)
6499 max_slack = slack;
6503 return max_slack;
6506 /* If there are any clusters where the dimension of the current band
6507 * (i.e., the band that is to be merged) is smaller than "maxvar" and
6508 * if there are any nodes in such a cluster where the number
6509 * of remaining schedule rows that still need to be computed
6510 * is greater than "max_slack", then return the smallest current band
6511 * dimension of all these clusters. Otherwise return the original value
6512 * of "maxvar". Return isl_size_error in case of any error.
6513 * Only clusters that are about to be merged are considered.
6514 * "c" contains information about the clusters.
6516 static isl_size limit_maxvar_to_slack(int maxvar, int max_slack,
6517 struct isl_clustering *c)
6519 int i, j;
6521 for (i = 0; i < c->n; ++i) {
6522 int nvar;
6523 struct isl_sched_graph *scc;
6525 if (!c->scc_in_merge[i])
6526 continue;
6527 scc = &c->scc[i];
6528 nvar = scc->n_total_row - scc->band_start;
6529 if (nvar >= maxvar)
6530 continue;
6531 for (j = 0; j < scc->n; ++j) {
6532 struct isl_sched_node *node = &scc->node[j];
6533 int slack;
6535 if (node_update_vmap(node) < 0)
6536 return isl_size_error;
6537 slack = node->nvar - node->rank;
6538 if (slack > max_slack) {
6539 maxvar = nvar;
6540 break;
6545 return maxvar;
6548 /* Adjust merge_graph->maxvar based on the number of remaining schedule rows
6549 * that still need to be computed. In particular, if there is a node
6550 * in a cluster where the dimension of the current band is smaller
6551 * than merge_graph->maxvar, but the number of remaining schedule rows
6552 * is greater than that of any node in a cluster with the maximal
6553 * dimension for the current band (i.e., merge_graph->maxvar),
6554 * then adjust merge_graph->maxvar to the (smallest) current band dimension
6555 * of those clusters. Without this adjustment, the total number of
6556 * schedule dimensions would be increased, resulting in a skewed view
6557 * of the number of coincident dimensions.
6558 * "c" contains information about the clusters.
6560 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
6561 * then there is no point in attempting any merge since it will be rejected
6562 * anyway. Set merge_graph->maxvar to zero in such cases.
6564 static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
6565 struct isl_sched_graph *merge_graph, struct isl_clustering *c)
6567 isl_size max_slack, maxvar;
6569 max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
6570 if (max_slack < 0)
6571 return isl_stat_error;
6572 maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
6573 if (maxvar < 0)
6574 return isl_stat_error;
6576 if (maxvar < merge_graph->maxvar) {
6577 if (isl_options_get_schedule_maximize_band_depth(ctx))
6578 merge_graph->maxvar = 0;
6579 else
6580 merge_graph->maxvar = maxvar;
6583 return isl_stat_ok;
6586 /* Return the number of coincident dimensions in the current band of "graph",
6587 * where the nodes of "graph" are assumed to be scheduled by a single band.
6589 static int get_n_coincident(struct isl_sched_graph *graph)
6591 int i;
6593 for (i = graph->band_start; i < graph->n_total_row; ++i)
6594 if (!graph->node[0].coincident[i])
6595 break;
6597 return i - graph->band_start;
6600 /* Should the clusters be merged based on the cluster schedule
6601 * in the current (and only) band of "merge_graph", given that
6602 * coincidence should be maximized?
6604 * If the number of coincident schedule dimensions in the merged band
6605 * would be less than the maximal number of coincident schedule dimensions
6606 * in any of the merged clusters, then the clusters should not be merged.
6608 static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
6609 struct isl_sched_graph *merge_graph)
6611 int i;
6612 int n_coincident;
6613 int max_coincident;
6615 max_coincident = 0;
6616 for (i = 0; i < c->n; ++i) {
6617 if (!c->scc_in_merge[i])
6618 continue;
6619 n_coincident = get_n_coincident(&c->scc[i]);
6620 if (n_coincident > max_coincident)
6621 max_coincident = n_coincident;
6624 n_coincident = get_n_coincident(merge_graph);
6626 return isl_bool_ok(n_coincident >= max_coincident);
6629 /* Return the transformation on "node" expressed by the current (and only)
6630 * band of "merge_graph" applied to the clusters in "c".
6632 * First find the representation of "node" in its SCC in "c" and
6633 * extract the transformation expressed by the current band.
6634 * Then extract the transformation applied by "merge_graph"
6635 * to the cluster to which this SCC belongs.
6636 * Combine the two to obtain the complete transformation on the node.
6638 * Note that the range of the first transformation is an anonymous space,
6639 * while the domain of the second is named "cluster_X". The range
6640 * of the former therefore needs to be adjusted before the two
6641 * can be combined.
6643 static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
6644 struct isl_sched_node *node, struct isl_clustering *c,
6645 struct isl_sched_graph *merge_graph)
6647 struct isl_sched_node *scc_node, *cluster_node;
6648 int start, n;
6649 isl_id *id;
6650 isl_space *space;
6651 isl_multi_aff *ma, *ma2;
6653 scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
6654 if (scc_node && !is_node(&c->scc[node->scc], scc_node))
6655 isl_die(ctx, isl_error_internal, "unable to find node",
6656 return NULL);
6657 start = c->scc[node->scc].band_start;
6658 n = c->scc[node->scc].n_total_row - start;
6659 ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
6660 space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
6661 cluster_node = graph_find_node(ctx, merge_graph, space);
6662 if (cluster_node && !is_node(merge_graph, cluster_node))
6663 isl_die(ctx, isl_error_internal, "unable to find cluster",
6664 space = isl_space_free(space));
6665 id = isl_space_get_tuple_id(space, isl_dim_set);
6666 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
6667 isl_space_free(space);
6668 n = merge_graph->n_total_row;
6669 ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
6670 ma = isl_multi_aff_pullback_multi_aff(ma2, ma);
6672 return isl_map_from_multi_aff(ma);
6675 /* Give a set of distances "set", are they bounded by a small constant
6676 * in direction "pos"?
6677 * In practice, check if they are bounded by 2 by checking that there
6678 * are no elements with a value greater than or equal to 3 or
6679 * smaller than or equal to -3.
6681 static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
6683 isl_bool bounded;
6684 isl_set *test;
6686 if (!set)
6687 return isl_bool_error;
6689 test = isl_set_copy(set);
6690 test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
6691 bounded = isl_set_is_empty(test);
6692 isl_set_free(test);
6694 if (bounded < 0 || !bounded)
6695 return bounded;
6697 test = isl_set_copy(set);
6698 test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
6699 bounded = isl_set_is_empty(test);
6700 isl_set_free(test);
6702 return bounded;
6705 /* Does the set "set" have a fixed (but possible parametric) value
6706 * at dimension "pos"?
6708 static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
6710 isl_size n;
6711 isl_bool single;
6713 n = isl_set_dim(set, isl_dim_set);
6714 if (n < 0)
6715 return isl_bool_error;
6716 set = isl_set_copy(set);
6717 set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
6718 set = isl_set_project_out(set, isl_dim_set, 0, pos);
6719 single = isl_set_is_singleton(set);
6720 isl_set_free(set);
6722 return single;
6725 /* Does "map" have a fixed (but possible parametric) value
6726 * at dimension "pos" of either its domain or its range?
6728 static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
6730 isl_set *set;
6731 isl_bool single;
6733 set = isl_map_domain(isl_map_copy(map));
6734 single = has_single_value(set, pos);
6735 isl_set_free(set);
6737 if (single < 0 || single)
6738 return single;
6740 set = isl_map_range(isl_map_copy(map));
6741 single = has_single_value(set, pos);
6742 isl_set_free(set);
6744 return single;
6747 /* Does the edge "edge" from "graph" have bounded dependence distances
6748 * in the merged graph "merge_graph" of a selection of clusters in "c"?
6750 * Extract the complete transformations of the source and destination
6751 * nodes of the edge, apply them to the edge constraints and
6752 * compute the differences. Finally, check if these differences are bounded
6753 * in each direction.
6755 * If the dimension of the band is greater than the number of
6756 * dimensions that can be expected to be optimized by the edge
6757 * (based on its weight), then also allow the differences to be unbounded
6758 * in the remaining dimensions, but only if either the source or
6759 * the destination has a fixed value in that direction.
6760 * This allows a statement that produces values that are used by
6761 * several instances of another statement to be merged with that
6762 * other statement.
6763 * However, merging such clusters will introduce an inherently
6764 * large proximity distance inside the merged cluster, meaning
6765 * that proximity distances will no longer be optimized in
6766 * subsequent merges. These merges are therefore only allowed
6767 * after all other possible merges have been tried.
6768 * The first time such a merge is encountered, the weight of the edge
6769 * is replaced by a negative weight. The second time (i.e., after
6770 * all merges over edges with a non-negative weight have been tried),
6771 * the merge is allowed.
6773 static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
6774 struct isl_sched_graph *graph, struct isl_clustering *c,
6775 struct isl_sched_graph *merge_graph)
6777 int i, n_slack;
6778 isl_size n;
6779 isl_bool bounded;
6780 isl_map *map, *t;
6781 isl_set *dist;
6783 map = isl_map_copy(edge->map);
6784 t = extract_node_transformation(ctx, edge->src, c, merge_graph);
6785 map = isl_map_apply_domain(map, t);
6786 t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
6787 map = isl_map_apply_range(map, t);
6788 dist = isl_map_deltas(isl_map_copy(map));
6790 bounded = isl_bool_true;
6791 n = isl_set_dim(dist, isl_dim_set);
6792 if (n < 0)
6793 goto error;
6794 n_slack = n - edge->weight;
6795 if (edge->weight < 0)
6796 n_slack -= graph->max_weight + 1;
6797 for (i = 0; i < n; ++i) {
6798 isl_bool bounded_i, singular_i;
6800 bounded_i = distance_is_bounded(dist, i);
6801 if (bounded_i < 0)
6802 goto error;
6803 if (bounded_i)
6804 continue;
6805 if (edge->weight >= 0)
6806 bounded = isl_bool_false;
6807 n_slack--;
6808 if (n_slack < 0)
6809 break;
6810 singular_i = has_singular_src_or_dst(map, i);
6811 if (singular_i < 0)
6812 goto error;
6813 if (singular_i)
6814 continue;
6815 bounded = isl_bool_false;
6816 break;
6818 if (!bounded && i >= n && edge->weight >= 0)
6819 edge->weight -= graph->max_weight + 1;
6820 isl_map_free(map);
6821 isl_set_free(dist);
6823 return bounded;
6824 error:
6825 isl_map_free(map);
6826 isl_set_free(dist);
6827 return isl_bool_error;
6830 /* Should the clusters be merged based on the cluster schedule
6831 * in the current (and only) band of "merge_graph"?
6832 * "graph" is the original dependence graph, while "c" records
6833 * which SCCs are involved in the latest merge.
6835 * In particular, is there at least one proximity constraint
6836 * that is optimized by the merge?
6838 * A proximity constraint is considered to be optimized
6839 * if the dependence distances are small.
6841 static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
6842 struct isl_sched_graph *graph, struct isl_clustering *c,
6843 struct isl_sched_graph *merge_graph)
6845 int i;
6847 for (i = 0; i < graph->n_edge; ++i) {
6848 struct isl_sched_edge *edge = &graph->edge[i];
6849 isl_bool bounded;
6851 if (!is_proximity(edge))
6852 continue;
6853 if (!c->scc_in_merge[edge->src->scc])
6854 continue;
6855 if (!c->scc_in_merge[edge->dst->scc])
6856 continue;
6857 if (c->scc_cluster[edge->dst->scc] ==
6858 c->scc_cluster[edge->src->scc])
6859 continue;
6860 bounded = has_bounded_distances(ctx, edge, graph, c,
6861 merge_graph);
6862 if (bounded < 0 || bounded)
6863 return bounded;
6866 return isl_bool_false;
6869 /* Should the clusters be merged based on the cluster schedule
6870 * in the current (and only) band of "merge_graph"?
6871 * "graph" is the original dependence graph, while "c" records
6872 * which SCCs are involved in the latest merge.
6874 * If the current band is empty, then the clusters should not be merged.
6876 * If the band depth should be maximized and the merge schedule
6877 * is incomplete (meaning that the dimension of some of the schedule
6878 * bands in the original schedule will be reduced), then the clusters
6879 * should not be merged.
6881 * If the schedule_maximize_coincidence option is set, then check that
6882 * the number of coincident schedule dimensions is not reduced.
6884 * Finally, only allow the merge if at least one proximity
6885 * constraint is optimized.
6887 static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
6888 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
6890 if (merge_graph->n_total_row == merge_graph->band_start)
6891 return isl_bool_false;
6893 if (isl_options_get_schedule_maximize_band_depth(ctx) &&
6894 merge_graph->n_total_row < merge_graph->maxvar)
6895 return isl_bool_false;
6897 if (isl_options_get_schedule_maximize_coincidence(ctx)) {
6898 isl_bool ok;
6900 ok = ok_to_merge_coincident(c, merge_graph);
6901 if (ok < 0 || !ok)
6902 return ok;
6905 return ok_to_merge_proximity(ctx, graph, c, merge_graph);
6908 /* Apply the schedule in "t_node" to the "n" rows starting at "first"
6909 * of the schedule in "node" and return the result.
6911 * That is, essentially compute
6913 * T * N(first:first+n-1)
6915 * taking into account the constant term and the parameter coefficients
6916 * in "t_node".
6918 static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
6919 struct isl_sched_node *t_node, struct isl_sched_node *node,
6920 int first, int n)
6922 int i, j;
6923 isl_mat *t;
6924 isl_size n_row, n_col;
6925 int n_param, n_var;
6927 n_param = node->nparam;
6928 n_var = node->nvar;
6929 n_row = isl_mat_rows(t_node->sched);
6930 n_col = isl_mat_cols(node->sched);
6931 if (n_row < 0 || n_col < 0)
6932 return NULL;
6933 t = isl_mat_alloc(ctx, n_row, n_col);
6934 if (!t)
6935 return NULL;
6936 for (i = 0; i < n_row; ++i) {
6937 isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
6938 isl_seq_clr(t->row[i] + 1 + n_param, n_var);
6939 for (j = 0; j < n; ++j)
6940 isl_seq_addmul(t->row[i],
6941 t_node->sched->row[i][1 + n_param + j],
6942 node->sched->row[first + j],
6943 1 + n_param + n_var);
6945 return t;
6948 /* Apply the cluster schedule in "t_node" to the current band
6949 * schedule of the nodes in "graph".
6951 * In particular, replace the rows starting at band_start
6952 * by the result of applying the cluster schedule in "t_node"
6953 * to the original rows.
6955 * The coincidence of the schedule is determined by the coincidence
6956 * of the cluster schedule.
6958 static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
6959 struct isl_sched_node *t_node)
6961 int i, j;
6962 isl_size n_new;
6963 int start, n;
6965 start = graph->band_start;
6966 n = graph->n_total_row - start;
6968 n_new = isl_mat_rows(t_node->sched);
6969 if (n_new < 0)
6970 return isl_stat_error;
6971 for (i = 0; i < graph->n; ++i) {
6972 struct isl_sched_node *node = &graph->node[i];
6973 isl_mat *t;
6975 t = node_transformation(ctx, t_node, node, start, n);
6976 node->sched = isl_mat_drop_rows(node->sched, start, n);
6977 node->sched = isl_mat_concat(node->sched, t);
6978 node->sched_map = isl_map_free(node->sched_map);
6979 if (!node->sched)
6980 return isl_stat_error;
6981 for (j = 0; j < n_new; ++j)
6982 node->coincident[start + j] = t_node->coincident[j];
6984 graph->n_total_row -= n;
6985 graph->n_row -= n;
6986 graph->n_total_row += n_new;
6987 graph->n_row += n_new;
6989 return isl_stat_ok;
6992 /* Merge the clusters marked for merging in "c" into a single
6993 * cluster using the cluster schedule in the current band of "merge_graph".
6994 * The representative SCC for the new cluster is the SCC with
6995 * the smallest index.
6997 * The current band schedule of each SCC in the new cluster is obtained
6998 * by applying the schedule of the corresponding original cluster
6999 * to the original band schedule.
7000 * All SCCs in the new cluster have the same number of schedule rows.
7002 static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
7003 struct isl_sched_graph *merge_graph)
7005 int i;
7006 int cluster = -1;
7007 isl_space *space;
7009 for (i = 0; i < c->n; ++i) {
7010 struct isl_sched_node *node;
7012 if (!c->scc_in_merge[i])
7013 continue;
7014 if (cluster < 0)
7015 cluster = i;
7016 space = cluster_space(&c->scc[i], c->scc_cluster[i]);
7017 node = graph_find_node(ctx, merge_graph, space);
7018 isl_space_free(space);
7019 if (!node)
7020 return isl_stat_error;
7021 if (!is_node(merge_graph, node))
7022 isl_die(ctx, isl_error_internal,
7023 "unable to find cluster",
7024 return isl_stat_error);
7025 if (transform(ctx, &c->scc[i], node) < 0)
7026 return isl_stat_error;
7027 c->scc_cluster[i] = cluster;
7030 return isl_stat_ok;
7033 /* Try and merge the clusters of SCCs marked in c->scc_in_merge
7034 * by scheduling the current cluster bands with respect to each other.
7036 * Construct a dependence graph with a space for each cluster and
7037 * with the coordinates of each space corresponding to the schedule
7038 * dimensions of the current band of that cluster.
7039 * Construct a cluster schedule in this cluster dependence graph and
7040 * apply it to the current cluster bands if it is applicable
7041 * according to ok_to_merge.
7043 * If the number of remaining schedule dimensions in a cluster
7044 * with a non-maximal current schedule dimension is greater than
7045 * the number of remaining schedule dimensions in clusters
7046 * with a maximal current schedule dimension, then restrict
7047 * the number of rows to be computed in the cluster schedule
7048 * to the minimal such non-maximal current schedule dimension.
7049 * Do this by adjusting merge_graph.maxvar.
7051 * Return isl_bool_true if the clusters have effectively been merged
7052 * into a single cluster.
7054 * Note that since the standard scheduling algorithm minimizes the maximal
7055 * distance over proximity constraints, the proximity constraints between
7056 * the merged clusters may not be optimized any further than what is
7057 * sufficient to bring the distances within the limits of the internal
7058 * proximity constraints inside the individual clusters.
7059 * It may therefore make sense to perform an additional translation step
7060 * to bring the clusters closer to each other, while maintaining
7061 * the linear part of the merging schedule found using the standard
7062 * scheduling algorithm.
7064 static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
7065 struct isl_clustering *c)
7067 struct isl_sched_graph merge_graph = { 0 };
7068 isl_bool merged;
7070 if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
7071 goto error;
7073 if (compute_maxvar(&merge_graph) < 0)
7074 goto error;
7075 if (adjust_maxvar_to_slack(ctx, &merge_graph,c) < 0)
7076 goto error;
7077 if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
7078 goto error;
7079 merged = ok_to_merge(ctx, graph, c, &merge_graph);
7080 if (merged && merge(ctx, c, &merge_graph) < 0)
7081 goto error;
7083 graph_free(ctx, &merge_graph);
7084 return merged;
7085 error:
7086 graph_free(ctx, &merge_graph);
7087 return isl_bool_error;
7090 /* Is there any edge marked "no_merge" between two SCCs that are
7091 * about to be merged (i.e., that are set in "scc_in_merge")?
7092 * "merge_edge" is the proximity edge along which the clusters of SCCs
7093 * are going to be merged.
7095 * If there is any edge between two SCCs with a negative weight,
7096 * while the weight of "merge_edge" is non-negative, then this
7097 * means that the edge was postponed. "merge_edge" should then
7098 * also be postponed since merging along the edge with negative weight should
7099 * be postponed until all edges with non-negative weight have been tried.
7100 * Replace the weight of "merge_edge" by a negative weight as well and
7101 * tell the caller not to attempt a merge.
7103 static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
7104 struct isl_sched_edge *merge_edge)
7106 int i;
7108 for (i = 0; i < graph->n_edge; ++i) {
7109 struct isl_sched_edge *edge = &graph->edge[i];
7111 if (!scc_in_merge[edge->src->scc])
7112 continue;
7113 if (!scc_in_merge[edge->dst->scc])
7114 continue;
7115 if (edge->no_merge)
7116 return 1;
7117 if (merge_edge->weight >= 0 && edge->weight < 0) {
7118 merge_edge->weight -= graph->max_weight + 1;
7119 return 1;
7123 return 0;
7126 /* Merge the two clusters in "c" connected by the edge in "graph"
7127 * with index "edge" into a single cluster.
7128 * If it turns out to be impossible to merge these two clusters,
7129 * then mark the edge as "no_merge" such that it will not be
7130 * considered again.
7132 * First mark all SCCs that need to be merged. This includes the SCCs
7133 * in the two clusters, but it may also include the SCCs
7134 * of intermediate clusters.
7135 * If there is already a no_merge edge between any pair of such SCCs,
7136 * then simply mark the current edge as no_merge as well.
7137 * Likewise, if any of those edges was postponed by has_bounded_distances,
7138 * then postpone the current edge as well.
7139 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
7140 * if the clusters did not end up getting merged, unless the non-merge
7141 * is due to the fact that the edge was postponed. This postponement
7142 * can be recognized by a change in weight (from non-negative to negative).
7144 static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
7145 struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
7147 isl_bool merged;
7148 int edge_weight = graph->edge[edge].weight;
7150 if (mark_merge_sccs(ctx, graph, edge, c) < 0)
7151 return isl_stat_error;
7153 if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
7154 merged = isl_bool_false;
7155 else
7156 merged = try_merge(ctx, graph, c);
7157 if (merged < 0)
7158 return isl_stat_error;
7159 if (!merged && edge_weight == graph->edge[edge].weight)
7160 graph->edge[edge].no_merge = 1;
7162 return isl_stat_ok;
7165 /* Does "node" belong to the cluster identified by "cluster"?
7167 static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
7169 return node->cluster == cluster;
7172 /* Does "edge" connect two nodes belonging to the cluster
7173 * identified by "cluster"?
7175 static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
7177 return edge->src->cluster == cluster && edge->dst->cluster == cluster;
7180 /* Swap the schedule of "node1" and "node2".
7181 * Both nodes have been derived from the same node in a common parent graph.
7182 * Since the "coincident" field is shared with that node
7183 * in the parent graph, there is no need to also swap this field.
7185 static void swap_sched(struct isl_sched_node *node1,
7186 struct isl_sched_node *node2)
7188 isl_mat *sched;
7189 isl_map *sched_map;
7191 sched = node1->sched;
7192 node1->sched = node2->sched;
7193 node2->sched = sched;
7195 sched_map = node1->sched_map;
7196 node1->sched_map = node2->sched_map;
7197 node2->sched_map = sched_map;
7200 /* Copy the current band schedule from the SCCs that form the cluster
7201 * with index "pos" to the actual cluster at position "pos".
7202 * By construction, the index of the first SCC that belongs to the cluster
7203 * is also "pos".
7205 * The order of the nodes inside both the SCCs and the cluster
7206 * is assumed to be same as the order in the original "graph".
7208 * Since the SCC graphs will no longer be used after this function,
7209 * the schedules are actually swapped rather than copied.
7211 static isl_stat copy_partial(struct isl_sched_graph *graph,
7212 struct isl_clustering *c, int pos)
7214 int i, j;
7216 c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
7217 c->cluster[pos].n_row = c->scc[pos].n_row;
7218 c->cluster[pos].maxvar = c->scc[pos].maxvar;
7219 j = 0;
7220 for (i = 0; i < graph->n; ++i) {
7221 int k;
7222 int s;
7224 if (graph->node[i].cluster != pos)
7225 continue;
7226 s = graph->node[i].scc;
7227 k = c->scc_node[s]++;
7228 swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
7229 if (c->scc[s].maxvar > c->cluster[pos].maxvar)
7230 c->cluster[pos].maxvar = c->scc[s].maxvar;
7231 ++j;
7234 return isl_stat_ok;
7237 /* Is there a (conditional) validity dependence from node[j] to node[i],
7238 * forcing node[i] to follow node[j] or do the nodes belong to the same
7239 * cluster?
7241 static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
7243 struct isl_sched_graph *graph = user;
7245 if (graph->node[i].cluster == graph->node[j].cluster)
7246 return isl_bool_true;
7247 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
7250 /* Extract the merged clusters of SCCs in "graph", sort them, and
7251 * store them in c->clusters. Update c->scc_cluster accordingly.
7253 * First keep track of the cluster containing the SCC to which a node
7254 * belongs in the node itself.
7255 * Then extract the clusters into c->clusters, copying the current
7256 * band schedule from the SCCs that belong to the cluster.
7257 * Do this only once per cluster.
7259 * Finally, topologically sort the clusters and update c->scc_cluster
7260 * to match the new scc numbering. While the SCCs were originally
7261 * sorted already, some SCCs that depend on some other SCCs may
7262 * have been merged with SCCs that appear before these other SCCs.
7263 * A reordering may therefore be required.
7265 static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
7266 struct isl_clustering *c)
7268 int i;
7270 for (i = 0; i < graph->n; ++i)
7271 graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];
7273 for (i = 0; i < graph->scc; ++i) {
7274 if (c->scc_cluster[i] != i)
7275 continue;
7276 if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
7277 &edge_cluster_exactly, i, &c->cluster[i]) < 0)
7278 return isl_stat_error;
7279 c->cluster[i].src_scc = -1;
7280 c->cluster[i].dst_scc = -1;
7281 if (copy_partial(graph, c, i) < 0)
7282 return isl_stat_error;
7285 if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
7286 return isl_stat_error;
7287 for (i = 0; i < graph->n; ++i)
7288 c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;
7290 return isl_stat_ok;
7293 /* Compute weights on the proximity edges of "graph" that can
7294 * be used by find_proximity to find the most appropriate
7295 * proximity edge to use to merge two clusters in "c".
7296 * The weights are also used by has_bounded_distances to determine
7297 * whether the merge should be allowed.
7298 * Store the maximum of the computed weights in graph->max_weight.
7300 * The computed weight is a measure for the number of remaining schedule
7301 * dimensions that can still be completely aligned.
7302 * In particular, compute the number of equalities between
7303 * input dimensions and output dimensions in the proximity constraints.
7304 * The directions that are already handled by outer schedule bands
7305 * are projected out prior to determining this number.
7307 * Edges that will never be considered by find_proximity are ignored.
7309 static isl_stat compute_weights(struct isl_sched_graph *graph,
7310 struct isl_clustering *c)
7312 int i;
7314 graph->max_weight = 0;
7316 for (i = 0; i < graph->n_edge; ++i) {
7317 struct isl_sched_edge *edge = &graph->edge[i];
7318 struct isl_sched_node *src = edge->src;
7319 struct isl_sched_node *dst = edge->dst;
7320 isl_basic_map *hull;
7321 isl_bool prox;
7322 isl_size n_in, n_out, n;
7324 prox = is_non_empty_proximity(edge);
7325 if (prox < 0)
7326 return isl_stat_error;
7327 if (!prox)
7328 continue;
7329 if (bad_cluster(&c->scc[edge->src->scc]) ||
7330 bad_cluster(&c->scc[edge->dst->scc]))
7331 continue;
7332 if (c->scc_cluster[edge->dst->scc] ==
7333 c->scc_cluster[edge->src->scc])
7334 continue;
7336 hull = isl_map_affine_hull(isl_map_copy(edge->map));
7337 hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
7338 isl_mat_copy(src->vmap));
7339 hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
7340 isl_mat_copy(dst->vmap));
7341 hull = isl_basic_map_project_out(hull,
7342 isl_dim_in, 0, src->rank);
7343 hull = isl_basic_map_project_out(hull,
7344 isl_dim_out, 0, dst->rank);
7345 hull = isl_basic_map_remove_divs(hull);
7346 n_in = isl_basic_map_dim(hull, isl_dim_in);
7347 n_out = isl_basic_map_dim(hull, isl_dim_out);
7348 if (n_in < 0 || n_out < 0)
7349 hull = isl_basic_map_free(hull);
7350 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
7351 isl_dim_in, 0, n_in);
7352 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
7353 isl_dim_out, 0, n_out);
7354 n = isl_basic_map_n_equality(hull);
7355 isl_basic_map_free(hull);
7356 if (n < 0)
7357 return isl_stat_error;
7358 edge->weight = n;
7360 if (edge->weight > graph->max_weight)
7361 graph->max_weight = edge->weight;
7364 return isl_stat_ok;
7367 /* Call compute_schedule_finish_band on each of the clusters in "c"
7368 * in their topological order. This order is determined by the scc
7369 * fields of the nodes in "graph".
7370 * Combine the results in a sequence expressing the topological order.
7372 * If there is only one cluster left, then there is no need to introduce
7373 * a sequence node. Also, in this case, the cluster necessarily contains
7374 * the SCC at position 0 in the original graph and is therefore also
7375 * stored in the first cluster of "c".
7377 static __isl_give isl_schedule_node *finish_bands_clustering(
7378 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
7379 struct isl_clustering *c)
7381 int i;
7382 isl_ctx *ctx;
7383 isl_union_set_list *filters;
7385 if (graph->scc == 1)
7386 return compute_schedule_finish_band(node, &c->cluster[0], 0);
7388 ctx = isl_schedule_node_get_ctx(node);
7390 filters = extract_sccs(ctx, graph);
7391 node = isl_schedule_node_insert_sequence(node, filters);
7393 for (i = 0; i < graph->scc; ++i) {
7394 int j = c->scc_cluster[i];
7395 node = isl_schedule_node_grandchild(node, i, 0);
7396 node = compute_schedule_finish_band(node, &c->cluster[j], 0);
7397 node = isl_schedule_node_grandparent(node);
7400 return node;
7403 /* Compute a schedule for a connected dependence graph by first considering
7404 * each strongly connected component (SCC) in the graph separately and then
7405 * incrementally combining them into clusters.
7406 * Return the updated schedule node.
7408 * Initially, each cluster consists of a single SCC, each with its
7409 * own band schedule. The algorithm then tries to merge pairs
7410 * of clusters along a proximity edge until no more suitable
7411 * proximity edges can be found. During this merging, the schedule
7412 * is maintained in the individual SCCs.
7413 * After the merging is completed, the full resulting clusters
7414 * are extracted and in finish_bands_clustering,
7415 * compute_schedule_finish_band is called on each of them to integrate
7416 * the band into "node" and to continue the computation.
7418 * compute_weights initializes the weights that are used by find_proximity.
7420 static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
7421 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
7423 isl_ctx *ctx;
7424 struct isl_clustering c;
7425 int i;
7427 ctx = isl_schedule_node_get_ctx(node);
7429 if (clustering_init(ctx, &c, graph) < 0)
7430 goto error;
7432 if (compute_weights(graph, &c) < 0)
7433 goto error;
7435 for (;;) {
7436 i = find_proximity(graph, &c);
7437 if (i < 0)
7438 goto error;
7439 if (i >= graph->n_edge)
7440 break;
7441 if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
7442 goto error;
7445 if (extract_clusters(ctx, graph, &c) < 0)
7446 goto error;
7448 node = finish_bands_clustering(node, graph, &c);
7450 clustering_free(ctx, &c);
7451 return node;
7452 error:
7453 clustering_free(ctx, &c);
7454 return isl_schedule_node_free(node);
7457 /* Compute a schedule for a connected dependence graph and return
7458 * the updated schedule node.
7460 * If Feautrier's algorithm is selected, we first recursively try to satisfy
7461 * as many validity dependences as possible. When all validity dependences
7462 * are satisfied we extend the schedule to a full-dimensional schedule.
7464 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
7465 * depending on whether the user has selected the option to try and
7466 * compute a schedule for the entire (weakly connected) component first.
7467 * If there is only a single strongly connected component (SCC), then
7468 * there is no point in trying to combine SCCs
7469 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
7470 * is called instead.
7472 static __isl_give isl_schedule_node *compute_schedule_wcc(
7473 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
7475 isl_ctx *ctx;
7477 if (!node)
7478 return NULL;
7480 ctx = isl_schedule_node_get_ctx(node);
7481 if (detect_sccs(ctx, graph) < 0)
7482 return isl_schedule_node_free(node);
7484 if (compute_maxvar(graph) < 0)
7485 return isl_schedule_node_free(node);
7487 if (need_feautrier_step(ctx, graph))
7488 return compute_schedule_wcc_feautrier(node, graph);
7490 if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
7491 return compute_schedule_wcc_whole(node, graph);
7492 else
7493 return compute_schedule_wcc_clustering(node, graph);
7496 /* Compute a schedule for each group of nodes identified by node->scc
7497 * separately and then combine them in a sequence node (or as set node
7498 * if graph->weak is set) inserted at position "node" of the schedule tree.
7499 * Return the updated schedule node.
7501 * If "wcc" is set then each of the groups belongs to a single
7502 * weakly connected component in the dependence graph so that
7503 * there is no need for compute_sub_schedule to look for weakly
7504 * connected components.
7506 * If a set node would be introduced and if the number of components
7507 * is equal to the number of nodes, then check if the schedule
7508 * is already complete. If so, a redundant set node would be introduced
7509 * (without any further descendants) stating that the statements
7510 * can be executed in arbitrary order, which is also expressed
7511 * by the absence of any node. Refrain from inserting any nodes
7512 * in this case and simply return.
7514 static __isl_give isl_schedule_node *compute_component_schedule(
7515 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
7516 int wcc)
7518 int component;
7519 isl_ctx *ctx;
7520 isl_union_set_list *filters;
7522 if (!node)
7523 return NULL;
7525 if (graph->weak && graph->scc == graph->n) {
7526 if (compute_maxvar(graph) < 0)
7527 return isl_schedule_node_free(node);
7528 if (graph->n_row >= graph->maxvar)
7529 return node;
7532 ctx = isl_schedule_node_get_ctx(node);
7533 filters = extract_sccs(ctx, graph);
7534 if (graph->weak)
7535 node = isl_schedule_node_insert_set(node, filters);
7536 else
7537 node = isl_schedule_node_insert_sequence(node, filters);
7539 for (component = 0; component < graph->scc; ++component) {
7540 node = isl_schedule_node_grandchild(node, component, 0);
7541 node = compute_sub_schedule(node, ctx, graph,
7542 &isl_sched_node_scc_exactly,
7543 &edge_scc_exactly, component, wcc);
7544 node = isl_schedule_node_grandparent(node);
7547 return node;
7550 /* Compute a schedule for the given dependence graph and insert it at "node".
7551 * Return the updated schedule node.
7553 * We first check if the graph is connected (through validity and conditional
7554 * validity dependences) and, if not, compute a schedule
7555 * for each component separately.
7556 * If the schedule_serialize_sccs option is set, then we check for strongly
7557 * connected components instead and compute a separate schedule for
7558 * each such strongly connected component.
7560 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
7561 struct isl_sched_graph *graph)
7563 isl_ctx *ctx;
7565 if (!node)
7566 return NULL;
7568 ctx = isl_schedule_node_get_ctx(node);
7569 if (isl_options_get_schedule_serialize_sccs(ctx)) {
7570 if (detect_sccs(ctx, graph) < 0)
7571 return isl_schedule_node_free(node);
7572 } else {
7573 if (detect_wccs(ctx, graph) < 0)
7574 return isl_schedule_node_free(node);
7577 if (graph->scc > 1)
7578 return compute_component_schedule(node, graph, 1);
7580 return compute_schedule_wcc(node, graph);
7583 /* Compute a schedule on sc->domain that respects the given schedule
7584 * constraints.
7586 * In particular, the schedule respects all the validity dependences.
7587 * If the default isl scheduling algorithm is used, it tries to minimize
7588 * the dependence distances over the proximity dependences.
7589 * If Feautrier's scheduling algorithm is used, the proximity dependence
7590 * distances are only minimized during the extension to a full-dimensional
7591 * schedule.
7593 * If there are any condition and conditional validity dependences,
7594 * then the conditional validity dependences may be violated inside
7595 * a tilable band, provided they have no adjacent non-local
7596 * condition dependences.
7598 __isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
7599 __isl_take isl_schedule_constraints *sc)
7601 isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
7602 struct isl_sched_graph graph = { 0 };
7603 isl_schedule *sched;
7604 isl_schedule_node *node;
7605 isl_union_set *domain;
7606 isl_size n;
7608 sc = isl_schedule_constraints_align_params(sc);
7610 domain = isl_schedule_constraints_get_domain(sc);
7611 n = isl_union_set_n_set(domain);
7612 if (n == 0) {
7613 isl_schedule_constraints_free(sc);
7614 return isl_schedule_from_domain(domain);
7617 if (n < 0 || graph_init(&graph, sc) < 0)
7618 domain = isl_union_set_free(domain);
7620 node = isl_schedule_node_from_domain(domain);
7621 node = isl_schedule_node_child(node, 0);
7622 if (graph.n > 0)
7623 node = compute_schedule(node, &graph);
7624 sched = isl_schedule_node_get_schedule(node);
7625 isl_schedule_node_free(node);
7627 graph_free(ctx, &graph);
7628 isl_schedule_constraints_free(sc);
7630 return sched;
7633 /* Compute a schedule for the given union of domains that respects
7634 * all the validity dependences and minimizes
7635 * the dependence distances over the proximity dependences.
7637 * This function is kept for backward compatibility.
7639 __isl_give isl_schedule *isl_union_set_compute_schedule(
7640 __isl_take isl_union_set *domain,
7641 __isl_take isl_union_map *validity,
7642 __isl_take isl_union_map *proximity)
7644 isl_schedule_constraints *sc;
7646 sc = isl_schedule_constraints_on_domain(domain);
7647 sc = isl_schedule_constraints_set_validity(sc, validity);
7648 sc = isl_schedule_constraints_set_proximity(sc, proximity);
7650 return isl_schedule_constraints_compute_schedule(sc);