1 /*
2 * Copyright 2011 INRIA Saclay
3 * Copyright 2012-2014 Ecole Normale Superieure
4 * Copyright 2015-2016 Sven Verdoolaege
5 * Copyright 2016 INRIA Paris
6 * Copyright 2017 Sven Verdoolaege
8 * Use of this software is governed by the MIT license
10 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
11 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
12 * 91893 Orsay, France
13 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
14 * and Centre de Recherche Inria de Paris, 2 rue Simone Iff - Voie DQ12,
15 * CS 42112, 75589 Paris Cedex 12, France
18 #include <isl_ctx_private.h>
19 #include <isl_map_private.h>
20 #include <isl_space_private.h>
21 #include <isl_aff_private.h>
22 #include <isl/hash.h>
23 #include <isl/id.h>
24 #include <isl/constraint.h>
25 #include <isl/schedule.h>
26 #include <isl_schedule_constraints.h>
27 #include <isl/schedule_node.h>
28 #include <isl_mat_private.h>
29 #include <isl_vec_private.h>
30 #include <isl/set.h>
31 #include <isl_union_set_private.h>
32 #include <isl_seq.h>
33 #include <isl_tab.h>
34 #include <isl_dim_map.h>
35 #include <isl/map_to_basic_set.h>
36 #include <isl_sort.h>
37 #include <isl_options_private.h>
38 #include <isl_tarjan.h>
39 #include <isl_morph.h>
40 #include <isl/ilp.h>
41 #include <isl_val_private.h>
44 * The scheduling algorithm implemented in this file was inspired by
45 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
46 * Parallelization and Locality Optimization in the Polyhedral Model".
48 * For a detailed description of the variant implemented in isl,
49 * see Verdoolaege and Janssens, "Scheduling for PPCG" (2017).
53 /* Internal information about a node that is used during the construction
54 * of a schedule.
55 * space represents the original space in which the domain lives;
56 * that is, the space is not affected by compression
57 * sched is a matrix representation of the schedule being constructed
58 * for this node; if compressed is set, then this schedule is
59 * defined over the compressed domain space
60 * sched_map is an isl_map representation of the same (partial) schedule
61 * sched_map may be NULL; if compressed is set, then this map
62 * is defined over the uncompressed domain space
63 * rank is the number of linearly independent rows in the linear part
64 * of sched
65 * the rows of "vmap" represent a change of basis for the node
66 * variables; the first rank rows span the linear part of
67 * the schedule rows; the remaining rows are linearly independent
68 * the rows of "indep" represent linear combinations of the schedule
69 * coefficients that are non-zero when the schedule coefficients are
70 * linearly independent of previously computed schedule rows.
71 * start is the first variable in the LP problem in the sequence that
72 * represents the schedule coefficients of this node
73 * nvar is the dimension of the (compressed) domain
74 * nparam is the number of parameters or 0 if we are not constructing
75 * a parametric schedule
77 * If compressed is set, then hull represents the constraints
78 * that were used to derive the compression, while compress and
79 * decompress map the original space to the compressed space and
80 * vice versa.
82 * scc is the index of SCC (or WCC) this node belongs to
84 * "cluster" is only used inside extract_clusters and identifies
85 * the cluster of SCCs that the node belongs to.
87 * coincident contains a boolean for each of the rows of the schedule,
88 * indicating whether the corresponding scheduling dimension satisfies
89 * the coincidence constraints in the sense that the corresponding
90 * dependence distances are zero.
92 * If the schedule_treat_coalescing option is set, then
93 * "sizes" contains the sizes of the (compressed) instance set
94 * in each direction. If there is no fixed size in a given direction,
95 * then the corresponding size value is set to infinity.
96 * If the schedule_treat_coalescing option or the schedule_max_coefficient
97 * option is set, then "max" contains the maximal values for
98 * schedule coefficients of the (compressed) variables. If no bound
99 * needs to be imposed on a particular variable, then the corresponding
100 * value is negative.
101 * If not NULL, then "bounds" contains a non-parametric set
102 * in the compressed space that is bounded by the size in each direction.
104 struct isl_sched_node {
105 isl_space *space;
106 int compressed;
107 isl_set *hull;
108 isl_multi_aff *compress;
109 isl_multi_aff *decompress;
110 isl_mat *sched;
111 isl_map *sched_map;
112 int rank;
113 isl_mat *indep;
114 isl_mat *vmap;
115 int start;
116 int nvar;
117 int nparam;
119 int scc;
120 int cluster;
122 int *coincident;
124 isl_multi_val *sizes;
125 isl_basic_set *bounds;
126 isl_vec *max;
129 static int node_has_tuples(const void *entry, const void *val)
131 struct isl_sched_node *node = (struct isl_sched_node *)entry;
132 isl_space *space = (isl_space *) val;
134 return isl_space_has_equal_tuples(node->space, space);
137 static int node_scc_exactly(struct isl_sched_node *node, int scc)
139 return node->scc == scc;
142 static int node_scc_at_most(struct isl_sched_node *node, int scc)
144 return node->scc <= scc;
147 static int node_scc_at_least(struct isl_sched_node *node, int scc)
149 return node->scc >= scc;
152 /* An edge in the dependence graph. An edge may be used to
153 * ensure validity of the generated schedule, to minimize the dependence
154 * distance, or both.
156 * map is the dependence relation, with i -> j in the map if j depends on i
157 * tagged_condition and tagged_validity contain the union of all tagged
158 * condition or conditional validity dependence relations that
159 * specialize the dependence relation "map"; that is,
160 * if (i -> a) -> (j -> b) is an element of "tagged_condition"
161 * or "tagged_validity", then i -> j is an element of "map".
162 * If these fields are NULL, then they represent the empty relation.
163 * src is the source node
164 * dst is the sink node
166 * types is a bit vector containing the types of this edge.
167 * validity is set if the edge is used to ensure correctness
168 * coincidence is used to enforce zero dependence distances
169 * proximity is set if the edge is used to minimize dependence distances
170 * condition is set if the edge represents a condition
171 * for a conditional validity schedule constraint
172 * local can only be set for condition edges and indicates that
173 * the dependence distance over the edge should be zero
174 * conditional_validity is set if the edge is used to conditionally
175 * ensure correctness
177 * For validity edges, start and end mark the sequence of inequality
178 * constraints in the LP problem that encode the validity constraint
179 * corresponding to this edge.
181 * During clustering, an edge may be marked "no_merge" if it should
182 * not be used to merge clusters.
183 * The weight is also only used during clustering and it is
184 * an indication of how many schedule dimensions on either side
185 * of the schedule constraints can be aligned.
186 * If the weight is negative, then this means that this edge was postponed
187 * by has_bounded_distances or any_no_merge. The original weight can
188 * be retrieved by adding 1 + graph->max_weight, with "graph"
189 * the graph containing this edge.
191 struct isl_sched_edge {
192 isl_map *map;
193 isl_union_map *tagged_condition;
194 isl_union_map *tagged_validity;
196 struct isl_sched_node *src;
197 struct isl_sched_node *dst;
199 unsigned types;
201 int start;
202 int end;
204 int no_merge;
205 int weight;
208 /* Is "edge" marked as being of type "type"?
210 static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
212 return ISL_FL_ISSET(edge->types, 1 << type);
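/* For example, an edge that has been marked both as a validity and as a
 * proximity edge has both the (1 << isl_edge_validity) and the
 * (1 << isl_edge_proximity) bits set in its "types" field,
 * so is_type reports true for both types.
 */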
215 /* Mark "edge" as being of type "type".
217 static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
219 ISL_FL_SET(edge->types, 1 << type);
222 /* No longer mark "edge" as being of type "type".
224 static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
226 ISL_FL_CLR(edge->types, 1 << type);
229 /* Is "edge" marked as a validity edge?
231 static int is_validity(struct isl_sched_edge *edge)
233 return is_type(edge, isl_edge_validity);
236 /* Mark "edge" as a validity edge.
238 static void set_validity(struct isl_sched_edge *edge)
240 set_type(edge, isl_edge_validity);
243 /* Is "edge" marked as a proximity edge?
245 static int is_proximity(struct isl_sched_edge *edge)
247 return is_type(edge, isl_edge_proximity);
250 /* Is "edge" marked as a local edge?
252 static int is_local(struct isl_sched_edge *edge)
254 return is_type(edge, isl_edge_local);
257 /* Mark "edge" as a local edge.
259 static void set_local(struct isl_sched_edge *edge)
261 set_type(edge, isl_edge_local);
264 /* No longer mark "edge" as a local edge.
266 static void clear_local(struct isl_sched_edge *edge)
268 clear_type(edge, isl_edge_local);
271 /* Is "edge" marked as a coincidence edge?
273 static int is_coincidence(struct isl_sched_edge *edge)
275 return is_type(edge, isl_edge_coincidence);
278 /* Is "edge" marked as a condition edge?
280 static int is_condition(struct isl_sched_edge *edge)
282 return is_type(edge, isl_edge_condition);
285 /* Is "edge" marked as a conditional validity edge?
287 static int is_conditional_validity(struct isl_sched_edge *edge)
289 return is_type(edge, isl_edge_conditional_validity);
292 /* Is "edge" of a type that can appear multiple times between
293 * the same pair of nodes?
295 * Condition edges and conditional validity edges may have tagged
296 * dependence relations, in which case an edge is added for each
297 * pair of tags.
299 static int is_multi_edge_type(struct isl_sched_edge *edge)
301 return is_condition(edge) || is_conditional_validity(edge);
304 /* Internal information about the dependence graph used during
305 * the construction of the schedule.
307 * intra_hmap is a cache, mapping dependence relations to their dual,
308 * for dependences from a node to itself, possibly without
309 * coefficients for the parameters
310 * intra_hmap_param is a cache, mapping dependence relations to their dual,
311 * for dependences from a node to itself, including coefficients
312 * for the parameters
313 * inter_hmap is a cache, mapping dependence relations to their dual,
314 * for dependences between distinct nodes
315 * if compression is involved then the key for these maps
316 * is the original, uncompressed dependence relation, while
317 * the value is the dual of the compressed dependence relation.
319 * n is the number of nodes
320 * node is the list of nodes
321 * maxvar is the maximal number of variables over all nodes
322 * max_row is the allocated number of rows in the schedule
323 * n_row is the current (maximal) number of linearly independent
324 * rows in the node schedules
325 * n_total_row is the current number of rows in the node schedules
326 * band_start is the starting row in the node schedules of the current band
327 * root is set to the original dependence graph from which this graph
328 * is derived through splitting. If this graph is not the result of
329 * splitting, then the root field points to the graph itself.
331 * sorted contains a list of node indices sorted according to the
332 * SCC to which a node belongs
334 * n_edge is the number of edges
335 * edge is the list of edges
336 * max_edge contains the maximal number of edges of each type;
337 * in particular, it contains the number of edges in the initial graph.
338 * edge_table contains pointers into the edge array, hashed on the source
339 * and sink spaces; there is one such table for each type;
340 * a given edge may be referenced from more than one table
341 * if the corresponding relation appears in more than one of the
342 * sets of dependences; however, for each type there is only
343 * a single edge between a given pair of source and sink space
344 * in the entire graph
346 * node_table contains pointers into the node array, hashed on the space tuples
348 * region contains a list of variable sequences that should be non-trivial
350 * lp contains the (I)LP problem used to obtain new schedule rows
352 * src_scc and dst_scc are the source and sink SCCs of an edge with
353 * conflicting constraints
355 * scc represents the number of components
356 * weak is set if the components are weakly connected
358 * max_weight is used during clustering and represents the maximal
359 * weight of the relevant proximity edges.
361 struct isl_sched_graph {
362 isl_map_to_basic_set *intra_hmap;
363 isl_map_to_basic_set *intra_hmap_param;
364 isl_map_to_basic_set *inter_hmap;
366 struct isl_sched_node *node;
367 int n;
368 int maxvar;
369 int max_row;
370 int n_row;
372 int *sorted;
374 int n_total_row;
375 int band_start;
377 struct isl_sched_graph *root;
379 struct isl_sched_edge *edge;
380 int n_edge;
381 int max_edge[isl_edge_last + 1];
382 struct isl_hash_table *edge_table[isl_edge_last + 1];
384 struct isl_hash_table *node_table;
385 struct isl_trivial_region *region;
387 isl_basic_set *lp;
389 int src_scc;
390 int dst_scc;
392 int scc;
393 int weak;
395 int max_weight;
398 /* Initialize node_table based on the list of nodes.
400 static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
402 int i;
404 graph->node_table = isl_hash_table_alloc(ctx, graph->n);
405 if (!graph->node_table)
406 return -1;
408 for (i = 0; i < graph->n; ++i) {
409 struct isl_hash_table_entry *entry;
410 uint32_t hash;
412 hash = isl_space_get_tuple_hash(graph->node[i].space);
413 entry = isl_hash_table_find(ctx, graph->node_table, hash,
414 &node_has_tuples,
415 graph->node[i].space, 1);
416 if (!entry)
417 return -1;
418 entry->data = &graph->node[i];
421 return 0;
424 /* Return a pointer to the node that lives within the given space,
425 * an invalid node if there is no such node, or NULL in case of error.
427 static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
428 struct isl_sched_graph *graph, __isl_keep isl_space *space)
430 struct isl_hash_table_entry *entry;
431 uint32_t hash;
433 if (!space)
434 return NULL;
436 hash = isl_space_get_tuple_hash(space);
437 entry = isl_hash_table_find(ctx, graph->node_table, hash,
438 &node_has_tuples, space, 0);
440 return entry ? entry->data : graph->node + graph->n;
443 /* Is "node" a node in "graph"?
445 static int is_node(struct isl_sched_graph *graph,
446 struct isl_sched_node *node)
448 return node && node >= &graph->node[0] && node < &graph->node[graph->n];
451 static int edge_has_src_and_dst(const void *entry, const void *val)
453 const struct isl_sched_edge *edge = entry;
454 const struct isl_sched_edge *temp = val;
456 return edge->src == temp->src && edge->dst == temp->dst;
459 /* Add the given edge to graph->edge_table[type].
461 static isl_stat graph_edge_table_add(isl_ctx *ctx,
462 struct isl_sched_graph *graph, enum isl_edge_type type,
463 struct isl_sched_edge *edge)
465 struct isl_hash_table_entry *entry;
466 uint32_t hash;
468 hash = isl_hash_init();
469 hash = isl_hash_builtin(hash, edge->src);
470 hash = isl_hash_builtin(hash, edge->dst);
471 entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
472 &edge_has_src_and_dst, edge, 1);
473 if (!entry)
474 return isl_stat_error;
475 entry->data = edge;
477 return isl_stat_ok;
480 /* Add "edge" to all relevant edge tables.
481 * That is, for every type of the edge, add it to the corresponding table.
483 static isl_stat graph_edge_tables_add(isl_ctx *ctx,
484 struct isl_sched_graph *graph, struct isl_sched_edge *edge)
486 enum isl_edge_type t;
488 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
489 if (!is_type(edge, t))
490 continue;
491 if (graph_edge_table_add(ctx, graph, t, edge) < 0)
492 return isl_stat_error;
495 return isl_stat_ok;
498 /* Allocate the edge_tables based on the maximal number of edges of
499 * each type.
501 static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
503 int i;
505 for (i = 0; i <= isl_edge_last; ++i) {
506 graph->edge_table[i] = isl_hash_table_alloc(ctx,
507 graph->max_edge[i]);
508 if (!graph->edge_table[i])
509 return -1;
512 return 0;
515 /* If graph->edge_table[type] contains an edge from the given source
516 * to the given destination, then return the hash table entry of this edge.
517 * Otherwise, return NULL.
519 static struct isl_hash_table_entry *graph_find_edge_entry(
520 struct isl_sched_graph *graph,
521 enum isl_edge_type type,
522 struct isl_sched_node *src, struct isl_sched_node *dst)
524 isl_ctx *ctx = isl_space_get_ctx(src->space);
525 uint32_t hash;
526 struct isl_sched_edge temp = { .src = src, .dst = dst };
528 hash = isl_hash_init();
529 hash = isl_hash_builtin(hash, temp.src);
530 hash = isl_hash_builtin(hash, temp.dst);
531 return isl_hash_table_find(ctx, graph->edge_table[type], hash,
532 &edge_has_src_and_dst, &temp, 0);
536 /* If graph->edge_table[type] contains an edge from the given source
537 * to the given destination, then return this edge.
538 * Otherwise, return NULL.
540 static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
541 enum isl_edge_type type,
542 struct isl_sched_node *src, struct isl_sched_node *dst)
544 struct isl_hash_table_entry *entry;
546 entry = graph_find_edge_entry(graph, type, src, dst);
547 if (!entry)
548 return NULL;
550 return entry->data;
553 /* Check whether the dependence graph has an edge of the given type
554 * between the given two nodes.
556 static isl_bool graph_has_edge(struct isl_sched_graph *graph,
557 enum isl_edge_type type,
558 struct isl_sched_node *src, struct isl_sched_node *dst)
560 struct isl_sched_edge *edge;
561 isl_bool empty;
563 edge = graph_find_edge(graph, type, src, dst);
564 if (!edge)
565 return isl_bool_false;
567 empty = isl_map_plain_is_empty(edge->map);
569 return isl_bool_not(empty);
572 /* Look for any edge with the same src, dst and map fields as "model".
574 * Return the matching edge if one can be found.
575 * Return "model" if no matching edge is found.
576 * Return NULL on error.
578 static struct isl_sched_edge *graph_find_matching_edge(
579 struct isl_sched_graph *graph, struct isl_sched_edge *model)
581 enum isl_edge_type i;
582 struct isl_sched_edge *edge;
584 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
585 int is_equal;
587 edge = graph_find_edge(graph, i, model->src, model->dst);
588 if (!edge)
589 continue;
590 is_equal = isl_map_plain_is_equal(model->map, edge->map);
591 if (is_equal < 0)
592 return NULL;
593 if (is_equal)
594 return edge;
597 return model;
600 /* Remove the given edge from all the edge_tables that refer to it.
602 static void graph_remove_edge(struct isl_sched_graph *graph,
603 struct isl_sched_edge *edge)
605 isl_ctx *ctx = isl_map_get_ctx(edge->map);
606 enum isl_edge_type i;
608 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
609 struct isl_hash_table_entry *entry;
611 entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
612 if (!entry)
613 continue;
614 if (entry->data != edge)
615 continue;
616 isl_hash_table_remove(ctx, graph->edge_table[i], entry);
620 /* Check whether the dependence graph has any edge
621 * between the given two nodes.
623 static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
624 struct isl_sched_node *src, struct isl_sched_node *dst)
626 enum isl_edge_type i;
627 isl_bool r;
629 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
630 r = graph_has_edge(graph, i, src, dst);
631 if (r < 0 || r)
632 return r;
635 return r;
638 /* Check whether the dependence graph has a validity edge
639 * between the given two nodes.
641 * Conditional validity edges are essentially validity edges that
642 * can be ignored if the corresponding condition edges are iteration private.
643 * Here, we are only checking for the presence of validity
644 * edges, so we need to consider the conditional validity edges too.
645 * In particular, this function is used during the detection
646 * of strongly connected components and we cannot ignore
647 * conditional validity edges during this detection.
649 static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
650 struct isl_sched_node *src, struct isl_sched_node *dst)
652 isl_bool r;
654 r = graph_has_edge(graph, isl_edge_validity, src, dst);
655 if (r < 0 || r)
656 return r;
658 return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
661 /* Perform all the required memory allocations for a schedule graph "graph"
662 * with "n_node" nodes and "n_edge" edges and initialize the corresponding
663 * fields.
665 static isl_stat graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
666 int n_node, int n_edge)
668 int i;
670 graph->n = n_node;
671 graph->n_edge = n_edge;
672 graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
673 graph->sorted = isl_calloc_array(ctx, int, graph->n);
674 graph->region = isl_alloc_array(ctx,
675 struct isl_trivial_region, graph->n);
676 graph->edge = isl_calloc_array(ctx,
677 struct isl_sched_edge, graph->n_edge);
679 graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
680 graph->intra_hmap_param = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
681 graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
683 if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
684 !graph->sorted)
685 return isl_stat_error;
687 for (i = 0; i < graph->n; ++i)
688 graph->sorted[i] = i;
690 return isl_stat_ok;
693 /* Free the memory associated to node "node" in "graph".
694 * The "coincident" field is shared by nodes in a graph and its subgraph.
695 * It therefore only needs to be freed for the original dependence graph,
696 * i.e., one that is not the result of splitting.
698 static void clear_node(struct isl_sched_graph *graph,
699 struct isl_sched_node *node)
701 isl_space_free(node->space);
702 isl_set_free(node->hull);
703 isl_multi_aff_free(node->compress);
704 isl_multi_aff_free(node->decompress);
705 isl_mat_free(node->sched);
706 isl_map_free(node->sched_map);
707 isl_mat_free(node->indep);
708 isl_mat_free(node->vmap);
709 if (graph->root == graph)
710 free(node->coincident);
711 isl_multi_val_free(node->sizes);
712 isl_basic_set_free(node->bounds);
713 isl_vec_free(node->max);
716 static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
718 int i;
720 isl_map_to_basic_set_free(graph->intra_hmap);
721 isl_map_to_basic_set_free(graph->intra_hmap_param);
722 isl_map_to_basic_set_free(graph->inter_hmap);
724 if (graph->node)
725 for (i = 0; i < graph->n; ++i)
726 clear_node(graph, &graph->node[i]);
727 free(graph->node);
728 free(graph->sorted);
729 if (graph->edge)
730 for (i = 0; i < graph->n_edge; ++i) {
731 isl_map_free(graph->edge[i].map);
732 isl_union_map_free(graph->edge[i].tagged_condition);
733 isl_union_map_free(graph->edge[i].tagged_validity);
735 free(graph->edge);
736 free(graph->region);
737 for (i = 0; i <= isl_edge_last; ++i)
738 isl_hash_table_free(ctx, graph->edge_table[i]);
739 isl_hash_table_free(ctx, graph->node_table);
740 isl_basic_set_free(graph->lp);
743 /* For each "set" on which this function is called, increment
744 * graph->n by one and update graph->maxvar.
746 static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
748 struct isl_sched_graph *graph = user;
749 isl_size nvar = isl_set_dim(set, isl_dim_set);
751 graph->n++;
752 if (nvar > graph->maxvar)
753 graph->maxvar = nvar;
755 isl_set_free(set);
757 if (nvar < 0)
758 return isl_stat_error;
759 return isl_stat_ok;
762 /* Compute the number of rows that should be allocated for the schedule.
763 * In particular, we need one row for each variable or one row
764 * for each basic map in the dependences.
765 * Note that it is practically impossible to exhaust both
766 * the number of dependences and the number of variables.
768 static isl_stat compute_max_row(struct isl_sched_graph *graph,
769 __isl_keep isl_schedule_constraints *sc)
771 int n_edge;
772 isl_stat r;
773 isl_union_set *domain;
775 graph->n = 0;
776 graph->maxvar = 0;
777 domain = isl_schedule_constraints_get_domain(sc);
778 r = isl_union_set_foreach_set(domain, &init_n_maxvar, graph);
779 isl_union_set_free(domain);
780 if (r < 0)
781 return isl_stat_error;
782 n_edge = isl_schedule_constraints_n_basic_map(sc);
783 if (n_edge < 0)
784 return isl_stat_error;
785 graph->max_row = n_edge + graph->maxvar;
787 return isl_stat_ok;
790 /* Does "bset" have any defining equalities for its set variables?
792 static isl_bool has_any_defining_equality(__isl_keep isl_basic_set *bset)
794 int i;
795 isl_size n;
797 n = isl_basic_set_dim(bset, isl_dim_set);
798 if (n < 0)
799 return isl_bool_error;
801 for (i = 0; i < n; ++i) {
802 isl_bool has;
804 has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
805 NULL);
806 if (has < 0 || has)
807 return has;
810 return isl_bool_false;
813 /* Set the entries of node->max to the value of the schedule_max_coefficient
814 * option, if set.
816 static isl_stat set_max_coefficient(isl_ctx *ctx, struct isl_sched_node *node)
818 int max;
820 max = isl_options_get_schedule_max_coefficient(ctx);
821 if (max == -1)
822 return isl_stat_ok;
824 node->max = isl_vec_alloc(ctx, node->nvar);
825 node->max = isl_vec_set_si(node->max, max);
826 if (!node->max)
827 return isl_stat_error;
829 return isl_stat_ok;
832 /* Set the entries of node->max to the minimum of the schedule_max_coefficient
833 * option (if set) and half of the minimum of the sizes in the other
834 * dimensions. Round up when computing the half such that
835 * if the minimum of the sizes is one, half of the size is taken to be one
836 * rather than zero.
837 * If the global minimum is unbounded (i.e., if both
838 * the schedule_max_coefficient is not set and the sizes in the other
839 * dimensions are unbounded), then store a negative value.
840 * If the schedule coefficient is close to the size of the instance set
841 * in another dimension, then the schedule may represent a loop
842 * coalescing transformation (especially if the coefficient
843 * in that other dimension is one). Forcing the coefficient to be
844 * smaller than or equal to half the minimal size should avoid this
845 * situation.
847 static isl_stat compute_max_coefficient(isl_ctx *ctx,
848 struct isl_sched_node *node)
850 int max;
851 int i, j;
852 isl_vec *v;
854 max = isl_options_get_schedule_max_coefficient(ctx);
855 v = isl_vec_alloc(ctx, node->nvar);
856 if (!v)
857 return isl_stat_error;
859 for (i = 0; i < node->nvar; ++i) {
860 isl_int_set_si(v->el[i], max);
861 isl_int_mul_si(v->el[i], v->el[i], 2);
864 for (i = 0; i < node->nvar; ++i) {
865 isl_val *size;
867 size = isl_multi_val_get_val(node->sizes, i);
868 if (!size)
869 goto error;
870 if (!isl_val_is_int(size)) {
871 isl_val_free(size);
872 continue;
874 for (j = 0; j < node->nvar; ++j) {
875 if (j == i)
876 continue;
877 if (isl_int_is_neg(v->el[j]) ||
878 isl_int_gt(v->el[j], size->n))
879 isl_int_set(v->el[j], size->n);
881 isl_val_free(size);
884 for (i = 0; i < node->nvar; ++i)
885 isl_int_cdiv_q_ui(v->el[i], v->el[i], 2);
887 node->max = v;
888 return isl_stat_ok;
889 error:
890 isl_vec_free(v);
891 return isl_stat_error;
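/* For example, for a node with sizes (4, 10) and the schedule_max_coefficient
 * option unset, the minimum of the sizes in the other dimensions is 10 for
 * the first variable and 4 for the second, so node->max becomes (5, 2)
 * after rounding up the halves.
 */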
894 /* Compute and return the size of "set" in dimension "dim".
895 * The size is taken to be the difference in values for that variable
896 * for fixed values of the other variables.
897 * This assumes that "set" is convex.
898 * In particular, the variable is first isolated from the other variables
899 * in the range of a map
901 * [i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
903 * and then duplicated
905 * [i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
907 * The shared variables are then projected out and the maximal value
908 * of i_dim' - i_dim is computed.
910 static __isl_give isl_val *compute_size(__isl_take isl_set *set, int dim)
912 isl_map *map;
913 isl_local_space *ls;
914 isl_aff *obj;
915 isl_val *v;
917 map = isl_set_project_onto_map(set, isl_dim_set, dim, 1);
918 map = isl_map_project_out(map, isl_dim_in, dim, 1);
919 map = isl_map_range_product(map, isl_map_copy(map));
920 map = isl_set_unwrap(isl_map_range(map));
921 set = isl_map_deltas(map);
922 ls = isl_local_space_from_space(isl_set_get_space(set));
923 obj = isl_aff_var_on_domain(ls, isl_dim_set, 0);
924 v = isl_set_max_val(set, obj);
925 isl_aff_free(obj);
926 isl_set_free(set);
928 return v;
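/* For example, on the rectangular set { [i,j] : 0 <= i < 10 and 0 <= j < 20 },
 * compute_size returns 9 for dimension 0 and 19 for dimension 1, i.e.,
 * the maximal difference in that variable for fixed values of the other one.
 */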
931 /* Compute the size of the instance set "set" of "node", after compression,
932 * as well as bounds on the corresponding coefficients, if needed.
934 * The sizes are needed when the schedule_treat_coalescing option is set.
935 * The bounds are needed when the schedule_treat_coalescing option or
936 * the schedule_max_coefficient option is set.
938 * If the schedule_treat_coalescing option is not set, then at most
939 * the bounds need to be set and this is done in set_max_coefficient.
940 * Otherwise, compress the domain if needed, compute the size
941 * in each direction and store the results in node->sizes.
942 * If the domain is not convex, then the sizes are computed
943 * on a convex superset in order to avoid picking up sizes
944 * that are valid for the individual disjuncts, but not for
945 * the domain as a whole.
946 * Finally, set the bounds on the coefficients based on the sizes
947 * and the schedule_max_coefficient option in compute_max_coefficient.
949 static isl_stat compute_sizes_and_max(isl_ctx *ctx, struct isl_sched_node *node,
950 __isl_take isl_set *set)
952 int j;
953 isl_size n;
954 isl_multi_val *mv;
956 if (!isl_options_get_schedule_treat_coalescing(ctx)) {
957 isl_set_free(set);
958 return set_max_coefficient(ctx, node);
961 if (node->compressed)
962 set = isl_set_preimage_multi_aff(set,
963 isl_multi_aff_copy(node->decompress));
964 set = isl_set_from_basic_set(isl_set_simple_hull(set));
965 mv = isl_multi_val_zero(isl_set_get_space(set));
966 n = isl_set_dim(set, isl_dim_set);
967 if (n < 0)
968 mv = isl_multi_val_free(mv);
969 for (j = 0; j < n; ++j) {
970 isl_val *v;
972 v = compute_size(isl_set_copy(set), j);
973 mv = isl_multi_val_set_val(mv, j, v);
975 node->sizes = mv;
976 isl_set_free(set);
977 if (!node->sizes)
978 return isl_stat_error;
979 return compute_max_coefficient(ctx, node);
982 /* Add a new node to the graph representing the given instance set.
983 * "nvar" is the (possibly compressed) number of variables and
984 * may be smaller than the number of set variables in "set"
985 * if "compressed" is set.
986 * If "compressed" is set, then "hull" represents the constraints
987 * that were used to derive the compression, while "compress" and
988 * "decompress" map the original space to the compressed space and
989 * vice versa.
990 * If "compressed" is not set, then "hull", "compress" and "decompress"
991 * should be NULL.
993 * Compute the size of the instance set and bounds on the coefficients,
994 * if needed.
996 static isl_stat add_node(struct isl_sched_graph *graph,
997 __isl_take isl_set *set, int nvar, int compressed,
998 __isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
999 __isl_take isl_multi_aff *decompress)
1001 isl_size nparam;
1002 isl_ctx *ctx;
1003 isl_mat *sched;
1004 isl_space *space;
1005 int *coincident;
1006 struct isl_sched_node *node;
1008 nparam = isl_set_dim(set, isl_dim_param);
1009 if (nparam < 0)
1010 goto error;
1012 ctx = isl_set_get_ctx(set);
1013 if (!ctx->opt->schedule_parametric)
1014 nparam = 0;
1015 sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
1016 node = &graph->node[graph->n];
1017 graph->n++;
1018 space = isl_set_get_space(set);
1019 node->space = space;
1020 node->nvar = nvar;
1021 node->nparam = nparam;
1022 node->sched = sched;
1023 node->sched_map = NULL;
1024 coincident = isl_calloc_array(ctx, int, graph->max_row);
1025 node->coincident = coincident;
1026 node->compressed = compressed;
1027 node->hull = hull;
1028 node->compress = compress;
1029 node->decompress = decompress;
1030 if (compute_sizes_and_max(ctx, node, set) < 0)
1031 return isl_stat_error;
1033 if (!space || !sched || (graph->max_row && !coincident))
1034 return isl_stat_error;
1035 if (compressed && (!hull || !compress || !decompress))
1036 return isl_stat_error;
1038 return isl_stat_ok;
1039 error:
1040 isl_set_free(set);
1041 isl_set_free(hull);
1042 isl_multi_aff_free(compress);
1043 isl_multi_aff_free(decompress);
1044 return isl_stat_error;
1047 /* Construct an identifier for node "node", which will represent "set".
1048 * The name of the identifier is either "compressed" or
1049 * "compressed_<name>", with <name> the name of the space of "set".
1050 * The user pointer of the identifier points to "node".
1052 static __isl_give isl_id *construct_compressed_id(__isl_keep isl_set *set,
1053 struct isl_sched_node *node)
1055 isl_bool has_name;
1056 isl_ctx *ctx;
1057 isl_id *id;
1058 isl_printer *p;
1059 const char *name;
1060 char *id_name;
1062 has_name = isl_set_has_tuple_name(set);
1063 if (has_name < 0)
1064 return NULL;
1066 ctx = isl_set_get_ctx(set);
1067 if (!has_name)
1068 return isl_id_alloc(ctx, "compressed", node);
1070 p = isl_printer_to_str(ctx);
1071 name = isl_set_get_tuple_name(set);
1072 p = isl_printer_print_str(p, "compressed_");
1073 p = isl_printer_print_str(p, name);
1074 id_name = isl_printer_get_str(p);
1075 isl_printer_free(p);
1077 id = isl_id_alloc(ctx, id_name, node);
1078 free(id_name);
1080 return id;
1083 /* Add a new node to the graph representing the given set.
1085 * If any of the set variables is defined by an equality, then
1086 * we perform variable compression such that we can perform
1087 * the scheduling on the compressed domain.
1088 * In this case, an identifier is used that references the new node
1089 * such that each compressed space is unique and
1090 * such that the node can be recovered from the compressed space.
1092 static isl_stat extract_node(__isl_take isl_set *set, void *user)
1094 isl_size nvar;
1095 isl_bool has_equality;
1096 isl_id *id;
1097 isl_basic_set *hull;
1098 isl_set *hull_set;
1099 isl_morph *morph;
1100 isl_multi_aff *compress, *decompress;
1101 struct isl_sched_graph *graph = user;
1103 hull = isl_set_affine_hull(isl_set_copy(set));
1104 hull = isl_basic_set_remove_divs(hull);
1105 nvar = isl_set_dim(set, isl_dim_set);
1106 has_equality = has_any_defining_equality(hull);
1108 if (nvar < 0 || has_equality < 0)
1109 goto error;
1110 if (!has_equality) {
1111 isl_basic_set_free(hull);
1112 return add_node(graph, set, nvar, 0, NULL, NULL, NULL);
1115 id = construct_compressed_id(set, &graph->node[graph->n]);
1116 morph = isl_basic_set_variable_compression_with_id(hull,
1117 isl_dim_set, id);
1118 isl_id_free(id);
1119 nvar = isl_morph_ran_dim(morph, isl_dim_set);
1120 if (nvar < 0)
1121 set = isl_set_free(set);
1122 compress = isl_morph_get_var_multi_aff(morph);
1123 morph = isl_morph_inverse(morph);
1124 decompress = isl_morph_get_var_multi_aff(morph);
1125 isl_morph_free(morph);
1127 hull_set = isl_set_from_basic_set(hull);
1128 return add_node(graph, set, nvar, 1, hull_set, compress, decompress);
1129 error:
1130 isl_basic_set_free(hull);
1131 isl_set_free(set);
1132 return isl_stat_error;
1135 struct isl_extract_edge_data {
1136 enum isl_edge_type type;
1137 struct isl_sched_graph *graph;
1140 /* Merge edge2 into edge1, freeing the contents of edge2.
1141 * Return 0 on success and -1 on failure.
1143 * edge1 and edge2 are assumed to have the same value for the map field.
1145 static int merge_edge(struct isl_sched_edge *edge1,
1146 struct isl_sched_edge *edge2)
1148 edge1->types |= edge2->types;
1149 isl_map_free(edge2->map);
1151 if (is_condition(edge2)) {
1152 if (!edge1->tagged_condition)
1153 edge1->tagged_condition = edge2->tagged_condition;
1154 else
1155 edge1->tagged_condition =
1156 isl_union_map_union(edge1->tagged_condition,
1157 edge2->tagged_condition);
1160 if (is_conditional_validity(edge2)) {
1161 if (!edge1->tagged_validity)
1162 edge1->tagged_validity = edge2->tagged_validity;
1163 else
1164 edge1->tagged_validity =
1165 isl_union_map_union(edge1->tagged_validity,
1166 edge2->tagged_validity);
1169 if (is_condition(edge2) && !edge1->tagged_condition)
1170 return -1;
1171 if (is_conditional_validity(edge2) && !edge1->tagged_validity)
1172 return -1;
1174 return 0;
1177 /* Insert dummy tags in domain and range of "map".
1179 * In particular, if "map" is of the form
1181 * A -> B
1183 * then return
1185 * [A -> dummy_tag] -> [B -> dummy_tag]
1187 * where the dummy_tags are identical and equal to any dummy tags
1188 * introduced by any other call to this function.
1190 static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
1192 static char dummy;
1193 isl_ctx *ctx;
1194 isl_id *id;
1195 isl_space *space;
1196 isl_set *domain, *range;
1198 ctx = isl_map_get_ctx(map);
1200 id = isl_id_alloc(ctx, NULL, &dummy);
1201 space = isl_space_params(isl_map_get_space(map));
1202 space = isl_space_set_from_params(space);
1203 space = isl_space_set_tuple_id(space, isl_dim_set, id);
1204 space = isl_space_map_from_set(space);
1206 domain = isl_map_wrap(map);
1207 range = isl_map_wrap(isl_map_universe(space));
1208 map = isl_map_from_domain_and_range(domain, range);
1209 map = isl_map_zip(map);
1211 return map;
1214 /* Given that at least one of "src" or "dst" is compressed, return
1215 * a map between the spaces of these nodes restricted to the affine
1216 * hull that was used in the compression.
1218 static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
1219 struct isl_sched_node *dst)
1221 isl_set *dom, *ran;
1223 if (src->compressed)
1224 dom = isl_set_copy(src->hull);
1225 else
1226 dom = isl_set_universe(isl_space_copy(src->space));
1227 if (dst->compressed)
1228 ran = isl_set_copy(dst->hull);
1229 else
1230 ran = isl_set_universe(isl_space_copy(dst->space));
1232 return isl_map_from_domain_and_range(dom, ran);
1235 /* Intersect the domains of the nested relations in domain and range
1236 * of "tagged" with "map".
1238 static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
1239 __isl_keep isl_map *map)
1241 isl_set *set;
1243 tagged = isl_map_zip(tagged);
1244 set = isl_map_wrap(isl_map_copy(map));
1245 tagged = isl_map_intersect_domain(tagged, set);
1246 tagged = isl_map_zip(tagged);
1247 return tagged;
1250 /* Return a pointer to the node that lives in the domain space of "map",
1251 * an invalid node if there is no such node, or NULL in case of error.
1253 static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
1254 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1256 struct isl_sched_node *node;
1257 isl_space *space;
1259 space = isl_space_domain(isl_map_get_space(map));
1260 node = graph_find_node(ctx, graph, space);
1261 isl_space_free(space);
1263 return node;
1266 /* Return a pointer to the node that lives in the range space of "map",
1267 * an invalid node if there is no such node, or NULL in case of error.
1269 static struct isl_sched_node *find_range_node(isl_ctx *ctx,
1270 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1272 struct isl_sched_node *node;
1273 isl_space *space;
1275 space = isl_space_range(isl_map_get_space(map));
1276 node = graph_find_node(ctx, graph, space);
1277 isl_space_free(space);
1279 return node;
1282 /* Refrain from adding a new edge based on "map".
1283 * Instead, just free the map.
1284 * "tagged" is either a copy of "map" with additional tags or NULL.
1286 static isl_stat skip_edge(__isl_take isl_map *map, __isl_take isl_map *tagged)
1288 isl_map_free(map);
1289 isl_map_free(tagged);
1291 return isl_stat_ok;
1294 /* Add a new edge to the graph based on the given map
1295 * and add it to data->graph->edge_table[data->type].
1296 * If a dependence relation of a given type happens to be identical
1297 * to one of the dependence relations of a type that was added before,
1298 * then we don't create a new edge, but instead mark the original edge
1299 * as also representing a dependence of the current type.
1301 * Edges of type isl_edge_condition or isl_edge_conditional_validity
1302 * may be specified as "tagged" dependence relations. That is, "map"
1303 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1304 * the dependence on iterations and a and b are tags.
1305 * edge->map is set to the relation containing the elements i -> j,
1306 * while edge->tagged_condition and edge->tagged_validity contain
1307 * the union of all the "map" relations
1308 * for which extract_edge is called that result in the same edge->map.
1310 * If the source or the destination node is compressed, then
1311 * intersect both "map" and "tagged" with the constraints that
1312 * were used to construct the compression.
1313 * This ensures that there are no schedule constraints defined
1314 * outside of these domains, while the scheduler no longer has
1315 * any control over those outside parts.
1317 static isl_stat extract_edge(__isl_take isl_map *map, void *user)
1319 isl_bool empty;
1320 isl_ctx *ctx = isl_map_get_ctx(map);
1321 struct isl_extract_edge_data *data = user;
1322 struct isl_sched_graph *graph = data->graph;
1323 struct isl_sched_node *src, *dst;
1324 struct isl_sched_edge *edge;
1325 isl_map *tagged = NULL;
1327 if (data->type == isl_edge_condition ||
1328 data->type == isl_edge_conditional_validity) {
1329 if (isl_map_can_zip(map)) {
1330 tagged = isl_map_copy(map);
1331 map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
1332 } else {
1333 tagged = insert_dummy_tags(isl_map_copy(map));
1337 src = find_domain_node(ctx, graph, map);
1338 dst = find_range_node(ctx, graph, map);
1340 if (!src || !dst)
1341 goto error;
1342 if (!is_node(graph, src) || !is_node(graph, dst))
1343 return skip_edge(map, tagged);
1345 if (src->compressed || dst->compressed) {
1346 isl_map *hull;
1347 hull = extract_hull(src, dst);
1348 if (tagged)
1349 tagged = map_intersect_domains(tagged, hull);
1350 map = isl_map_intersect(map, hull);
1353 empty = isl_map_plain_is_empty(map);
1354 if (empty < 0)
1355 goto error;
1356 if (empty)
1357 return skip_edge(map, tagged);
1359 graph->edge[graph->n_edge].src = src;
1360 graph->edge[graph->n_edge].dst = dst;
1361 graph->edge[graph->n_edge].map = map;
1362 graph->edge[graph->n_edge].types = 0;
1363 graph->edge[graph->n_edge].tagged_condition = NULL;
1364 graph->edge[graph->n_edge].tagged_validity = NULL;
1365 set_type(&graph->edge[graph->n_edge], data->type);
1366 if (data->type == isl_edge_condition)
1367 graph->edge[graph->n_edge].tagged_condition =
1368 isl_union_map_from_map(tagged);
1369 if (data->type == isl_edge_conditional_validity)
1370 graph->edge[graph->n_edge].tagged_validity =
1371 isl_union_map_from_map(tagged);
1373 edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
1374 if (!edge) {
1375 graph->n_edge++;
1376 return isl_stat_error;
1378 if (edge == &graph->edge[graph->n_edge])
1379 return graph_edge_table_add(ctx, graph, data->type,
1380 &graph->edge[graph->n_edge++]);
1382 if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
1383 return isl_stat_error;
1385 return graph_edge_table_add(ctx, graph, data->type, edge);
1386 error:
1387 isl_map_free(map);
1388 isl_map_free(tagged);
1389 return isl_stat_error;
1392 /* Initialize the schedule graph "graph" from the schedule constraints "sc".
1394 * The context is included in the domain before the nodes of
1395 * the graph are extracted in order to be able to exploit
1396 * any possible additional equalities.
1397 * Note that this intersection is only performed locally here.
1399 static isl_stat graph_init(struct isl_sched_graph *graph,
1400 __isl_keep isl_schedule_constraints *sc)
1402 isl_ctx *ctx;
1403 isl_union_set *domain;
1404 isl_union_map *c;
1405 struct isl_extract_edge_data data;
1406 enum isl_edge_type i;
1407 isl_stat r;
1409 if (!sc)
1410 return isl_stat_error;
1412 ctx = isl_schedule_constraints_get_ctx(sc);
1414 domain = isl_schedule_constraints_get_domain(sc);
1415 graph->n = isl_union_set_n_set(domain);
1416 isl_union_set_free(domain);
1418 if (graph_alloc(ctx, graph, graph->n,
1419 isl_schedule_constraints_n_map(sc)) < 0)
1420 return isl_stat_error;
1422 if (compute_max_row(graph, sc) < 0)
1423 return isl_stat_error;
1424 graph->root = graph;
1425 graph->n = 0;
1426 domain = isl_schedule_constraints_get_domain(sc);
1427 domain = isl_union_set_intersect_params(domain,
1428 isl_schedule_constraints_get_context(sc));
1429 r = isl_union_set_foreach_set(domain, &extract_node, graph);
1430 isl_union_set_free(domain);
1431 if (r < 0)
1432 return isl_stat_error;
1433 if (graph_init_table(ctx, graph) < 0)
1434 return isl_stat_error;
1435 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1436 c = isl_schedule_constraints_get(sc, i);
1437 graph->max_edge[i] = isl_union_map_n_map(c);
1438 isl_union_map_free(c);
1439 if (!c)
1440 return isl_stat_error;
1442 if (graph_init_edge_tables(ctx, graph) < 0)
1443 return isl_stat_error;
1444 graph->n_edge = 0;
1445 data.graph = graph;
1446 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1447 isl_stat r;
1449 data.type = i;
1450 c = isl_schedule_constraints_get(sc, i);
1451 r = isl_union_map_foreach_map(c, &extract_edge, &data);
1452 isl_union_map_free(c);
1453 if (r < 0)
1454 return isl_stat_error;
1457 return isl_stat_ok;
1460 /* Check whether there is any dependence from node[j] to node[i]
1461 * or from node[i] to node[j].
1463 static isl_bool node_follows_weak(int i, int j, void *user)
1465 isl_bool f;
1466 struct isl_sched_graph *graph = user;
1468 f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
1469 if (f < 0 || f)
1470 return f;
1471 return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
1474 /* Check whether there is a (conditional) validity dependence from node[j]
1475 * to node[i], forcing node[i] to follow node[j].
1477 static isl_bool node_follows_strong(int i, int j, void *user)
1479 struct isl_sched_graph *graph = user;
1481 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
1484 /* Use Tarjan's algorithm for computing the strongly connected components
1485 * in the dependence graph only considering those edges defined by "follows".
1487 static isl_stat detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
1488 isl_bool (*follows)(int i, int j, void *user))
1490 int i, n;
1491 struct isl_tarjan_graph *g = NULL;
1493 g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
1494 if (!g)
1495 return isl_stat_error;
1497 graph->scc = 0;
1498 i = 0;
1499 n = graph->n;
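/* g->order lists the node indices of each component consecutively,
 * with the individual components separated by -1 entries.
 */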
1500 while (n) {
1501 while (g->order[i] != -1) {
1502 graph->node[g->order[i]].scc = graph->scc;
1503 --n;
1504 ++i;
1506 ++i;
1507 graph->scc++;
1510 isl_tarjan_graph_free(g);
1512 return isl_stat_ok;
1515 /* Apply Tarjan's algorithm to detect the strongly connected components
1516 * in the dependence graph.
1517 * Only consider the (conditional) validity dependences and clear "weak".
1519 static isl_stat detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1521 graph->weak = 0;
1522 return detect_ccs(ctx, graph, &node_follows_strong);
1525 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1526 * in the dependence graph.
1527 * Consider all dependences and set "weak".
1529 static isl_stat detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1531 graph->weak = 1;
1532 return detect_ccs(ctx, graph, &node_follows_weak);
1535 static int cmp_scc(const void *a, const void *b, void *data)
1537 struct isl_sched_graph *graph = data;
1538 const int *i1 = a;
1539 const int *i2 = b;
1541 return graph->node[*i1].scc - graph->node[*i2].scc;
1544 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1546 static int sort_sccs(struct isl_sched_graph *graph)
1548 return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
1551 /* Return a non-parametric set in the compressed space of "node" that is
1552 * bounded by the size in each direction
1554 * { [x] : -S_i <= x_i <= S_i }
1556 * If S_i is infinity in direction i, then there are no constraints
1557 * in that direction.
1559 * Cache the result in node->bounds.
1561 static __isl_give isl_basic_set *get_size_bounds(struct isl_sched_node *node)
1563 isl_space *space;
1564 isl_basic_set *bounds;
1565 int i;
1567 if (node->bounds)
1568 return isl_basic_set_copy(node->bounds);
1570 if (node->compressed)
1571 space = isl_multi_aff_get_domain_space(node->decompress);
1572 else
1573 space = isl_space_copy(node->space);
1574 space = isl_space_drop_all_params(space);
1575 bounds = isl_basic_set_universe(space);
1577 for (i = 0; i < node->nvar; ++i) {
1578 isl_val *size;
1580 size = isl_multi_val_get_val(node->sizes, i);
1581 if (!size)
1582 return isl_basic_set_free(bounds);
1583 if (!isl_val_is_int(size)) {
1584 isl_val_free(size);
1585 continue;
1587 bounds = isl_basic_set_upper_bound_val(bounds, isl_dim_set, i,
1588 isl_val_copy(size));
1589 bounds = isl_basic_set_lower_bound_val(bounds, isl_dim_set, i,
1590 isl_val_neg(size));
1593 node->bounds = isl_basic_set_copy(bounds);
1594 return bounds;
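/* For example, if node->sizes is (10, infinity), then the cached bounds are
 * { [x0, x1] : -10 <= x0 <= 10 }, with no constraint in the unbounded
 * direction.
 */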
1597 /* Drop some constraints from "delta" that could be exploited
1598 * to construct loop coalescing schedules.
1599 * In particular, drop those constraints that bound the difference
1600 * to the size of the domain.
1601 * First project out the parameters to improve the effectiveness.
1603 static __isl_give isl_set *drop_coalescing_constraints(
1604 __isl_take isl_set *delta, struct isl_sched_node *node)
1606 isl_size nparam;
1607 isl_basic_set *bounds;
1609 nparam = isl_set_dim(delta, isl_dim_param);
1610 if (nparam < 0)
1611 return isl_set_free(delta);
1613 bounds = get_size_bounds(node);
1615 delta = isl_set_project_out(delta, isl_dim_param, 0, nparam);
1616 delta = isl_set_remove_divs(delta);
1617 delta = isl_set_plain_gist_basic_set(delta, bounds);
1618 return delta;
1621 /* Given a dependence relation R from "node" to itself,
1622 * construct the set of coefficients of valid constraints for elements
1623 * in that dependence relation.
1624 * In particular, the result contains tuples of coefficients
1625 * c_0, c_n, c_x such that
1627 * c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1629 * or, equivalently,
1631 * c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1633 * We choose here to compute the dual of delta R.
1634 * Alternatively, we could have computed the dual of R, resulting
1635 * in a set of tuples c_0, c_n, c_x, c_y, and then
1636 * plugged in (c_0, c_n, c_x, -c_x).
1638 * If "need_param" is set, then the resulting coefficients effectively
1639 * include coefficients for the parameters c_n. Otherwise, they may
1640 * have been projected out already.
1641 * Since the constraints may be different for these two cases,
1642 * they are stored in separate caches.
1643 * In particular, if no parameter coefficients are required and
1644 * the schedule_treat_coalescing option is set, then the parameters
1645 * are projected out and some constraints that could be exploited
1646 * to construct coalescing schedules are removed before the dual
1647 * is computed.
1649 * If "node" has been compressed, then the dependence relation
1650 * is also compressed before the set of coefficients is computed.
1652 static __isl_give isl_basic_set *intra_coefficients(
1653 struct isl_sched_graph *graph, struct isl_sched_node *node,
1654 __isl_take isl_map *map, int need_param)
1656 isl_ctx *ctx;
1657 isl_set *delta;
1658 isl_map *key;
1659 isl_basic_set *coef;
1660 isl_maybe_isl_basic_set m;
1661 isl_map_to_basic_set **hmap = &graph->intra_hmap;
1662 int treat;
1664 if (!map)
1665 return NULL;
1667 ctx = isl_map_get_ctx(map);
1668 treat = !need_param && isl_options_get_schedule_treat_coalescing(ctx);
1669 if (!treat)
1670 hmap = &graph->intra_hmap_param;
1671 m = isl_map_to_basic_set_try_get(*hmap, map);
1672 if (m.valid < 0 || m.valid) {
1673 isl_map_free(map);
1674 return m.value;
1677 key = isl_map_copy(map);
1678 if (node->compressed) {
1679 map = isl_map_preimage_domain_multi_aff(map,
1680 isl_multi_aff_copy(node->decompress));
1681 map = isl_map_preimage_range_multi_aff(map,
1682 isl_multi_aff_copy(node->decompress));
1684 delta = isl_map_deltas(map);
1685 if (treat)
1686 delta = drop_coalescing_constraints(delta, node);
1687 delta = isl_set_remove_divs(delta);
1688 coef = isl_set_coefficients(delta);
1689 *hmap = isl_map_to_basic_set_set(*hmap, key, isl_basic_set_copy(coef));
1691 return coef;
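/* For instance, if delta R is { [d] : d >= 1 } and there are no parameters,
 * then the valid constraints c_0 + c_x d >= 0 are exactly those with
 * c_x >= 0 and c_0 + c_x >= 0, so the returned set encodes
 * { [c_0, c_x] : c_x >= 0 and c_0 + c_x >= 0 } in the coefficients[...]
 * space computed by isl_set_coefficients.
 */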
1694 /* Given a dependence relation R, construct the set of coefficients
1695 * of valid constraints for elements in that dependence relation.
1696 * In particular, the result contains tuples of coefficients
1697 * c_0, c_n, c_x, c_y such that
1699 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1701 * If the source or destination nodes of "edge" have been compressed,
1702 * then the dependence relation is also compressed before
1703 * the set of coefficients is computed.
1705 static __isl_give isl_basic_set *inter_coefficients(
1706 struct isl_sched_graph *graph, struct isl_sched_edge *edge,
1707 __isl_take isl_map *map)
1709 isl_set *set;
1710 isl_map *key;
1711 isl_basic_set *coef;
1712 isl_maybe_isl_basic_set m;
1714 m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
1715 if (m.valid < 0 || m.valid) {
1716 isl_map_free(map);
1717 return m.value;
1720 key = isl_map_copy(map);
1721 if (edge->src->compressed)
1722 map = isl_map_preimage_domain_multi_aff(map,
1723 isl_multi_aff_copy(edge->src->decompress));
1724 if (edge->dst->compressed)
1725 map = isl_map_preimage_range_multi_aff(map,
1726 isl_multi_aff_copy(edge->dst->decompress));
1727 set = isl_map_wrap(isl_map_remove_divs(map));
1728 coef = isl_set_coefficients(set);
1729 graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
1730 isl_basic_set_copy(coef));
1732 return coef;
1735 /* Return the position of the coefficients of the variables in
1736 * the coefficient constraints "coef".
1738 * The space of "coef" is of the form
1740 * { coefficients[[cst, params] -> S] }
1742 * Return the position of S.
1744 static isl_size coef_var_offset(__isl_keep isl_basic_set *coef)
1746 isl_size offset;
1747 isl_space *space;
1749 space = isl_space_unwrap(isl_basic_set_get_space(coef));
1750 offset = isl_space_dim(space, isl_dim_in);
1751 isl_space_free(space);
1753 return offset;
1756 /* Return the offset of the coefficient of the constant term of "node"
1757 * within the (I)LP.
1759 * Within each node, the coefficients have the following order:
1760 * - positive and negative parts of c_i_x
1761 * - c_i_n (if parametric)
1762 * - c_i_0
1764 static int node_cst_coef_offset(struct isl_sched_node *node)
1766 return node->start + 2 * node->nvar + node->nparam;
1769 /* Return the offset of the coefficients of the parameters of "node"
1770 * within the (I)LP.
1772 * Within each node, the coefficients have the following order:
1773 * - positive and negative parts of c_i_x
1774 * - c_i_n (if parametric)
1775 * - c_i_0
1777 static int node_par_coef_offset(struct isl_sched_node *node)
1779 return node->start + 2 * node->nvar;
1782 /* Return the offset of the coefficients of the variables of "node"
1783 * within the (I)LP.
1785 * Within each node, the coefficients have the following order:
1786 * - positive and negative parts of c_i_x
1787 * - c_i_n (if parametric)
1788 * - c_i_0
1790 static int node_var_coef_offset(struct isl_sched_node *node)
1792 return node->start;
1795 /* Return the position of the pair of variables encoding
1796 * coefficient "i" of "node".
1798 * The order of these variable pairs is the opposite of
1799 * that of the coefficients, with 2 variables per coefficient.
1801 static int node_var_coef_pos(struct isl_sched_node *node, int i)
1803 return node_var_coef_offset(node) + 2 * (node->nvar - 1 - i);
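/* For example, with nvar = 3, the pairs for coefficients 0, 1 and 2 start at
 * node->start + 4, node->start + 2 and node->start, each pair holding
 * c_i_x^- followed by c_i_x^+; the parameter coefficients start at
 * node->start + 6 and the constant term sits at
 * node->start + 6 + node->nparam.
 */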
1806 /* Construct an isl_dim_map for mapping constraints on coefficients
1807 * for "node" to the corresponding positions in graph->lp.
1808 * "offset" is the offset of the coefficients for the variables
1809 * in the input constraints.
1810 * "s" is the sign of the mapping.
1812 * The input constraints are given in terms of the coefficients
1813 * (c_0, c_x) or (c_0, c_n, c_x).
1814 * The mapping produced by this function essentially plugs in
1815 * (0, c_i_x^+ - c_i_x^-) if s = 1 and
1816 * (0, -c_i_x^+ + c_i_x^-) if s = -1 or
1817 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
1818 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
1819 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1820 * Furthermore, the order of these pairs is the opposite of that
1821 * of the corresponding coefficients.
1823 * The caller can extend the mapping to also map the other coefficients
1824 * (and therefore not plug in 0).
1826 static __isl_give isl_dim_map *intra_dim_map(isl_ctx *ctx,
1827 struct isl_sched_graph *graph, struct isl_sched_node *node,
1828 int offset, int s)
1830 int pos;
1831 isl_size total;
1832 isl_dim_map *dim_map;
1834 total = isl_basic_set_dim(graph->lp, isl_dim_all);
1835 if (!node || total < 0)
1836 return NULL;
1838 pos = node_var_coef_pos(node, 0);
1839 dim_map = isl_dim_map_alloc(ctx, total);
1840 isl_dim_map_range(dim_map, pos, -2, offset, 1, node->nvar, -s);
1841 isl_dim_map_range(dim_map, pos + 1, -2, offset, 1, node->nvar, s);
1843 return dim_map;
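/* As an illustration, for a node with nvar = 2 and s = 1,
 * an input coefficient constraint
 *
 *	3 c_x[0] - 2 c_x[1] >= 0
 *
 * is mapped to
 *
 *	3 (c_i_x[0]^+ - c_i_x[0]^-) - 2 (c_i_x[1]^+ - c_i_x[1]^-) >= 0
 *
 * on the LP variables, with 0 effectively plugged in for the constant
 * term (and for any parameter coefficients in the input).
 */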
1846 /* Construct an isl_dim_map for mapping constraints on coefficients
1847 * for "src" (node i) and "dst" (node j) to the corresponding positions
1848 * in graph->lp.
1849 * "offset" is the offset of the coefficients for the variables of "src"
1850 * in the input constraints.
1851 * "s" is the sign of the mapping.
1853 * The input constraints are given in terms of the coefficients
1854 * (c_0, c_n, c_x, c_y).
1855 * The mapping produced by this function essentially plugs in
1856 * (c_j_0 - c_i_0, c_j_n - c_i_n,
1857 * -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-) if s = 1 and
1858 * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
1859 * c_i_x^+ - c_i_x^-, -(c_j_x^+ - c_j_x^-)) if s = -1.
1860 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1861 * Furthermore, the order of these pairs is the opposite of that
1862 * of the corresponding coefficients.
1864 * The caller can further extend the mapping.
1866 static __isl_give isl_dim_map *inter_dim_map(isl_ctx *ctx,
1867 struct isl_sched_graph *graph, struct isl_sched_node *src,
1868 struct isl_sched_node *dst, int offset, int s)
1870 int pos;
1871 isl_size total;
1872 isl_dim_map *dim_map;
1874 total = isl_basic_set_dim(graph->lp, isl_dim_all);
1875 if (!src || !dst || total < 0)
1876 return NULL;
1878 dim_map = isl_dim_map_alloc(ctx, total);
1880 pos = node_cst_coef_offset(dst);
1881 isl_dim_map_range(dim_map, pos, 0, 0, 0, 1, s);
1882 pos = node_par_coef_offset(dst);
1883 isl_dim_map_range(dim_map, pos, 1, 1, 1, dst->nparam, s);
1884 pos = node_var_coef_pos(dst, 0);
1885 isl_dim_map_range(dim_map, pos, -2, offset + src->nvar, 1,
1886 dst->nvar, -s);
1887 isl_dim_map_range(dim_map, pos + 1, -2, offset + src->nvar, 1,
1888 dst->nvar, s);
1890 pos = node_cst_coef_offset(src);
1891 isl_dim_map_range(dim_map, pos, 0, 0, 0, 1, -s);
1892 pos = node_par_coef_offset(src);
1893 isl_dim_map_range(dim_map, pos, 1, 1, 1, src->nparam, -s);
1894 pos = node_var_coef_pos(src, 0);
1895 isl_dim_map_range(dim_map, pos, -2, offset, 1, src->nvar, s);
1896 isl_dim_map_range(dim_map, pos + 1, -2, offset, 1, src->nvar, -s);
1898 return dim_map;
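/* As an illustration, for a pair of nodes with a single variable and
 * a single parameter each and s = 1, an input coefficient constraint
 *
 *	c_0 + c_n + c_y - c_x >= 0
 *
 * is mapped to
 *
 *	(c_j_0 - c_i_0) + (c_j_n - c_i_n) +
 *		(c_j_x^+ - c_j_x^-) + (c_i_x^+ - c_i_x^-) >= 0
 *
 * on the LP variables.
 */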
1901 /* Add the constraints from "src" to "dst" using "dim_map",
1902 * after making sure there is enough room in "dst" for the extra constraints.
1904 static __isl_give isl_basic_set *add_constraints_dim_map(
1905 __isl_take isl_basic_set *dst, __isl_take isl_basic_set *src,
1906 __isl_take isl_dim_map *dim_map)
1908 int n_eq, n_ineq;
1910 n_eq = isl_basic_set_n_equality(src);
1911 n_ineq = isl_basic_set_n_inequality(src);
1912 dst = isl_basic_set_extend_constraints(dst, n_eq, n_ineq);
1913 dst = isl_basic_set_add_constraints_dim_map(dst, src, dim_map);
1914 return dst;
1917 /* Add constraints to graph->lp that force validity for the given
1918 * dependence from a node i to itself.
1919 * That is, add constraints that enforce
1921 * (c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
1922 * = c_i_x (y - x) >= 0
1924 * for each (x,y) in R.
1925 * We obtain general constraints on coefficients (c_0, c_x)
1926 * of valid constraints for (y - x) and then plug in (0, c_i_x^+ - c_i_x^-),
1927 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
1928 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1929 * Note that the result of intra_coefficients may also contain
1930 * parameter coefficients c_n, in which case 0 is plugged in for them as well.
1932 static isl_stat add_intra_validity_constraints(struct isl_sched_graph *graph,
1933 struct isl_sched_edge *edge)
1935 isl_size offset;
1936 isl_map *map = isl_map_copy(edge->map);
1937 isl_ctx *ctx = isl_map_get_ctx(map);
1938 isl_dim_map *dim_map;
1939 isl_basic_set *coef;
1940 struct isl_sched_node *node = edge->src;
1942 coef = intra_coefficients(graph, node, map, 0);
1944 offset = coef_var_offset(coef);
1945 if (offset < 0)
1946 coef = isl_basic_set_free(coef);
1947 if (!coef)
1948 return isl_stat_error;
1950 dim_map = intra_dim_map(ctx, graph, node, offset, 1);
1951 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
1953 return isl_stat_ok;
1956 /* Add constraints to graph->lp that force validity for the given
1957 * dependence from node i to node j.
1958 * That is, add constraints that enforce
1960 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
1962 * for each (x,y) in R.
1963 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1964 * of valid constraints for R and then plug in
1965 * (c_j_0 - c_i_0, c_j_n - c_i_n, -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-),
1966 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
1967 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1969 static isl_stat add_inter_validity_constraints(struct isl_sched_graph *graph,
1970 struct isl_sched_edge *edge)
1972 isl_size offset;
1973 isl_map *map;
1974 isl_ctx *ctx;
1975 isl_dim_map *dim_map;
1976 isl_basic_set *coef;
1977 struct isl_sched_node *src = edge->src;
1978 struct isl_sched_node *dst = edge->dst;
1980 if (!graph->lp)
1981 return isl_stat_error;
1983 map = isl_map_copy(edge->map);
1984 ctx = isl_map_get_ctx(map);
1985 coef = inter_coefficients(graph, edge, map);
1987 offset = coef_var_offset(coef);
1988 if (offset < 0)
1989 coef = isl_basic_set_free(coef);
1990 if (!coef)
1991 return isl_stat_error;
1993 dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
1995 edge->start = graph->lp->n_ineq;
1996 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
1997 if (!graph->lp)
1998 return isl_stat_error;
1999 edge->end = graph->lp->n_ineq;
2001 return isl_stat_ok;
2004 /* Add constraints to graph->lp that bound the dependence distance for the given
2005 * dependence from a node i to itself.
2006 * If s = 1, we add the constraint
2008 * c_i_x (y - x) <= m_0 + m_n n
2010 * or
2012 * -c_i_x (y - x) + m_0 + m_n n >= 0
2014 * for each (x,y) in R.
2015 * If s = -1, we add the constraint
2017 * -c_i_x (y - x) <= m_0 + m_n n
2019 * or
2021 * c_i_x (y - x) + m_0 + m_n n >= 0
2023 * for each (x,y) in R.
2024 * We obtain general constraints on coefficients (c_0, c_n, c_x)
2025 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
2026 * with each coefficient (except m_0) represented as a pair of non-negative
2027 * coefficients.
2030 * If "local" is set, then we add constraints
2032 * c_i_x (y - x) <= 0
2034 * or
2036 * -c_i_x (y - x) <= 0
2038 * instead, forcing the dependence distance to be (less than or) equal to 0.
2039  * That is, we plug in (0, 0, -s * c_i_x).  Note that intra_coefficients
2040  * is not required to have c_n in its result when "local" is set;
2041  * if c_n is missing, then (0, -s * c_i_x) is plugged in.
2042 * Note that dependences marked local are treated as validity constraints
2043 * by add_all_validity_constraints and therefore also have
2044 * their distances bounded by 0 from below.
2046 static isl_stat add_intra_proximity_constraints(struct isl_sched_graph *graph,
2047 struct isl_sched_edge *edge, int s, int local)
2049 isl_size offset;
2050 isl_size nparam;
2051 isl_map *map = isl_map_copy(edge->map);
2052 isl_ctx *ctx = isl_map_get_ctx(map);
2053 isl_dim_map *dim_map;
2054 isl_basic_set *coef;
2055 struct isl_sched_node *node = edge->src;
2057 coef = intra_coefficients(graph, node, map, !local);
2058 nparam = isl_space_dim(node->space, isl_dim_param);
2060 offset = coef_var_offset(coef);
2061 if (nparam < 0 || offset < 0)
2062 coef = isl_basic_set_free(coef);
2063 if (!coef)
2064 return isl_stat_error;
2066 dim_map = intra_dim_map(ctx, graph, node, offset, -s);
2068 if (!local) {
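/* Plug in m_0 for the constant term (position 1 of the LP)
 * and m_n = m_n^+ - m_n^- for the parameter coefficients
 * (pairs starting at position 4); see setup_lp for the layout.
 */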
2069 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2070 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2071 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2073 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2075 return isl_stat_ok;
2078 /* Add constraints to graph->lp that bound the dependence distance for the given
2079 * dependence from node i to node j.
2080 * If s = 1, we add the constraint
2082 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
2083 * <= m_0 + m_n n
2085 * or
2087 * -(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
2088 * m_0 + m_n n >= 0
2090 * for each (x,y) in R.
2091 * If s = -1, we add the constraint
2093 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
2094 * <= m_0 + m_n n
2096 * or
2098 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
2099 * m_0 + m_n n >= 0
2101 * for each (x,y) in R.
2102 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2103 * of valid constraints for R and then plug in
2104 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
2105 * s*c_i_x, -s*c_j_x)
2106 * with each coefficient (except m_0, c_*_0 and c_*_n)
2107 * represented as a pair of non-negative coefficients.
2110 * If "local" is set (and s = 1), then we add constraints
2112 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
2114 * or
2116  * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) >= 0
2118 * instead, forcing the dependence distance to be (less than or) equal to 0.
2119 * That is, we plug in
2120 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, s*c_i_x, -s*c_j_x).
2121 * Note that dependences marked local are treated as validity constraints
2122 * by add_all_validity_constraints and therefore also have
2123 * their distances bounded by 0 from below.
2125 static isl_stat add_inter_proximity_constraints(struct isl_sched_graph *graph,
2126 struct isl_sched_edge *edge, int s, int local)
2128 isl_size offset;
2129 isl_size nparam;
2130 isl_map *map = isl_map_copy(edge->map);
2131 isl_ctx *ctx = isl_map_get_ctx(map);
2132 isl_dim_map *dim_map;
2133 isl_basic_set *coef;
2134 struct isl_sched_node *src = edge->src;
2135 struct isl_sched_node *dst = edge->dst;
2137 coef = inter_coefficients(graph, edge, map);
2138 nparam = isl_space_dim(src->space, isl_dim_param);
2140 offset = coef_var_offset(coef);
2141 if (nparam < 0 || offset < 0)
2142 coef = isl_basic_set_free(coef);
2143 if (!coef)
2144 return isl_stat_error;
2146 dim_map = inter_dim_map(ctx, graph, src, dst, offset, -s);
2148 if (!local) {
2149 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2150 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2151 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2154 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2156 return isl_stat_ok;
2159 /* Should the distance over "edge" be forced to zero?
2160 * That is, is it marked as a local edge?
2161 * If "use_coincidence" is set, then coincidence edges are treated
2162 * as local edges.
2164 static int force_zero(struct isl_sched_edge *edge, int use_coincidence)
2166 return is_local(edge) || (use_coincidence && is_coincidence(edge));
2169 /* Add all validity constraints to graph->lp.
2171 * An edge that is forced to be local needs to have its dependence
2172 * distances equal to zero. We take care of bounding them by 0 from below
2173 * here. add_all_proximity_constraints takes care of bounding them by 0
2174 * from above.
2176 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2177 * Otherwise, we ignore them.
2179 static int add_all_validity_constraints(struct isl_sched_graph *graph,
2180 int use_coincidence)
2182 int i;
2184 for (i = 0; i < graph->n_edge; ++i) {
2185 struct isl_sched_edge *edge = &graph->edge[i];
2186 int zero;
2188 zero = force_zero(edge, use_coincidence);
2189 if (!is_validity(edge) && !zero)
2190 continue;
2191 if (edge->src != edge->dst)
2192 continue;
2193 if (add_intra_validity_constraints(graph, edge) < 0)
2194 return -1;
2197 for (i = 0; i < graph->n_edge; ++i) {
2198 struct isl_sched_edge *edge = &graph->edge[i];
2199 int zero;
2201 zero = force_zero(edge, use_coincidence);
2202 if (!is_validity(edge) && !zero)
2203 continue;
2204 if (edge->src == edge->dst)
2205 continue;
2206 if (add_inter_validity_constraints(graph, edge) < 0)
2207 return -1;
2210 return 0;
2213 /* Add constraints to graph->lp that bound the dependence distance
2214 * for all dependence relations.
2215 * If a given proximity dependence is identical to a validity
2216 * dependence, then the dependence distance is already bounded
2217 * from below (by zero), so we only need to bound the distance
2218 * from above. (This includes the case of "local" dependences
2219  * which are treated as validity dependences by add_all_validity_constraints.)
2220 * Otherwise, we need to bound the distance both from above and from below.
2222 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2223 * Otherwise, we ignore them.
2225 static int add_all_proximity_constraints(struct isl_sched_graph *graph,
2226 int use_coincidence)
2228 int i;
2230 for (i = 0; i < graph->n_edge; ++i) {
2231 struct isl_sched_edge *edge = &graph->edge[i];
2232 int zero;
2234 zero = force_zero(edge, use_coincidence);
2235 if (!is_proximity(edge) && !zero)
2236 continue;
2237 if (edge->src == edge->dst &&
2238 add_intra_proximity_constraints(graph, edge, 1, zero) < 0)
2239 return -1;
2240 if (edge->src != edge->dst &&
2241 add_inter_proximity_constraints(graph, edge, 1, zero) < 0)
2242 return -1;
2243 if (is_validity(edge) || zero)
2244 continue;
2245 if (edge->src == edge->dst &&
2246 add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
2247 return -1;
2248 if (edge->src != edge->dst &&
2249 add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
2250 return -1;
2253 return 0;
2256 /* Normalize the rows of "indep" such that all rows are lexicographically
2257 * positive and such that each row contains as many final zeros as possible,
2258 * given the choice for the previous rows.
2259 * Do this by performing elementary row operations.
2261 static __isl_give isl_mat *normalize_independent(__isl_take isl_mat *indep)
2263 indep = isl_mat_reverse_gauss(indep);
2264 indep = isl_mat_lexnonneg_rows(indep);
2265 return indep;
2268 /* Extract the linear part of the current schedule for node "node".
2270 static __isl_give isl_mat *extract_linear_schedule(struct isl_sched_node *node)
2272 int n_row = isl_mat_rows(node->sched);
2274 return isl_mat_sub_alloc(node->sched, 0, n_row,
2275 1 + node->nparam, node->nvar);
2278 /* Compute a basis for the rows in the linear part of the schedule
2279 * and extend this basis to a full basis. The remaining rows
2280 * can then be used to force linear independence from the rows
2281 * in the schedule.
2283 * In particular, given the schedule rows S, we compute
2285 * S = H Q
2286 * S U = H
2288 * with H the Hermite normal form of S. That is, all but the
2289 * first rank columns of H are zero and so each row in S is
2290 * a linear combination of the first rank rows of Q.
2291 * The matrix Q can be used as a variable transformation
2292 * that isolates the directions of S in the first rank rows.
2293 * Transposing S U = H yields
2295 * U^T S^T = H^T
2297 * with all but the first rank rows of H^T zero.
2298 * The last rows of U^T are therefore linear combinations
2299 * of schedule coefficients that are all zero on schedule
2300 * coefficients that are linearly dependent on the rows of S.
2301 * At least one of these combinations is non-zero on
2302 * linearly independent schedule coefficients.
2303 * The rows are normalized to involve as few of the last
2304 * coefficients as possible and to have a positive initial value.
2306 static int node_update_vmap(struct isl_sched_node *node)
2308 isl_mat *H, *U, *Q;
2310 H = extract_linear_schedule(node);
2312 H = isl_mat_left_hermite(H, 0, &U, &Q);
2313 isl_mat_free(node->indep);
2314 isl_mat_free(node->vmap);
2315 node->vmap = Q;
2316 node->indep = isl_mat_transpose(U);
2317 node->rank = isl_mat_initial_non_zero_cols(H);
2318 node->indep = isl_mat_drop_rows(node->indep, 0, node->rank);
2319 node->indep = normalize_independent(node->indep);
2320 isl_mat_free(H);
2322 if (!node->indep || !node->vmap || node->rank < 0)
2323 return -1;
2324 return 0;
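/* As an illustration, for a node with three variables and a single
 * schedule row S = [ 1 1 0 ], the decomposition yields rank 1 and,
 * up to the exact normalization performed by normalize_independent,
 *
 *	indep = [ 1 -1 0 ]
 *	        [ 0  0 1 ]
 *
 * so that a new row with coefficients c is linearly independent of S
 * as soon as c[0] - c[1] or c[2] is non-zero.
 */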
2327 /* Is "edge" marked as a validity or a conditional validity edge?
2329 static int is_any_validity(struct isl_sched_edge *edge)
2331 return is_validity(edge) || is_conditional_validity(edge);
2334 /* How many times should we count the constraints in "edge"?
2336 * We count as follows
2337 * validity -> 1 (>= 0)
2338 * validity+proximity -> 2 (>= 0 and upper bound)
2339 * proximity -> 2 (lower and upper bound)
2340 * local(+any) -> 2 (>= 0 and <= 0)
2342 * If an edge is only marked conditional_validity then it counts
2343 * as zero since it is only checked afterwards.
2345 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2346 * Otherwise, we ignore them.
2348 static int edge_multiplicity(struct isl_sched_edge *edge, int use_coincidence)
2350 if (is_proximity(edge) || force_zero(edge, use_coincidence))
2351 return 2;
2352 if (is_validity(edge))
2353 return 1;
2354 return 0;
2357 /* How many times should the constraints in "edge" be counted
2358 * as a parametric intra-node constraint?
2360 * Only proximity edges that are not forced zero need
2361 * coefficient constraints that include coefficients for parameters.
2362 * If the edge is also a validity edge, then only
2363 * an upper bound is introduced. Otherwise, both lower and upper bounds
2364 * are introduced.
2366 static int parametric_intra_edge_multiplicity(struct isl_sched_edge *edge,
2367 int use_coincidence)
2369 if (edge->src != edge->dst)
2370 return 0;
2371 if (!is_proximity(edge))
2372 return 0;
2373 if (force_zero(edge, use_coincidence))
2374 return 0;
2375 if (is_validity(edge))
2376 return 1;
2377 else
2378 return 2;
2381 /* Add "f" times the number of equality and inequality constraints of "bset"
2382 * to "n_eq" and "n_ineq" and free "bset".
2384 static isl_stat update_count(__isl_take isl_basic_set *bset,
2385 int f, int *n_eq, int *n_ineq)
2387 if (!bset)
2388 return isl_stat_error;
2390 *n_eq += isl_basic_set_n_equality(bset);
2391 *n_ineq += isl_basic_set_n_inequality(bset);
2392 isl_basic_set_free(bset);
2394 return isl_stat_ok;
2397 /* Count the number of equality and inequality constraints
2398 * that will be added for the given map.
2400 * The edges that require parameter coefficients are counted separately.
2402 * "use_coincidence" is set if we should take into account coincidence edges.
2404 static isl_stat count_map_constraints(struct isl_sched_graph *graph,
2405 struct isl_sched_edge *edge, __isl_take isl_map *map,
2406 int *n_eq, int *n_ineq, int use_coincidence)
2408 isl_map *copy;
2409 isl_basic_set *coef;
2410 int f = edge_multiplicity(edge, use_coincidence);
2411 int fp = parametric_intra_edge_multiplicity(edge, use_coincidence);
2413 if (f == 0) {
2414 isl_map_free(map);
2415 return isl_stat_ok;
2418 if (edge->src != edge->dst) {
2419 coef = inter_coefficients(graph, edge, map);
2420 return update_count(coef, f, n_eq, n_ineq);
2423 if (fp > 0) {
2424 copy = isl_map_copy(map);
2425 coef = intra_coefficients(graph, edge->src, copy, 1);
2426 if (update_count(coef, fp, n_eq, n_ineq) < 0)
2427 goto error;
2430 if (f > fp) {
2431 copy = isl_map_copy(map);
2432 coef = intra_coefficients(graph, edge->src, copy, 0);
2433 if (update_count(coef, f - fp, n_eq, n_ineq) < 0)
2434 goto error;
2437 isl_map_free(map);
2438 return isl_stat_ok;
2439 error:
2440 isl_map_free(map);
2441 return isl_stat_error;
2444 /* Count the number of equality and inequality constraints
2445 * that will be added to the main lp problem.
2446 * We count as follows
2447 * validity -> 1 (>= 0)
2448 * validity+proximity -> 2 (>= 0 and upper bound)
2449 * proximity -> 2 (lower and upper bound)
2450 * local(+any) -> 2 (>= 0 and <= 0)
2452 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2453 * Otherwise, we ignore them.
2455 static int count_constraints(struct isl_sched_graph *graph,
2456 int *n_eq, int *n_ineq, int use_coincidence)
2458 int i;
2460 *n_eq = *n_ineq = 0;
2461 for (i = 0; i < graph->n_edge; ++i) {
2462 struct isl_sched_edge *edge = &graph->edge[i];
2463 isl_map *map = isl_map_copy(edge->map);
2465 if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
2466 use_coincidence) < 0)
2467 return -1;
2470 return 0;
2473 /* Count the number of constraints that will be added by
2474 * add_bound_constant_constraints to bound the values of the constant terms
2475 * and increment *n_eq and *n_ineq accordingly.
2477 * In practice, add_bound_constant_constraints only adds inequalities.
2479 static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
2480 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2482 if (isl_options_get_schedule_max_constant_term(ctx) == -1)
2483 return isl_stat_ok;
2485 *n_ineq += graph->n;
2487 return isl_stat_ok;
2490 /* Add constraints to bound the values of the constant terms in the schedule,
2491 * if requested by the user.
2493 * The maximal value of the constant terms is defined by the option
2494 * "schedule_max_constant_term".
2496 static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
2497 struct isl_sched_graph *graph)
2499 int i, k;
2500 int max;
2501 isl_size total;
2503 max = isl_options_get_schedule_max_constant_term(ctx);
2504 if (max == -1)
2505 return isl_stat_ok;
2507 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2508 if (total < 0)
2509 return isl_stat_error;
2511 for (i = 0; i < graph->n; ++i) {
2512 struct isl_sched_node *node = &graph->node[i];
2513 int pos;
2515 k = isl_basic_set_alloc_inequality(graph->lp);
2516 if (k < 0)
2517 return isl_stat_error;
2518 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2519 pos = node_cst_coef_offset(node);
2520 isl_int_set_si(graph->lp->ineq[k][1 + pos], -1);
2521 isl_int_set_si(graph->lp->ineq[k][0], max);
2524 return isl_stat_ok;
2527 /* Count the number of constraints that will be added by
2528 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
2529 * accordingly.
2531 * In practice, add_bound_coefficient_constraints only adds inequalities.
2533 static int count_bound_coefficient_constraints(isl_ctx *ctx,
2534 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2536 int i;
2538 if (isl_options_get_schedule_max_coefficient(ctx) == -1 &&
2539 !isl_options_get_schedule_treat_coalescing(ctx))
2540 return 0;
2542 for (i = 0; i < graph->n; ++i)
2543 *n_ineq += graph->node[i].nparam + 2 * graph->node[i].nvar;
2545 return 0;
2548 /* Add constraints to graph->lp that bound the values of
2549 * the parameter schedule coefficients of "node" to "max" and
2550 * the variable schedule coefficients to the corresponding entry
2551 * in node->max.
2552 * In either case, a negative value means that no bound needs to be imposed.
2554 * For parameter coefficients, this amounts to adding a constraint
2556 * c_n <= max
2558 * i.e.,
2560 * -c_n + max >= 0
2562  * The variable coefficients are, however, not represented directly.
2563 * Instead, the variable coefficients c_x are written as differences
2564 * c_x = c_x^+ - c_x^-.
2565 * That is,
2567 * -max_i <= c_x_i <= max_i
2569 * is encoded as
2571 * -max_i <= c_x_i^+ - c_x_i^- <= max_i
2573 * or
2575 * -(c_x_i^+ - c_x_i^-) + max_i >= 0
2576 * c_x_i^+ - c_x_i^- + max_i >= 0
2578 static isl_stat node_add_coefficient_constraints(isl_ctx *ctx,
2579 struct isl_sched_graph *graph, struct isl_sched_node *node, int max)
2581 int i, j, k;
2582 isl_size total;
2583 isl_vec *ineq;
2585 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2586 if (total < 0)
2587 return isl_stat_error;
2589 for (j = 0; j < node->nparam; ++j) {
2590 int dim;
2592 if (max < 0)
2593 continue;
2595 k = isl_basic_set_alloc_inequality(graph->lp);
2596 if (k < 0)
2597 return isl_stat_error;
2598 dim = 1 + node_par_coef_offset(node) + j;
2599 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2600 isl_int_set_si(graph->lp->ineq[k][dim], -1);
2601 isl_int_set_si(graph->lp->ineq[k][0], max);
2604 ineq = isl_vec_alloc(ctx, 1 + total);
2605 ineq = isl_vec_clr(ineq);
2606 if (!ineq)
2607 return isl_stat_error;
2608 for (i = 0; i < node->nvar; ++i) {
2609 int pos = 1 + node_var_coef_pos(node, i);
2611 if (isl_int_is_neg(node->max->el[i]))
2612 continue;
2614 isl_int_set_si(ineq->el[pos], 1);
2615 isl_int_set_si(ineq->el[pos + 1], -1);
2616 isl_int_set(ineq->el[0], node->max->el[i]);
2618 k = isl_basic_set_alloc_inequality(graph->lp);
2619 if (k < 0)
2620 goto error;
2621 isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
2623 isl_seq_neg(ineq->el + pos, ineq->el + pos, 2);
2624 k = isl_basic_set_alloc_inequality(graph->lp);
2625 if (k < 0)
2626 goto error;
2627 isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
2629 isl_seq_clr(ineq->el + pos, 2);
2631 isl_vec_free(ineq);
2633 return isl_stat_ok;
2634 error:
2635 isl_vec_free(ineq);
2636 return isl_stat_error;
2639 /* Add constraints that bound the values of the variable and parameter
2640 * coefficients of the schedule.
2642 * The maximal value of the coefficients is defined by the option
2643 * 'schedule_max_coefficient' and the entries in node->max.
2644 * These latter entries are only set if either the schedule_max_coefficient
2645 * option or the schedule_treat_coalescing option is set.
2647 static isl_stat add_bound_coefficient_constraints(isl_ctx *ctx,
2648 struct isl_sched_graph *graph)
2650 int i;
2651 int max;
2653 max = isl_options_get_schedule_max_coefficient(ctx);
2655 if (max == -1 && !isl_options_get_schedule_treat_coalescing(ctx))
2656 return isl_stat_ok;
2658 for (i = 0; i < graph->n; ++i) {
2659 struct isl_sched_node *node = &graph->node[i];
2661 if (node_add_coefficient_constraints(ctx, graph, node, max) < 0)
2662 return isl_stat_error;
2665 return isl_stat_ok;
2668 /* Add a constraint to graph->lp that equates the value at position
2669 * "sum_pos" to the sum of the "n" values starting at "first".
2671 static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
2672 int sum_pos, int first, int n)
2674 int i, k;
2675 isl_size total;
2677 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2678 if (total < 0)
2679 return isl_stat_error;
2681 k = isl_basic_set_alloc_equality(graph->lp);
2682 if (k < 0)
2683 return isl_stat_error;
2684 isl_seq_clr(graph->lp->eq[k], 1 + total);
2685 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2686 for (i = 0; i < n; ++i)
2687 isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);
2689 return isl_stat_ok;
2692 /* Add a constraint to graph->lp that equates the value at position
2693 * "sum_pos" to the sum of the parameter coefficients of all nodes.
2695 static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
2696 int sum_pos)
2698 int i, j, k;
2699 isl_size total;
2701 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2702 if (total < 0)
2703 return isl_stat_error;
2705 k = isl_basic_set_alloc_equality(graph->lp);
2706 if (k < 0)
2707 return isl_stat_error;
2708 isl_seq_clr(graph->lp->eq[k], 1 + total);
2709 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2710 for (i = 0; i < graph->n; ++i) {
2711 int pos = 1 + node_par_coef_offset(&graph->node[i]);
2713 for (j = 0; j < graph->node[i].nparam; ++j)
2714 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2717 return isl_stat_ok;
2720 /* Add a constraint to graph->lp that equates the value at position
2721 * "sum_pos" to the sum of the variable coefficients of all nodes.
2723 static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
2724 int sum_pos)
2726 int i, j, k;
2727 isl_size total;
2729 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2730 if (total < 0)
2731 return isl_stat_error;
2733 k = isl_basic_set_alloc_equality(graph->lp);
2734 if (k < 0)
2735 return isl_stat_error;
2736 isl_seq_clr(graph->lp->eq[k], 1 + total);
2737 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2738 for (i = 0; i < graph->n; ++i) {
2739 struct isl_sched_node *node = &graph->node[i];
2740 int pos = 1 + node_var_coef_offset(node);
2742 for (j = 0; j < 2 * node->nvar; ++j)
2743 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2746 return isl_stat_ok;
2749 /* Construct an ILP problem for finding schedule coefficients
2750 * that result in non-negative, but small dependence distances
2751 * over all dependences.
2752 * In particular, the dependence distances over proximity edges
2753 * are bounded by m_0 + m_n n and we compute schedule coefficients
2754 * with small values (preferably zero) of m_n and m_0.
2756 * All variables of the ILP are non-negative. The actual coefficients
2757 * may be negative, so each coefficient is represented as the difference
2758 * of two non-negative variables. The negative part always appears
2759 * immediately before the positive part.
2760 * Other than that, the variables have the following order
2762 * - sum of positive and negative parts of m_n coefficients
2763 * - m_0
2764 * - sum of all c_n coefficients
2765 * (unconstrained when computing non-parametric schedules)
2766 * - sum of positive and negative parts of all c_x coefficients
2767 * - positive and negative parts of m_n coefficients
2768 * - for each node
2769 * - positive and negative parts of c_i_x, in opposite order
2770 * - c_i_n (if parametric)
2771 * - c_i_0
2773 * The constraints are those from the edges plus two or three equalities
2774 * to express the sums.
2776 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2777 * Otherwise, we ignore them.
2779 static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
2780 int use_coincidence)
2782 int i;
2783 isl_size nparam;
2784 unsigned total;
2785 isl_space *space;
2786 int parametric;
2787 int param_pos;
2788 int n_eq, n_ineq;
2790 parametric = ctx->opt->schedule_parametric;
2791 nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
2792 if (nparam < 0)
2793 return isl_stat_error;
2794 param_pos = 4;
2795 total = param_pos + 2 * nparam;
2796 for (i = 0; i < graph->n; ++i) {
2797 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
2798 if (node_update_vmap(node) < 0)
2799 return isl_stat_error;
2800 node->start = total;
2801 total += 1 + node->nparam + 2 * node->nvar;
2804 if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
2805 return isl_stat_error;
2806 if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2807 return isl_stat_error;
2808 if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2809 return isl_stat_error;
2811 space = isl_space_set_alloc(ctx, 0, total);
2812 isl_basic_set_free(graph->lp);
2813 n_eq += 2 + parametric;
2815 graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
2817 if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
2818 return isl_stat_error;
2819 if (parametric && add_param_sum_constraint(graph, 2) < 0)
2820 return isl_stat_error;
2821 if (add_var_sum_constraint(graph, 3) < 0)
2822 return isl_stat_error;
2823 if (add_bound_constant_constraints(ctx, graph) < 0)
2824 return isl_stat_error;
2825 if (add_bound_coefficient_constraints(ctx, graph) < 0)
2826 return isl_stat_error;
2827 if (add_all_validity_constraints(graph, use_coincidence) < 0)
2828 return isl_stat_error;
2829 if (add_all_proximity_constraints(graph, use_coincidence) < 0)
2830 return isl_stat_error;
2832 return isl_stat_ok;
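/* As an illustration of the layout constructed by setup_lp, consider
 * a hypothetical graph over a single parameter with two nodes that
 * each have nvar = 1 and nparam = 1.  The LP variables are then
 *
 *	0: sum of the m_n parts		1: m_0
 *	2: sum of all c_n		3: sum of all c_x parts
 *	4: m_n^-			5: m_n^+
 *	6-9: c_x^-, c_x^+, c_n, c_0 of the first node (in sorted order)
 *	10-13: c_x^-, c_x^+, c_n, c_0 of the second node
 *
 * for a total of 14 variables, with "start" equal to 6 and 10
 * for the two nodes.
 */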
2835 /* Analyze the conflicting constraint found by
2836 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
2837  * constraint of one of the edges between distinct nodes, living, moreover,
2838 * in distinct SCCs, then record the source and sink SCC as this may
2839 * be a good place to cut between SCCs.
2841 static int check_conflict(int con, void *user)
2843 int i;
2844 struct isl_sched_graph *graph = user;
2846 if (graph->src_scc >= 0)
2847 return 0;
2849 con -= graph->lp->n_eq;
2851 if (con >= graph->lp->n_ineq)
2852 return 0;
2854 for (i = 0; i < graph->n_edge; ++i) {
2855 if (!is_validity(&graph->edge[i]))
2856 continue;
2857 if (graph->edge[i].src == graph->edge[i].dst)
2858 continue;
2859 if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
2860 continue;
2861 if (graph->edge[i].start > con)
2862 continue;
2863 if (graph->edge[i].end <= con)
2864 continue;
2865 graph->src_scc = graph->edge[i].src->scc;
2866 graph->dst_scc = graph->edge[i].dst->scc;
2869 return 0;
2872 /* Check whether the next schedule row of the given node needs to be
2873 * non-trivial. Lower-dimensional domains may have some trivial rows,
2874 * but as soon as the number of remaining required non-trivial rows
2875  * is as large as the number of remaining rows to be computed,
2876 * all remaining rows need to be non-trivial.
2878 static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
2880 return node->nvar - node->rank >= graph->maxvar - graph->n_row;
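/* For example, if graph->maxvar = 3 and graph->n_row = 1, then a node
 * with nvar = 2 and rank = 1 satisfies 2 - 1 < 3 - 1, so its next row
 * may still be trivial.  If its rank is still 1 once graph->n_row
 * reaches 2, then 2 - 1 >= 3 - 2 and all remaining rows of this node
 * need to be non-trivial.
 */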
2883 /* Construct a non-triviality region with triviality directions
2884 * corresponding to the rows of "indep".
2885 * The rows of "indep" are expressed in terms of the schedule coefficients c_i,
2886 * while the triviality directions are expressed in terms of
2887 * pairs of non-negative variables c^+_i - c^-_i, with c^-_i appearing
2888 * before c^+_i. Furthermore,
2889 * the pairs of non-negative variables representing the coefficients
2890 * are stored in the opposite order.
2892 static __isl_give isl_mat *construct_trivial(__isl_keep isl_mat *indep)
2894 isl_ctx *ctx;
2895 isl_mat *mat;
2896 int i, j, n, n_var;
2898 if (!indep)
2899 return NULL;
2901 ctx = isl_mat_get_ctx(indep);
2902 n = isl_mat_rows(indep);
2903 n_var = isl_mat_cols(indep);
2904 mat = isl_mat_alloc(ctx, n, 2 * n_var);
2905 if (!mat)
2906 return NULL;
2907 for (i = 0; i < n; ++i) {
2908 for (j = 0; j < n_var; ++j) {
2909 int nj = n_var - 1 - j;
2910 isl_int_neg(mat->row[i][2 * nj], indep->row[i][j]);
2911 isl_int_set(mat->row[i][2 * nj + 1], indep->row[i][j]);
2915 return mat;
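/* As an illustration, for n_var = 3 the "indep" row [ 1 -1 0 ]
 * is turned into the triviality direction
 *
 *	[ 0 0 1 -1 -1 1 ]
 *
 * over the pairs (c[2]^-, c[2]^+, c[1]^-, c[1]^+, c[0]^-, c[0]^+),
 * which evaluates to (c[0]^+ - c[0]^-) - (c[1]^+ - c[1]^-) = c[0] - c[1].
 */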
2918 /* Solve the ILP problem constructed in setup_lp.
2919 * For each node such that all the remaining rows of its schedule
2920 * need to be non-trivial, we construct a non-triviality region.
2921 * This region imposes that the next row is independent of previous rows.
2922 * In particular, the non-triviality region enforces that at least
2923 * one of the linear combinations in the rows of node->indep is non-zero.
2925 static __isl_give isl_vec *solve_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
2927 int i;
2928 isl_vec *sol;
2929 isl_basic_set *lp;
2931 for (i = 0; i < graph->n; ++i) {
2932 struct isl_sched_node *node = &graph->node[i];
2933 isl_mat *trivial;
2935 graph->region[i].pos = node_var_coef_offset(node);
2936 if (needs_row(graph, node))
2937 trivial = construct_trivial(node->indep);
2938 else
2939 trivial = isl_mat_zero(ctx, 0, 0);
2940 graph->region[i].trivial = trivial;
2942 lp = isl_basic_set_copy(graph->lp);
2943 sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
2944 graph->region, &check_conflict, graph);
2945 for (i = 0; i < graph->n; ++i)
2946 isl_mat_free(graph->region[i].trivial);
2947 return sol;
2950 /* Extract the coefficients for the variables of "node" from "sol".
2952 * Each schedule coefficient c_i_x is represented as the difference
2953 * between two non-negative variables c_i_x^+ - c_i_x^-.
2954 * The c_i_x^- appear before their c_i_x^+ counterpart.
2955 * Furthermore, the order of these pairs is the opposite of that
2956 * of the corresponding coefficients.
2958 * Return c_i_x = c_i_x^+ - c_i_x^-
2960 static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
2961 __isl_keep isl_vec *sol)
2963 int i;
2964 int pos;
2965 isl_vec *csol;
2967 if (!sol)
2968 return NULL;
2969 csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
2970 if (!csol)
2971 return NULL;
2973 pos = 1 + node_var_coef_offset(node);
2974 for (i = 0; i < node->nvar; ++i)
2975 isl_int_sub(csol->el[node->nvar - 1 - i],
2976 sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);
2978 return csol;
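/* For example, for a node with nvar = 2 and node_var_coef_offset()
 * equal to 10, the pairs are read back in reverse order:
 * csol->el[1] = sol->el[12] - sol->el[11] recovers c_i_x[1] and
 * csol->el[0] = sol->el[14] - sol->el[13] recovers c_i_x[0].
 */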
2981 /* Update the schedules of all nodes based on the given solution
2982 * of the LP problem.
2983 * The new row is added to the current band.
2984 * All possibly negative coefficients are encoded as a difference
2985 * of two non-negative variables, so we need to perform the subtraction
2986 * here.
2988 * If coincident is set, then the caller guarantees that the new
2989 * row satisfies the coincidence constraints.
2991 static int update_schedule(struct isl_sched_graph *graph,
2992 __isl_take isl_vec *sol, int coincident)
2994 int i, j;
2995 isl_vec *csol = NULL;
2997 if (!sol)
2998 goto error;
2999 if (sol->size == 0)
3000 isl_die(sol->ctx, isl_error_internal,
3001 "no solution found", goto error);
3002 if (graph->n_total_row >= graph->max_row)
3003 isl_die(sol->ctx, isl_error_internal,
3004 "too many schedule rows", goto error);
3006 for (i = 0; i < graph->n; ++i) {
3007 struct isl_sched_node *node = &graph->node[i];
3008 int pos;
3009 int row = isl_mat_rows(node->sched);
3011 isl_vec_free(csol);
3012 csol = extract_var_coef(node, sol);
3013 if (!csol)
3014 goto error;
3016 isl_map_free(node->sched_map);
3017 node->sched_map = NULL;
3018 node->sched = isl_mat_add_rows(node->sched, 1);
3019 if (!node->sched)
3020 goto error;
3021 pos = node_cst_coef_offset(node);
3022 node->sched = isl_mat_set_element(node->sched,
3023 row, 0, sol->el[1 + pos]);
3024 pos = node_par_coef_offset(node);
3025 for (j = 0; j < node->nparam; ++j)
3026 node->sched = isl_mat_set_element(node->sched,
3027 row, 1 + j, sol->el[1 + pos + j]);
3028 for (j = 0; j < node->nvar; ++j)
3029 node->sched = isl_mat_set_element(node->sched,
3030 row, 1 + node->nparam + j, csol->el[j]);
3031 node->coincident[graph->n_total_row] = coincident;
3033 isl_vec_free(sol);
3034 isl_vec_free(csol);
3036 graph->n_row++;
3037 graph->n_total_row++;
3039 return 0;
3040 error:
3041 isl_vec_free(sol);
3042 isl_vec_free(csol);
3043 return -1;
3046 /* Convert row "row" of node->sched into an isl_aff living in "ls"
3047 * and return this isl_aff.
3049 static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
3050 struct isl_sched_node *node, int row)
3052 int j;
3053 isl_int v;
3054 isl_aff *aff;
3056 isl_int_init(v);
3058 aff = isl_aff_zero_on_domain(ls);
3059 if (isl_mat_get_element(node->sched, row, 0, &v) < 0)
3060 goto error;
3061 aff = isl_aff_set_constant(aff, v);
3062 for (j = 0; j < node->nparam; ++j) {
3063 if (isl_mat_get_element(node->sched, row, 1 + j, &v) < 0)
3064 goto error;
3065 aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
3067 for (j = 0; j < node->nvar; ++j) {
3068 if (isl_mat_get_element(node->sched, row,
3069 1 + node->nparam + j, &v) < 0)
3070 goto error;
3071 aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
3074 isl_int_clear(v);
3076 return aff;
3077 error:
3078 isl_int_clear(v);
3079 isl_aff_free(aff);
3080 return NULL;
3083 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
3084 * and return this multi_aff.
3086 * The result is defined over the uncompressed node domain.
3088 static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
3089 struct isl_sched_node *node, int first, int n)
3091 int i;
3092 isl_space *space;
3093 isl_local_space *ls;
3094 isl_aff *aff;
3095 isl_multi_aff *ma;
3096 int nrow;
3098 if (!node)
3099 return NULL;
3100 nrow = isl_mat_rows(node->sched);
3101 if (node->compressed)
3102 space = isl_multi_aff_get_domain_space(node->decompress);
3103 else
3104 space = isl_space_copy(node->space);
3105 ls = isl_local_space_from_space(isl_space_copy(space));
3106 space = isl_space_from_domain(space);
3107 space = isl_space_add_dims(space, isl_dim_out, n);
3108 ma = isl_multi_aff_zero(space);
3110 for (i = first; i < first + n; ++i) {
3111 aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
3112 ma = isl_multi_aff_set_aff(ma, i - first, aff);
3115 isl_local_space_free(ls);
3117 if (node->compressed)
3118 ma = isl_multi_aff_pullback_multi_aff(ma,
3119 isl_multi_aff_copy(node->compress));
3121 return ma;
3124 /* Convert node->sched into a multi_aff and return this multi_aff.
3126 * The result is defined over the uncompressed node domain.
3128 static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
3129 struct isl_sched_node *node)
3131 int nrow;
3133 nrow = isl_mat_rows(node->sched);
3134 return node_extract_partial_schedule_multi_aff(node, 0, nrow);
3137 /* Convert node->sched into a map and return this map.
3139 * The result is cached in node->sched_map, which needs to be released
3140 * whenever node->sched is updated.
3141 * It is defined over the uncompressed node domain.
3143 static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
3145 if (!node->sched_map) {
3146 isl_multi_aff *ma;
3148 ma = node_extract_schedule_multi_aff(node);
3149 node->sched_map = isl_map_from_multi_aff(ma);
3152 return isl_map_copy(node->sched_map);
3155 /* Construct a map that can be used to update a dependence relation
3156 * based on the current schedule.
3157 * That is, construct a map expressing that source and sink
3158 * are executed within the same iteration of the current schedule.
3159 * This map can then be intersected with the dependence relation.
3160 * This is not the most efficient way, but this shouldn't be a critical
3161 * operation.
3163 static __isl_give isl_map *specializer(struct isl_sched_node *src,
3164 struct isl_sched_node *dst)
3166 isl_map *src_sched, *dst_sched;
3168 src_sched = node_extract_schedule(src);
3169 dst_sched = node_extract_schedule(dst);
3170 return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
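/* For example, if both nodes have so far been assigned the single
 * schedule row [i] -> [i], then the specializer is
 * { [i] -> [i'] : i' = i }.  Intersecting a dependence
 * { [i] -> [i + 1] } with it yields the empty relation:
 * the dependence is carried completely by the current schedule and
 * update_edge below will drop it from the edge tables.
 */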
3173 /* Intersect the domains of the nested relations in domain and range
3174 * of "umap" with "map".
3176 static __isl_give isl_union_map *intersect_domains(
3177 __isl_take isl_union_map *umap, __isl_keep isl_map *map)
3179 isl_union_set *uset;
3181 umap = isl_union_map_zip(umap);
3182 uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
3183 umap = isl_union_map_intersect_domain(umap, uset);
3184 umap = isl_union_map_zip(umap);
3185 return umap;
3188 /* Update the dependence relation of the given edge based
3189 * on the current schedule.
3190 * If the dependence is carried completely by the current schedule, then
3191 * it is removed from the edge_tables. It is kept in the list of edges
3192 * as otherwise all edge_tables would have to be recomputed.
3194 * If the edge is of a type that can appear multiple times
3195 * between the same pair of nodes, then it is added to
3196 * the edge table (again). This prevents the situation
3197 * where none of these edges is referenced from the edge table
3198 * because the one that was referenced turned out to be empty and
3199 * was therefore removed from the table.
3201 static isl_stat update_edge(isl_ctx *ctx, struct isl_sched_graph *graph,
3202 struct isl_sched_edge *edge)
3204 int empty;
3205 isl_map *id;
3207 id = specializer(edge->src, edge->dst);
3208 edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
3209 if (!edge->map)
3210 goto error;
3212 if (edge->tagged_condition) {
3213 edge->tagged_condition =
3214 intersect_domains(edge->tagged_condition, id);
3215 if (!edge->tagged_condition)
3216 goto error;
3218 if (edge->tagged_validity) {
3219 edge->tagged_validity =
3220 intersect_domains(edge->tagged_validity, id);
3221 if (!edge->tagged_validity)
3222 goto error;
3225 empty = isl_map_plain_is_empty(edge->map);
3226 if (empty < 0)
3227 goto error;
3228 if (empty) {
3229 graph_remove_edge(graph, edge);
3230 } else if (is_multi_edge_type(edge)) {
3231 if (graph_edge_tables_add(ctx, graph, edge) < 0)
3232 goto error;
3235 isl_map_free(id);
3236 return isl_stat_ok;
3237 error:
3238 isl_map_free(id);
3239 return isl_stat_error;
3242 /* Does the domain of "umap" intersect "uset"?
3244 static int domain_intersects(__isl_keep isl_union_map *umap,
3245 __isl_keep isl_union_set *uset)
3247 int empty;
3249 umap = isl_union_map_copy(umap);
3250 umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
3251 empty = isl_union_map_is_empty(umap);
3252 isl_union_map_free(umap);
3254 return empty < 0 ? -1 : !empty;
3257 /* Does the range of "umap" intersect "uset"?
3259 static int range_intersects(__isl_keep isl_union_map *umap,
3260 __isl_keep isl_union_set *uset)
3262 int empty;
3264 umap = isl_union_map_copy(umap);
3265 umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
3266 empty = isl_union_map_is_empty(umap);
3267 isl_union_map_free(umap);
3269 return empty < 0 ? -1 : !empty;
3272 /* Are the condition dependences of "edge" local with respect to
3273 * the current schedule?
3275 * That is, are domain and range of the condition dependences mapped
3276 * to the same point?
3278 * In other words, is the condition false?
3280 static int is_condition_false(struct isl_sched_edge *edge)
3282 isl_union_map *umap;
3283 isl_map *map, *sched, *test;
3284 int empty, local;
3286 empty = isl_union_map_is_empty(edge->tagged_condition);
3287 if (empty < 0 || empty)
3288 return empty;
3290 umap = isl_union_map_copy(edge->tagged_condition);
3291 umap = isl_union_map_zip(umap);
3292 umap = isl_union_set_unwrap(isl_union_map_domain(umap));
3293 map = isl_map_from_union_map(umap);
3295 sched = node_extract_schedule(edge->src);
3296 map = isl_map_apply_domain(map, sched);
3297 sched = node_extract_schedule(edge->dst);
3298 map = isl_map_apply_range(map, sched);
3300 test = isl_map_identity(isl_map_get_space(map));
3301 local = isl_map_is_subset(map, test);
3302 isl_map_free(map);
3303 isl_map_free(test);
3305 return local;
3308 /* For each conditional validity constraint that is adjacent
3309 * to a condition with domain in condition_source or range in condition_sink,
3310 * turn it into an unconditional validity constraint.
3312 static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
3313 __isl_take isl_union_set *condition_source,
3314 __isl_take isl_union_set *condition_sink)
3316 int i;
3318 condition_source = isl_union_set_coalesce(condition_source);
3319 condition_sink = isl_union_set_coalesce(condition_sink);
3321 for (i = 0; i < graph->n_edge; ++i) {
3322 int adjacent;
3323 isl_union_map *validity;
3325 if (!is_conditional_validity(&graph->edge[i]))
3326 continue;
3327 if (is_validity(&graph->edge[i]))
3328 continue;
3330 validity = graph->edge[i].tagged_validity;
3331 adjacent = domain_intersects(validity, condition_sink);
3332 if (adjacent >= 0 && !adjacent)
3333 adjacent = range_intersects(validity, condition_source);
3334 if (adjacent < 0)
3335 goto error;
3336 if (!adjacent)
3337 continue;
3339 set_validity(&graph->edge[i]);
3342 isl_union_set_free(condition_source);
3343 isl_union_set_free(condition_sink);
3344 return 0;
3345 error:
3346 isl_union_set_free(condition_source);
3347 isl_union_set_free(condition_sink);
3348 return -1;
3351 /* Update the dependence relations of all edges based on the current schedule
3352 * and enforce conditional validity constraints that are adjacent
3353 * to satisfied condition constraints.
3355 * First check if any of the condition constraints are satisfied
3356 * (i.e., not local to the outer schedule) and keep track of
3357 * their domain and range.
3358 * Then update all dependence relations (which removes the non-local
3359 * constraints).
3360 * Finally, if any condition constraints turned out to be satisfied,
3361 * then turn all adjacent conditional validity constraints into
3362 * unconditional validity constraints.
3364 static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
3366 int i;
3367 int any = 0;
3368 isl_union_set *source, *sink;
3370 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3371 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3372 for (i = 0; i < graph->n_edge; ++i) {
3373 int local;
3374 isl_union_set *uset;
3375 isl_union_map *umap;
3377 if (!is_condition(&graph->edge[i]))
3378 continue;
3379 if (is_local(&graph->edge[i]))
3380 continue;
3381 local = is_condition_false(&graph->edge[i]);
3382 if (local < 0)
3383 goto error;
3384 if (local)
3385 continue;
3387 any = 1;
3389 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3390 uset = isl_union_map_domain(umap);
3391 source = isl_union_set_union(source, uset);
3393 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3394 uset = isl_union_map_range(umap);
3395 sink = isl_union_set_union(sink, uset);
3398 for (i = 0; i < graph->n_edge; ++i) {
3399 if (update_edge(ctx, graph, &graph->edge[i]) < 0)
3400 goto error;
3403 if (any)
3404 return unconditionalize_adjacent_validity(graph, source, sink);
3406 isl_union_set_free(source);
3407 isl_union_set_free(sink);
3408 return 0;
3409 error:
3410 isl_union_set_free(source);
3411 isl_union_set_free(sink);
3412 return -1;
3415 static void next_band(struct isl_sched_graph *graph)
3417 graph->band_start = graph->n_total_row;
3420 /* Return the union of the universe domains of the nodes in "graph"
3421 * that satisfy "pred".
3423 static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
3424 struct isl_sched_graph *graph,
3425 int (*pred)(struct isl_sched_node *node, int data), int data)
3427 int i;
3428 isl_set *set;
3429 isl_union_set *dom;
3431 for (i = 0; i < graph->n; ++i)
3432 if (pred(&graph->node[i], data))
3433 break;
3435 if (i >= graph->n)
3436 isl_die(ctx, isl_error_internal,
3437 "empty component", return NULL);
3439 set = isl_set_universe(isl_space_copy(graph->node[i].space));
3440 dom = isl_union_set_from_set(set);
3442 for (i = i + 1; i < graph->n; ++i) {
3443 if (!pred(&graph->node[i], data))
3444 continue;
3445 set = isl_set_universe(isl_space_copy(graph->node[i].space));
3446 dom = isl_union_set_union(dom, isl_union_set_from_set(set));
3449 return dom;
3452 /* Return a list of unions of universe domains, where each element
3453 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
3455 static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
3456 struct isl_sched_graph *graph)
3458 int i;
3459 isl_union_set_list *filters;
3461 filters = isl_union_set_list_alloc(ctx, graph->scc);
3462 for (i = 0; i < graph->scc; ++i) {
3463 isl_union_set *dom;
3465 dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
3466 filters = isl_union_set_list_add(filters, dom);
3469 return filters;
3472 /* Return a list of two unions of universe domains, one for the SCCs up
3473 * to and including graph->src_scc and another for the other SCCs.
3475 static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
3476 struct isl_sched_graph *graph)
3478 isl_union_set *dom;
3479 isl_union_set_list *filters;
3481 filters = isl_union_set_list_alloc(ctx, 2);
3482 dom = isl_sched_graph_domain(ctx, graph,
3483 &node_scc_at_most, graph->src_scc);
3484 filters = isl_union_set_list_add(filters, dom);
3485 dom = isl_sched_graph_domain(ctx, graph,
3486 &node_scc_at_least, graph->src_scc + 1);
3487 filters = isl_union_set_list_add(filters, dom);
3489 return filters;
3492 /* Copy nodes that satisfy node_pred from the src dependence graph
3493 * to the dst dependence graph.
3495 static isl_stat copy_nodes(struct isl_sched_graph *dst,
3496 struct isl_sched_graph *src,
3497 int (*node_pred)(struct isl_sched_node *node, int data), int data)
3499 int i;
3501 dst->n = 0;
3502 for (i = 0; i < src->n; ++i) {
3503 int j;
3505 if (!node_pred(&src->node[i], data))
3506 continue;
3508 j = dst->n;
3509 dst->node[j].space = isl_space_copy(src->node[i].space);
3510 dst->node[j].compressed = src->node[i].compressed;
3511 dst->node[j].hull = isl_set_copy(src->node[i].hull);
3512 dst->node[j].compress =
3513 isl_multi_aff_copy(src->node[i].compress);
3514 dst->node[j].decompress =
3515 isl_multi_aff_copy(src->node[i].decompress);
3516 dst->node[j].nvar = src->node[i].nvar;
3517 dst->node[j].nparam = src->node[i].nparam;
3518 dst->node[j].sched = isl_mat_copy(src->node[i].sched);
3519 dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
3520 dst->node[j].coincident = src->node[i].coincident;
3521 dst->node[j].sizes = isl_multi_val_copy(src->node[i].sizes);
3522 dst->node[j].bounds = isl_basic_set_copy(src->node[i].bounds);
3523 dst->node[j].max = isl_vec_copy(src->node[i].max);
3524 dst->n++;
3526 if (!dst->node[j].space || !dst->node[j].sched)
3527 return isl_stat_error;
3528 if (dst->node[j].compressed &&
3529 (!dst->node[j].hull || !dst->node[j].compress ||
3530 !dst->node[j].decompress))
3531 return isl_stat_error;
3534 return isl_stat_ok;
3537 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3538 * to the dst dependence graph.
3539 * If the source or destination node of the edge is not in the destination
3540 * graph, then it must be a backward proximity edge and it should simply
3541 * be ignored.
3543 static isl_stat copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
3544 struct isl_sched_graph *src,
3545 int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
3547 int i;
3549 dst->n_edge = 0;
3550 for (i = 0; i < src->n_edge; ++i) {
3551 struct isl_sched_edge *edge = &src->edge[i];
3552 isl_map *map;
3553 isl_union_map *tagged_condition;
3554 isl_union_map *tagged_validity;
3555 struct isl_sched_node *dst_src, *dst_dst;
3557 if (!edge_pred(edge, data))
3558 continue;
3560 if (isl_map_plain_is_empty(edge->map))
3561 continue;
3563 dst_src = graph_find_node(ctx, dst, edge->src->space);
3564 dst_dst = graph_find_node(ctx, dst, edge->dst->space);
3565 if (!dst_src || !dst_dst)
3566 return isl_stat_error;
3567 if (!is_node(dst, dst_src) || !is_node(dst, dst_dst)) {
3568 if (is_validity(edge) || is_conditional_validity(edge))
3569 isl_die(ctx, isl_error_internal,
3570 "backward (conditional) validity edge",
3571 return isl_stat_error);
3572 continue;
3575 map = isl_map_copy(edge->map);
3576 tagged_condition = isl_union_map_copy(edge->tagged_condition);
3577 tagged_validity = isl_union_map_copy(edge->tagged_validity);
3579 dst->edge[dst->n_edge].src = dst_src;
3580 dst->edge[dst->n_edge].dst = dst_dst;
3581 dst->edge[dst->n_edge].map = map;
3582 dst->edge[dst->n_edge].tagged_condition = tagged_condition;
3583 dst->edge[dst->n_edge].tagged_validity = tagged_validity;
3584 dst->edge[dst->n_edge].types = edge->types;
3585 dst->n_edge++;
3587 if (edge->tagged_condition && !tagged_condition)
3588 return isl_stat_error;
3589 if (edge->tagged_validity && !tagged_validity)
3590 return isl_stat_error;
3592 if (graph_edge_tables_add(ctx, dst,
3593 &dst->edge[dst->n_edge - 1]) < 0)
3594 return isl_stat_error;
3597 return isl_stat_ok;
3600 /* Compute the maximal number of variables over all nodes.
3601 * This is the maximal number of linearly independent schedule
3602 * rows that we need to compute.
3603 * Just in case we end up in a part of the dependence graph
3604 * with only lower-dimensional domains, we make sure we will
3605 * compute the required amount of extra linearly independent rows.
3607 static int compute_maxvar(struct isl_sched_graph *graph)
3609 int i;
3611 graph->maxvar = 0;
3612 for (i = 0; i < graph->n; ++i) {
3613 struct isl_sched_node *node = &graph->node[i];
3614 int nvar;
3616 if (node_update_vmap(node) < 0)
3617 return -1;
3618 nvar = node->nvar + graph->n_row - node->rank;
3619 if (nvar > graph->maxvar)
3620 graph->maxvar = nvar;
3623 return 0;
3626 /* Extract the subgraph of "graph" that consists of the nodes satisfying
3627 * "node_pred" and the edges satisfying "edge_pred" and store
3628 * the result in "sub".
3630 static isl_stat extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
3631 int (*node_pred)(struct isl_sched_node *node, int data),
3632 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3633 int data, struct isl_sched_graph *sub)
3635 int i, n = 0, n_edge = 0;
3636 int t;
3638 for (i = 0; i < graph->n; ++i)
3639 if (node_pred(&graph->node[i], data))
3640 ++n;
3641 for (i = 0; i < graph->n_edge; ++i)
3642 if (edge_pred(&graph->edge[i], data))
3643 ++n_edge;
3644 if (graph_alloc(ctx, sub, n, n_edge) < 0)
3645 return isl_stat_error;
3646 sub->root = graph->root;
3647 if (copy_nodes(sub, graph, node_pred, data) < 0)
3648 return isl_stat_error;
3649 if (graph_init_table(ctx, sub) < 0)
3650 return isl_stat_error;
3651 for (t = 0; t <= isl_edge_last; ++t)
3652 sub->max_edge[t] = graph->max_edge[t];
3653 if (graph_init_edge_tables(ctx, sub) < 0)
3654 return isl_stat_error;
3655 if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
3656 return isl_stat_error;
3657 sub->n_row = graph->n_row;
3658 sub->max_row = graph->max_row;
3659 sub->n_total_row = graph->n_total_row;
3660 sub->band_start = graph->band_start;
3662 return isl_stat_ok;
3665 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
3666 struct isl_sched_graph *graph);
3667 static __isl_give isl_schedule_node *compute_schedule_wcc(
3668 isl_schedule_node *node, struct isl_sched_graph *graph);
3670 /* Compute a schedule for a subgraph of "graph". In particular, for
3671 * the graph composed of nodes that satisfy node_pred and edges that
3672 * satisfy edge_pred.
3673 * If the subgraph is known to consist of a single component, then wcc should
3674 * be set and then we call compute_schedule_wcc on the constructed subgraph.
3675 * Otherwise, we call compute_schedule, which will check whether the subgraph
3676 * is connected.
3678 * The schedule is inserted at "node" and the updated schedule node
3679 * is returned.
3681 static __isl_give isl_schedule_node *compute_sub_schedule(
3682 __isl_take isl_schedule_node *node, isl_ctx *ctx,
3683 struct isl_sched_graph *graph,
3684 int (*node_pred)(struct isl_sched_node *node, int data),
3685 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3686 int data, int wcc)
3688 struct isl_sched_graph split = { 0 };
3690 if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
3691 &split) < 0)
3692 goto error;
3694 if (wcc)
3695 node = compute_schedule_wcc(node, &split);
3696 else
3697 node = compute_schedule(node, &split);
3699 graph_free(ctx, &split);
3700 return node;
3701 error:
3702 graph_free(ctx, &split);
3703 return isl_schedule_node_free(node);
3706 static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
3708 return edge->src->scc == scc && edge->dst->scc == scc;
3711 static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
3713 return edge->dst->scc <= scc;
3716 static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
3718 return edge->src->scc >= scc;
3721 /* Reset the current band by dropping all its schedule rows.
3723 static isl_stat reset_band(struct isl_sched_graph *graph)
3725 int i;
3726 int drop;
3728 drop = graph->n_total_row - graph->band_start;
3729 graph->n_total_row -= drop;
3730 graph->n_row -= drop;
3732 for (i = 0; i < graph->n; ++i) {
3733 struct isl_sched_node *node = &graph->node[i];
3735 isl_map_free(node->sched_map);
3736 node->sched_map = NULL;
3738 node->sched = isl_mat_drop_rows(node->sched,
3739 graph->band_start, drop);
3741 if (!node->sched)
3742 return isl_stat_error;
3745 return isl_stat_ok;
3748 /* Split the current graph into two parts and compute a schedule for each
3749 * part individually. In particular, one part consists of all SCCs up
3750 * to and including graph->src_scc, while the other part contains the other
3751 * SCCs. The split is enforced by a sequence node inserted at position "node"
3752 * in the schedule tree. Return the updated schedule node.
3753 * If either of these two parts consists of a sequence, then it is spliced
3754 * into the sequence containing the two parts.
3756 * The current band is reset. It would be possible to reuse
3757 * the previously computed rows as the first rows in the next
3758 * band, but recomputing them may result in better rows as we are looking
3759 * at a smaller part of the dependence graph.
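 * For example, if the outer sequence has filters F1 and F2 and
 * the schedule computed for the second part is itself a sequence
 * with filters B1 and B2, then that inner sequence is spliced into
 * the outer one, yielding a single sequence with filters F1, B1 and B2
 * rather than a nested sequence.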
3761 static __isl_give isl_schedule_node *compute_split_schedule(
3762 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3764 int is_seq;
3765 isl_ctx *ctx;
3766 isl_union_set_list *filters;
3768 if (!node)
3769 return NULL;
3771 if (reset_band(graph) < 0)
3772 return isl_schedule_node_free(node);
3774 next_band(graph);
3776 ctx = isl_schedule_node_get_ctx(node);
3777 filters = extract_split(ctx, graph);
3778 node = isl_schedule_node_insert_sequence(node, filters);
3779 node = isl_schedule_node_child(node, 1);
3780 node = isl_schedule_node_child(node, 0);
3782 node = compute_sub_schedule(node, ctx, graph,
3783 &node_scc_at_least, &edge_src_scc_at_least,
3784 graph->src_scc + 1, 0);
3785 is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3786 node = isl_schedule_node_parent(node);
3787 node = isl_schedule_node_parent(node);
3788 if (is_seq)
3789 node = isl_schedule_node_sequence_splice_child(node, 1);
3790 node = isl_schedule_node_child(node, 0);
3791 node = isl_schedule_node_child(node, 0);
3792 node = compute_sub_schedule(node, ctx, graph,
3793 &node_scc_at_most, &edge_dst_scc_at_most,
3794 graph->src_scc, 0);
3795 is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3796 node = isl_schedule_node_parent(node);
3797 node = isl_schedule_node_parent(node);
3798 if (is_seq)
3799 node = isl_schedule_node_sequence_splice_child(node, 0);
3801 return node;
3804 /* Insert a band node at position "node" in the schedule tree corresponding
3805 * to the current band in "graph". Mark the band node permutable
3806 * if "permutable" is set.
3807 * The partial schedules and the coincidence property are extracted
3808 * from the graph nodes.
3809 * Return the updated schedule node.
3811 static __isl_give isl_schedule_node *insert_current_band(
3812 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3813 int permutable)
3815 int i;
3816 int start, end, n;
3817 isl_multi_aff *ma;
3818 isl_multi_pw_aff *mpa;
3819 isl_multi_union_pw_aff *mupa;
3821 if (!node)
3822 return NULL;
3824 if (graph->n < 1)
3825 isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
3826 "graph should have at least one node",
3827 return isl_schedule_node_free(node));
3829 start = graph->band_start;
3830 end = graph->n_total_row;
3831 n = end - start;
3833 ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
3834 mpa = isl_multi_pw_aff_from_multi_aff(ma);
3835 mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
3837 for (i = 1; i < graph->n; ++i) {
3838 isl_multi_union_pw_aff *mupa_i;
3840 ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
3841 start, n);
3842 mpa = isl_multi_pw_aff_from_multi_aff(ma);
3843 mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
3844 mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
3846 node = isl_schedule_node_insert_partial_schedule(node, mupa);
3848 for (i = 0; i < n; ++i)
3849 node = isl_schedule_node_band_member_set_coincident(node, i,
3850 graph->node[0].coincident[start + i]);
3851 node = isl_schedule_node_band_set_permutable(node, permutable);
3853 return node;
3856 /* Update the dependence relations based on the current schedule,
3857 * add the current band to "node" and then continue with the computation
3858 * of the next band.
3859 * Return the updated schedule node.
3861 static __isl_give isl_schedule_node *compute_next_band(
3862 __isl_take isl_schedule_node *node,
3863 struct isl_sched_graph *graph, int permutable)
3865 isl_ctx *ctx;
3867 if (!node)
3868 return NULL;
3870 ctx = isl_schedule_node_get_ctx(node);
3871 if (update_edges(ctx, graph) < 0)
3872 return isl_schedule_node_free(node);
3873 node = insert_current_band(node, graph, permutable);
3874 next_band(graph);
3876 node = isl_schedule_node_child(node, 0);
3877 node = compute_schedule(node, graph);
3878 node = isl_schedule_node_parent(node);
3880 return node;
3883 /* Add the constraints "coef" derived from an edge from "node" to itself
3884 * to graph->lp in order to respect the dependences and to try and carry them.
3885 * "pos" is the sequence number of the edge that needs to be carried.
3886 * "coef" represents general constraints on coefficients (c_0, c_x)
3887 * of valid constraints for (y - x) with x and y instances of the node.
3889 * The constraints added to graph->lp need to enforce
3891 * (c_j_0 + c_j_x y) - (c_j_0 + c_j_x x)
3892 * = c_j_x (y - x) >= e_i
3894 * for each (x,y) in the dependence relation of the edge.
3895 * That is, (-e_i, c_j_x) needs to be plugged in for (c_0, c_x),
3896 * taking into account that each coefficient in c_j_x is represented
3897 * as a pair of non-negative coefficients.
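 * As a minimal 1-dimensional illustration, if the self-dependence
 * relation has the single dependence distance d = 1, then "coef"
 * imposes c_0 + c_x >= 0, and plugging in (-e_i, c_j_x) turns this
 * into c_j_x >= e_i, so the schedule coefficient needs to be
 * at least 1 whenever the dependence is carried (e_i = 1).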
3899 static isl_stat add_intra_constraints(struct isl_sched_graph *graph,
3900 struct isl_sched_node *node, __isl_take isl_basic_set *coef, int pos)
3902 isl_size offset;
3903 isl_ctx *ctx;
3904 isl_dim_map *dim_map;
3906 offset = coef_var_offset(coef);
3907 if (offset < 0)
3908 coef = isl_basic_set_free(coef);
3909 if (!coef)
3910 return isl_stat_error;
3912 ctx = isl_basic_set_get_ctx(coef);
3913 dim_map = intra_dim_map(ctx, graph, node, offset, 1);
3914 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
3915 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
3917 return isl_stat_ok;
3920 /* Add the constraints "coef" derived from an edge from "src" to "dst"
3921 * to graph->lp in order to respect the dependences and to try and carry them.
3922 * "pos" is the sequence number of the edge that needs to be carried or
3923 * -1 if no attempt should be made to carry the dependences.
3924 * "coef" represents general constraints on coefficients (c_0, c_n, c_x, c_y)
3925 * of valid constraints for (x, y) with x and y instances of "src" and "dst".
3927 * The constraints added to graph->lp need to enforce
3929 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
3931 * for each (x,y) in the dependence relation of the edge or
3933 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= 0
3935 * if pos is -1.
3936 * That is,
3937 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
3938 * or
3939 * (c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
3940 * needs to be plugged in for (c_0, c_n, c_x, c_y),
3941 * taking into account that each coefficient in c_j_x and c_k_x is represented
3942 * as a pair of non-negative coefficients.
3944 static isl_stat add_inter_constraints(struct isl_sched_graph *graph,
3945 struct isl_sched_node *src, struct isl_sched_node *dst,
3946 __isl_take isl_basic_set *coef, int pos)
3948 isl_size offset;
3949 isl_ctx *ctx;
3950 isl_dim_map *dim_map;
3952 offset = coef_var_offset(coef);
3953 if (offset < 0)
3954 coef = isl_basic_set_free(coef);
3955 if (!coef)
3956 return isl_stat_error;
3958 ctx = isl_basic_set_get_ctx(coef);
3959 dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
3960 if (pos >= 0)
3961 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
3962 graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
3964 return isl_stat_ok;
3967 /* Data structure for keeping track of the data needed
3968 * to exploit non-trivial lineality spaces.
3970 * "any_non_trivial" is true if there are any non-trivial lineality spaces.
3971 * If "any_non_trivial" is not true, then "equivalent" and "mask" may be NULL.
3972 * "equivalent" connects instances to other instances on the same line(s).
3973 * "mask" contains the domain spaces of "equivalent".
3974 * Any instance set not in "mask" does not have a non-trivial lineality space.
3976 struct isl_exploit_lineality_data {
3977 isl_bool any_non_trivial;
3978 isl_union_map *equivalent;
3979 isl_union_set *mask;
3982 /* Data structure collecting information used during the construction
3983 * of an LP for carrying dependences.
3985 * "intra" is a sequence of coefficient constraints for intra-node edges.
3986 * "inter" is a sequence of coefficient constraints for inter-node edges.
3987 * "lineality" contains data used to exploit non-trivial lineality spaces.
3989 struct isl_carry {
3990 isl_basic_set_list *intra;
3991 isl_basic_set_list *inter;
3992 struct isl_exploit_lineality_data lineality;
3995 /* Free all the data stored in "carry".
3997 static void isl_carry_clear(struct isl_carry *carry)
3999 isl_basic_set_list_free(carry->intra);
4000 isl_basic_set_list_free(carry->inter);
4001 isl_union_map_free(carry->lineality.equivalent);
4002 isl_union_set_free(carry->lineality.mask);
4005 /* Return a pointer to the node in "graph" that lives in "space".
4006 * If the requested node has been compressed, then "space"
4007 * corresponds to the compressed space.
4008 * The graph is assumed to have such a node.
4009 * Return NULL in case of error.
4011 * First try and see if "space" is the space of an uncompressed node.
4012 * If so, return that node.
4013 * Otherwise, "space" was constructed by construct_compressed_id and
4014 * contains a user pointer pointing to the node in the tuple id.
4015 * However, this node belongs to the original dependence graph.
4016 * If "graph" is a subgraph of this original dependence graph,
4017 * then the node with the same space still needs to be looked up
4018 * in the current graph.
4020 static struct isl_sched_node *graph_find_compressed_node(isl_ctx *ctx,
4021 struct isl_sched_graph *graph, __isl_keep isl_space *space)
4023 isl_id *id;
4024 struct isl_sched_node *node;
4026 if (!space)
4027 return NULL;
4029 node = graph_find_node(ctx, graph, space);
4030 if (!node)
4031 return NULL;
4032 if (is_node(graph, node))
4033 return node;
4035 id = isl_space_get_tuple_id(space, isl_dim_set);
4036 node = isl_id_get_user(id);
4037 isl_id_free(id);
4039 if (!node)
4040 return NULL;
4042 if (!is_node(graph->root, node))
4043 isl_die(ctx, isl_error_internal,
4044 "space points to invalid node", return NULL);
4045 if (graph != graph->root)
4046 node = graph_find_node(ctx, graph, node->space);
4047 if (!is_node(graph, node))
4048 isl_die(ctx, isl_error_internal,
4049 "unable to find node", return NULL);
4051 return node;
4054 /* Internal data structure for add_all_constraints.
4056 * "graph" is the schedule constraint graph for which an LP problem
4057 * is being constructed.
4058 * "carry_inter" indicates whether inter-node edges should be carried.
4059 * "pos" is the position of the next edge that needs to be carried.
4061 struct isl_add_all_constraints_data {
4062 isl_ctx *ctx;
4063 struct isl_sched_graph *graph;
4064 int carry_inter;
4065 int pos;
4068 /* Add the constraints "coef" derived from an edge from a node to itself
4069 * to data->graph->lp in order to respect the dependences and
4070 * to try and carry them.
4072 * The space of "coef" is of the form
4074 * coefficients[[c_cst] -> S[c_x]]
4076 * with S[c_x] the (compressed) space of the node.
4077 * Extract the node from the space and call add_intra_constraints.
4079 static isl_stat lp_add_intra(__isl_take isl_basic_set *coef, void *user)
4081 struct isl_add_all_constraints_data *data = user;
4082 isl_space *space;
4083 struct isl_sched_node *node;
4085 space = isl_basic_set_get_space(coef);
4086 space = isl_space_range(isl_space_unwrap(space));
4087 node = graph_find_compressed_node(data->ctx, data->graph, space);
4088 isl_space_free(space);
4089 return add_intra_constraints(data->graph, node, coef, data->pos++);
4092 /* Add the constraints "coef" derived from an edge from a node j
4093 * to a node k to data->graph->lp in order to respect the dependences and
4094 * to try and carry them (provided data->carry_inter is set).
4096 * The space of "coef" is of the form
4098 * coefficients[[c_cst, c_n] -> [S_j[c_x] -> S_k[c_y]]]
4100 * with S_j[c_x] and S_k[c_y] the (compressed) spaces of the nodes.
4101 * Extract the nodes from the space and call add_inter_constraints.
4103 static isl_stat lp_add_inter(__isl_take isl_basic_set *coef, void *user)
4105 struct isl_add_all_constraints_data *data = user;
4106 isl_space *space, *dom;
4107 struct isl_sched_node *src, *dst;
4108 int pos;
4110 space = isl_basic_set_get_space(coef);
4111 space = isl_space_unwrap(isl_space_range(isl_space_unwrap(space)));
4112 dom = isl_space_domain(isl_space_copy(space));
4113 src = graph_find_compressed_node(data->ctx, data->graph, dom);
4114 isl_space_free(dom);
4115 space = isl_space_range(space);
4116 dst = graph_find_compressed_node(data->ctx, data->graph, space);
4117 isl_space_free(space);
4119 pos = data->carry_inter ? data->pos++ : -1;
4120 return add_inter_constraints(data->graph, src, dst, coef, pos);
4123 /* Add constraints to graph->lp that force all (conditional) validity
4124 * dependences to be respected and attempt to carry them.
4125 * "intra" is the sequence of coefficient constraints for intra-node edges.
4126 * "inter" is the sequence of coefficient constraints for inter-node edges.
4127 * "carry_inter" indicates whether inter-node edges should be carried or
4128 * only respected.
4130 static isl_stat add_all_constraints(isl_ctx *ctx, struct isl_sched_graph *graph,
4131 __isl_keep isl_basic_set_list *intra,
4132 __isl_keep isl_basic_set_list *inter, int carry_inter)
4134 struct isl_add_all_constraints_data data = { ctx, graph, carry_inter };
4136 data.pos = 0;
4137 if (isl_basic_set_list_foreach(intra, &lp_add_intra, &data) < 0)
4138 return isl_stat_error;
4139 if (isl_basic_set_list_foreach(inter, &lp_add_inter, &data) < 0)
4140 return isl_stat_error;
4141 return isl_stat_ok;
4144 /* Internal data structure for count_all_constraints
4145 * for keeping track of the number of equality and inequality constraints.
4147 struct isl_sched_count {
4148 int n_eq;
4149 int n_ineq;
4152 /* Add the number of equality and inequality constraints of "bset"
4153 * to data->n_eq and data->n_ineq.
4155 static isl_stat bset_update_count(__isl_take isl_basic_set *bset, void *user)
4157 struct isl_sched_count *data = user;
4159 return update_count(bset, 1, &data->n_eq, &data->n_ineq);
4162 /* Count the number of equality and inequality constraints
4163 * that will be added to the carry_lp problem.
4164 * We count each edge exactly once.
4165 * "intra" is the sequence of coefficient constraints for intra-node edges.
4166 * "inter" is the sequence of coefficient constraints for inter-node edges.
4168 static isl_stat count_all_constraints(__isl_keep isl_basic_set_list *intra,
4169 __isl_keep isl_basic_set_list *inter, int *n_eq, int *n_ineq)
4171 struct isl_sched_count data;
4173 data.n_eq = data.n_ineq = 0;
4174 if (isl_basic_set_list_foreach(inter, &bset_update_count, &data) < 0)
4175 return isl_stat_error;
4176 if (isl_basic_set_list_foreach(intra, &bset_update_count, &data) < 0)
4177 return isl_stat_error;
4179 *n_eq = data.n_eq;
4180 *n_ineq = data.n_ineq;
4182 return isl_stat_ok;
4185 /* Construct an LP problem for finding schedule coefficients
4186 * such that the schedule carries as many validity dependences as possible.
4187 * In particular, for each dependence i, we bound the dependence distance
4188 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
4189 * of all e_i's. Dependences with e_i = 0 in the solution are simply
4190 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
4191 * "intra" is the sequence of coefficient constraints for intra-node edges.
4192 * "inter" is the sequence of coefficient constraints for inter-node edges.
4193 * "n_edge" is the total number of edges.
4194 * "carry_inter" indicates whether inter-node edges should be carried or
4195 * only respected. That is, if "carry_inter" is not set, then
4196 * no e_i variables are introduced for the inter-node edges.
4198 * All variables of the LP are non-negative. The actual coefficients
4199 * may be negative, so each coefficient is represented as the difference
4200 * of two non-negative variables. The negative part always appears
4201 * immediately before the positive part.
4202 * Other than that, the variables have the following order
4204 * - sum of (1 - e_i) over all edges
4205 * - sum of all c_n coefficients
4206 * (unconstrained when computing non-parametric schedules)
4207 * - sum of positive and negative parts of all c_x coefficients
4208 * - for each edge
4209 * - e_i
4210 * - for each node
4211 * - positive and negative parts of c_i_x, in opposite order
4212 * - c_i_n (if parametric)
4213 * - c_i_0
4215 * The constraints are those from the (validity) edges plus three equalities
4216 * to express the sums and n_edge inequalities to express e_i <= 1.
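 * For example, with n_edge = 2 and a single node with nparam = 1 and
 * nvar = 2, the three sums and e_0, e_1 occupy the first 3 + 2 = 5
 * positions, the node gets start = 5 and adds 2 * 2 + 1 + 1 = 6
 * further variables, for a total of 11 LP variables.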
4218 static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
4219 int n_edge, __isl_keep isl_basic_set_list *intra,
4220 __isl_keep isl_basic_set_list *inter, int carry_inter)
4222 int i;
4223 int k;
4224 isl_space *dim;
4225 unsigned total;
4226 int n_eq, n_ineq;
4228 total = 3 + n_edge;
4229 for (i = 0; i < graph->n; ++i) {
4230 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
4231 node->start = total;
4232 total += 1 + node->nparam + 2 * node->nvar;
4235 if (count_all_constraints(intra, inter, &n_eq, &n_ineq) < 0)
4236 return isl_stat_error;
4238 dim = isl_space_set_alloc(ctx, 0, total);
4239 isl_basic_set_free(graph->lp);
4240 n_eq += 3;
4241 n_ineq += n_edge;
4242 graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
4243 graph->lp = isl_basic_set_set_rational(graph->lp);
4245 k = isl_basic_set_alloc_equality(graph->lp);
4246 if (k < 0)
4247 return isl_stat_error;
4248 isl_seq_clr(graph->lp->eq[k], 1 + total);
4249 isl_int_set_si(graph->lp->eq[k][0], -n_edge);
4250 isl_int_set_si(graph->lp->eq[k][1], 1);
4251 for (i = 0; i < n_edge; ++i)
4252 isl_int_set_si(graph->lp->eq[k][4 + i], 1);
4254 if (add_param_sum_constraint(graph, 1) < 0)
4255 return isl_stat_error;
4256 if (add_var_sum_constraint(graph, 2) < 0)
4257 return isl_stat_error;
4259 for (i = 0; i < n_edge; ++i) {
4260 k = isl_basic_set_alloc_inequality(graph->lp);
4261 if (k < 0)
4262 return isl_stat_error;
4263 isl_seq_clr(graph->lp->ineq[k], 1 + total);
4264 isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
4265 isl_int_set_si(graph->lp->ineq[k][0], 1);
4268 if (add_all_constraints(ctx, graph, intra, inter, carry_inter) < 0)
4269 return isl_stat_error;
4271 return isl_stat_ok;
4274 static __isl_give isl_schedule_node *compute_component_schedule(
4275 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4276 int wcc);
4278 /* If the schedule_split_scaled option is set and if the linear
4279 * parts of the scheduling rows for all nodes in the graphs have
4280 * a non-trivial common divisor, then remove this
4281 * common divisor from the linear part.
4282 * Otherwise, insert a band node directly and continue with
4283 * the construction of the schedule.
4285 * If a non-trivial common divisor is found, then
4286 * the linear part is reduced and the remainder is ignored.
4287 * The pieces of the graph that are assigned different remainders
4288 * form (groups of) strongly connected components within
4289 * the scaled down band. If needed, they can therefore
4290 * be ordered along this remainder in a sequence node.
4291 * However, this ordering is not enforced here in order to allow
4292 * the scheduler to combine some of the strongly connected components.
4294 static __isl_give isl_schedule_node *split_scaled(
4295 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
4297 int i;
4298 int row;
4299 isl_ctx *ctx;
4300 isl_int gcd, gcd_i;
4302 if (!node)
4303 return NULL;
4305 ctx = isl_schedule_node_get_ctx(node);
4306 if (!ctx->opt->schedule_split_scaled)
4307 return compute_next_band(node, graph, 0);
4308 if (graph->n <= 1)
4309 return compute_next_band(node, graph, 0);
4311 isl_int_init(gcd);
4312 isl_int_init(gcd_i);
4314 isl_int_set_si(gcd, 0);
4316 row = isl_mat_rows(graph->node[0].sched) - 1;
4318 for (i = 0; i < graph->n; ++i) {
4319 struct isl_sched_node *node = &graph->node[i];
4320 int cols = isl_mat_cols(node->sched);
4322 isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
4323 isl_int_gcd(gcd, gcd, gcd_i);
4326 isl_int_clear(gcd_i);
4328 if (isl_int_cmp_si(gcd, 1) <= 0) {
4329 isl_int_clear(gcd);
4330 return compute_next_band(node, graph, 0);
4333 for (i = 0; i < graph->n; ++i) {
4334 struct isl_sched_node *node = &graph->node[i];
4336 isl_int_fdiv_q(node->sched->row[row][0],
4337 node->sched->row[row][0], gcd);
4338 isl_int_mul(node->sched->row[row][0],
4339 node->sched->row[row][0], gcd);
4340 node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
4341 if (!node->sched)
4342 goto error;
4345 isl_int_clear(gcd);
4347 return compute_next_band(node, graph, 0);
4348 error:
4349 isl_int_clear(gcd);
4350 return isl_schedule_node_free(node);
4353 /* Is the schedule row "sol" trivial on node "node"?
4354 * That is, is the solution zero on the dimensions linearly independent of
4355 * the previously found solutions?
4356 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
4358 * Each coefficient is represented as the difference between
4359 * two non-negative values in "sol".
4360 * We construct the schedule row s and check if it is linearly
4361 * independent of previously computed schedule rows
4362 * by computing T s, with T the linear combinations that are zero
4363 * on linearly dependent schedule rows.
4364 * If the result consists of all zeros, then the solution is trivial.
4366 static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
4368 int trivial;
4369 isl_vec *node_sol;
4371 if (!sol)
4372 return -1;
4373 if (node->nvar == node->rank)
4374 return 0;
4376 node_sol = extract_var_coef(node, sol);
4377 node_sol = isl_mat_vec_product(isl_mat_copy(node->indep), node_sol);
4378 if (!node_sol)
4379 return -1;
4381 trivial = isl_seq_first_non_zero(node_sol->el,
4382 node->nvar - node->rank) == -1;
4384 isl_vec_free(node_sol);
4386 return trivial;
4389 /* Is the schedule row "sol" trivial on any node where it should
4390 * not be trivial?
5391 * Return 1 if the solution is trivial on any such node, 0 if it is not and -1 on error.
4393 static int is_any_trivial(struct isl_sched_graph *graph,
4394 __isl_keep isl_vec *sol)
4396 int i;
4398 for (i = 0; i < graph->n; ++i) {
4399 struct isl_sched_node *node = &graph->node[i];
4400 int trivial;
4402 if (!needs_row(graph, node))
4403 continue;
4404 trivial = is_trivial(node, sol);
4405 if (trivial < 0 || trivial)
4406 return trivial;
4409 return 0;
4412 /* Does the schedule represented by "sol" perform loop coalescing on "node"?
4413 * If so, return the position of the coalesced dimension.
4414 * Otherwise, return node->nvar or -1 on error.
4416 * In particular, look for pairs of coefficients c_i and c_j such that
4417 * |c_j/c_i| > ceil(size_i/2), i.e., |c_j| > |c_i * ceil(size_i/2)|.
4418 * If any such pair is found, then return i.
4419 * If size_i is infinity, then no check on c_i needs to be performed.
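 * For example, if size_i = 10 and c_i = 1, then ceil(size_i/2) = 5 and
 * any other coefficient c_j with |c_j| > 5 on the same node causes
 * position i to be returned.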
4421 static int find_node_coalescing(struct isl_sched_node *node,
4422 __isl_keep isl_vec *sol)
4424 int i, j;
4425 isl_int max;
4426 isl_vec *csol;
4428 if (node->nvar <= 1)
4429 return node->nvar;
4431 csol = extract_var_coef(node, sol);
4432 if (!csol)
4433 return -1;
4434 isl_int_init(max);
4435 for (i = 0; i < node->nvar; ++i) {
4436 isl_val *v;
4438 if (isl_int_is_zero(csol->el[i]))
4439 continue;
4440 v = isl_multi_val_get_val(node->sizes, i);
4441 if (!v)
4442 goto error;
4443 if (!isl_val_is_int(v)) {
4444 isl_val_free(v);
4445 continue;
4447 v = isl_val_div_ui(v, 2);
4448 v = isl_val_ceil(v);
4449 if (!v)
4450 goto error;
4451 isl_int_mul(max, v->n, csol->el[i]);
4452 isl_val_free(v);
4454 for (j = 0; j < node->nvar; ++j) {
4455 if (j == i)
4456 continue;
4457 if (isl_int_abs_gt(csol->el[j], max))
4458 break;
4460 if (j < node->nvar)
4461 break;
4464 isl_int_clear(max);
4465 isl_vec_free(csol);
4466 return i;
4467 error:
4468 isl_int_clear(max);
4469 isl_vec_free(csol);
4470 return -1;
4473 /* Force the schedule coefficient at position "pos" of "node" to be zero
4474 * in "tl".
4475 * The coefficient is encoded as the difference between two non-negative
4476 * variables. Force these two variables to have the same value.
4478 static __isl_give isl_tab_lexmin *zero_out_node_coef(
4479 __isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
4481 int dim;
4482 isl_ctx *ctx;
4483 isl_vec *eq;
4485 ctx = isl_space_get_ctx(node->space);
4486 dim = isl_tab_lexmin_dim(tl);
4487 if (dim < 0)
4488 return isl_tab_lexmin_free(tl);
4489 eq = isl_vec_alloc(ctx, 1 + dim);
4490 eq = isl_vec_clr(eq);
4491 if (!eq)
4492 return isl_tab_lexmin_free(tl);
4494 pos = 1 + node_var_coef_pos(node, pos);
4495 isl_int_set_si(eq->el[pos], 1);
4496 isl_int_set_si(eq->el[pos + 1], -1);
4497 tl = isl_tab_lexmin_add_eq(tl, eq->el);
4498 isl_vec_free(eq);
4500 return tl;
4503 /* Return the lexicographically smallest rational point in the basic set
4504 * from which "tl" was constructed, double checking that this input set
4505 * was not empty.
4507 static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
4509 isl_vec *sol;
4511 sol = isl_tab_lexmin_get_solution(tl);
4512 if (!sol)
4513 return NULL;
4514 if (sol->size == 0)
4515 isl_die(isl_vec_get_ctx(sol), isl_error_internal,
4516 "error in schedule construction",
4517 return isl_vec_free(sol));
4518 return sol;
4521 /* Does the solution "sol" of the LP problem constructed by setup_carry_lp
4522 * carry any of the "n_edge" groups of dependences?
4523 * The value in the first position is the sum of (1 - e_i) over all "n_edge"
4524 * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
4525 * by the edge are carried by the solution.
4526 * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
4527 * one of those is carried.
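 * For example, with n_edge = 3 and a solution with e_0 = e_2 = 1 and
 * e_1 = 0, the sum of the (1 - e_i) is 1 < 3, so at least one group
 * (in fact two) is carried.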
4529 * Note that despite the fact that the problem is solved using a rational
4530 * solver, the solution is guaranteed to be integral.
4531 * Specifically, the dependence distance lower bounds e_i (and therefore
4532 * also their sum) are integers. See Lemma 5 of [1].
4534 * Any potential denominator of the sum is cleared by this function.
4535 * The denominator is not relevant for any of the other elements
4536 * in the solution.
4538 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
4539 * Problem, Part II: Multi-Dimensional Time.
4540 * In Intl. Journal of Parallel Programming, 1992.
4542 static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
4544 isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
4545 isl_int_set_si(sol->el[0], 1);
4546 return isl_int_cmp_si(sol->el[1], n_edge) < 0;
4549 /* Return the lexicographically smallest rational point in "lp",
4550 * assuming that all variables are non-negative and performing some
4551 * additional sanity checks.
4552 * If "want_integral" is set, then compute the lexicographically smallest
4553 * integer point instead.
4554 * In particular, "lp" should not be empty by construction.
4555 * Double check that this is the case.
4556 * If dependences are not carried for any of the "n_edge" edges,
4557 * then return an empty vector.
4559 * If the schedule_treat_coalescing option is set and
4560 * if the computed schedule performs loop coalescing on a given node,
4561 * i.e., if it is of the form
4563 * c_i i + c_j j + ...
4565 * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
4566 * to cut out this solution. Repeat this process until no more loop
4567 * coalescing occurs or until no more dependences can be carried.
4568 * In the latter case, revert to the previously computed solution.
4570 * If the caller requests an integral solution and if coalescing should
4571 * be treated, then perform the coalescing treatment first as
4572 * an integral solution computed before coalescing treatment
4573 * would carry the same number of edges and would therefore probably
4574 * also be coalescing.
4576 * To allow the coalescing treatment to be performed first,
4577 * the initial solution is allowed to be rational and it is only
4578 * cut out (if needed) in the next iteration, if no coalescing measures
4579 * were taken.
4581 static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
4582 __isl_take isl_basic_set *lp, int n_edge, int want_integral)
4584 int i, pos, cut;
4585 isl_ctx *ctx;
4586 isl_tab_lexmin *tl;
4587 isl_vec *sol = NULL, *prev;
4588 int treat_coalescing;
4589 int try_again;
4591 if (!lp)
4592 return NULL;
4593 ctx = isl_basic_set_get_ctx(lp);
4594 treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
4595 tl = isl_tab_lexmin_from_basic_set(lp);
4597 cut = 0;
4598 do {
4599 int integral;
4601 try_again = 0;
4602 if (cut)
4603 tl = isl_tab_lexmin_cut_to_integer(tl);
4604 prev = sol;
4605 sol = non_empty_solution(tl);
4606 if (!sol)
4607 goto error;
4609 integral = isl_int_is_one(sol->el[0]);
4610 if (!carries_dependences(sol, n_edge)) {
4611 if (!prev)
4612 prev = isl_vec_alloc(ctx, 0);
4613 isl_vec_free(sol);
4614 sol = prev;
4615 break;
4617 prev = isl_vec_free(prev);
4618 cut = want_integral && !integral;
4619 if (cut)
4620 try_again = 1;
4621 if (!treat_coalescing)
4622 continue;
4623 for (i = 0; i < graph->n; ++i) {
4624 struct isl_sched_node *node = &graph->node[i];
4626 pos = find_node_coalescing(node, sol);
4627 if (pos < 0)
4628 goto error;
4629 if (pos < node->nvar)
4630 break;
4632 if (i < graph->n) {
4633 try_again = 1;
4634 tl = zero_out_node_coef(tl, &graph->node[i], pos);
4635 cut = 0;
4637 } while (try_again);
4639 isl_tab_lexmin_free(tl);
4641 return sol;
4642 error:
4643 isl_tab_lexmin_free(tl);
4644 isl_vec_free(prev);
4645 isl_vec_free(sol);
4646 return NULL;
4649 /* If "edge" is an edge from a node to itself, then add the corresponding
4650 * dependence relation to "umap".
4651 * If "node" has been compressed, then the dependence relation
4652 * is also compressed first.
4654 static __isl_give isl_union_map *add_intra(__isl_take isl_union_map *umap,
4655 struct isl_sched_edge *edge)
4657 isl_map *map;
4658 struct isl_sched_node *node = edge->src;
4660 if (edge->src != edge->dst)
4661 return umap;
4663 map = isl_map_copy(edge->map);
4664 if (node->compressed) {
4665 map = isl_map_preimage_domain_multi_aff(map,
4666 isl_multi_aff_copy(node->decompress));
4667 map = isl_map_preimage_range_multi_aff(map,
4668 isl_multi_aff_copy(node->decompress));
4670 umap = isl_union_map_add_map(umap, map);
4671 return umap;
4674 /* If "edge" is an edge from a node to another node, then add the corresponding
4675 * dependence relation to "umap".
4676 * If the source or destination nodes of "edge" have been compressed,
4677 * then the dependence relation is also compressed first.
4679 static __isl_give isl_union_map *add_inter(__isl_take isl_union_map *umap,
4680 struct isl_sched_edge *edge)
4682 isl_map *map;
4684 if (edge->src == edge->dst)
4685 return umap;
4687 map = isl_map_copy(edge->map);
4688 if (edge->src->compressed)
4689 map = isl_map_preimage_domain_multi_aff(map,
4690 isl_multi_aff_copy(edge->src->decompress));
4691 if (edge->dst->compressed)
4692 map = isl_map_preimage_range_multi_aff(map,
4693 isl_multi_aff_copy(edge->dst->decompress));
4694 umap = isl_union_map_add_map(umap, map);
4695 return umap;
4698 /* Internal data structure used by union_drop_coalescing_constraints
4699 * to collect bounds on all relevant statements.
4701 * "graph" is the schedule constraint graph for which an LP problem
4702 * is being constructed.
4703 * "bounds" collects the bounds.
4705 struct isl_collect_bounds_data {
4706 isl_ctx *ctx;
4707 struct isl_sched_graph *graph;
4708 isl_union_set *bounds;
4711 /* Add the size bounds for the node with instance deltas in "set"
4712 * to data->bounds.
4714 static isl_stat collect_bounds(__isl_take isl_set *set, void *user)
4716 struct isl_collect_bounds_data *data = user;
4717 struct isl_sched_node *node;
4718 isl_space *space;
4719 isl_set *bounds;
4721 space = isl_set_get_space(set);
4722 isl_set_free(set);
4724 node = graph_find_compressed_node(data->ctx, data->graph, space);
4725 isl_space_free(space);
4727 bounds = isl_set_from_basic_set(get_size_bounds(node));
4728 data->bounds = isl_union_set_add_set(data->bounds, bounds);
4730 return isl_stat_ok;
4733 /* Drop some constraints from "delta" that could be exploited
4734 * to construct loop coalescing schedules.
4735 * In particular, drop those constraints that bound the difference
4736 * to the size of the domain.
4737 * Do this for each set/node in "delta" separately.
4738 * The parameters are assumed to have been projected out by the caller.
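 * For example, if the (compressed) domain of a node has size 10 in
 * some direction, then a constraint such as d <= 9 on the corresponding
 * difference merely reflects the domain size and is therefore
 * removed by the gist computation below.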
4740 static __isl_give isl_union_set *union_drop_coalescing_constraints(isl_ctx *ctx,
4741 struct isl_sched_graph *graph, __isl_take isl_union_set *delta)
4743 struct isl_collect_bounds_data data = { ctx, graph };
4745 data.bounds = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4746 if (isl_union_set_foreach_set(delta, &collect_bounds, &data) < 0)
4747 data.bounds = isl_union_set_free(data.bounds);
4748 delta = isl_union_set_plain_gist(delta, data.bounds);
4750 return delta;
4753 /* Given a non-trivial lineality space "lineality", add the corresponding
4754 * universe set to data->mask and add a map from elements to
4755 * other elements along the lines in "lineality" to data->equivalent.
4756 * If this is the first time this function gets called
4757 * (data->any_non_trivial is still false), then set data->any_non_trivial and
4758 * initialize data->mask and data->equivalent.
4760 * In particular, if the lineality space is defined by equality constraints
4762 * E x = 0
4764 * then construct an affine mapping
4766 * f : x -> E x
4768 * and compute the equivalence relation of having the same image under f:
4770 * { x -> x' : E x = E x' }
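 * For example, in a 2-dimensional space with lineality space
 * { (t, t) }, i.e., E = [ 1 -1 ], the mapping is f : x -> x_0 - x_1 and
 * two instances x and x' are considered equivalent whenever
 * x_0 - x_1 = x'_0 - x'_1.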
4772 static isl_stat add_non_trivial_lineality(__isl_take isl_basic_set *lineality,
4773 struct isl_exploit_lineality_data *data)
4775 isl_mat *eq;
4776 isl_space *space;
4777 isl_set *univ;
4778 isl_multi_aff *ma;
4779 isl_multi_pw_aff *mpa;
4780 isl_map *map;
4781 int n;
4783 if (isl_basic_set_check_no_locals(lineality) < 0)
4784 goto error;
4786 space = isl_basic_set_get_space(lineality);
4787 if (!data->any_non_trivial) {
4788 data->equivalent = isl_union_map_empty(isl_space_copy(space));
4789 data->mask = isl_union_set_empty(isl_space_copy(space));
4791 data->any_non_trivial = isl_bool_true;
4793 univ = isl_set_universe(isl_space_copy(space));
4794 data->mask = isl_union_set_add_set(data->mask, univ);
4796 eq = isl_basic_set_extract_equalities(lineality);
4797 n = isl_mat_rows(eq);
4798 eq = isl_mat_insert_zero_rows(eq, 0, 1);
4799 eq = isl_mat_set_element_si(eq, 0, 0, 1);
4800 space = isl_space_from_domain(space);
4801 space = isl_space_add_dims(space, isl_dim_out, n);
4802 ma = isl_multi_aff_from_aff_mat(space, eq);
4803 mpa = isl_multi_pw_aff_from_multi_aff(ma);
4804 map = isl_multi_pw_aff_eq_map(mpa, isl_multi_pw_aff_copy(mpa));
4805 data->equivalent = isl_union_map_add_map(data->equivalent, map);
4807 isl_basic_set_free(lineality);
4808 return isl_stat_ok;
4809 error:
4810 isl_basic_set_free(lineality);
4811 return isl_stat_error;
4814 /* Check if the lineality space "set" is non-trivial (i.e., is not just
4815 * the origin or, in other words, satisfies a number of equality constraints
4816 * that is smaller than the dimension of the set).
4817 * If so, extend data->mask and data->equivalent accordingly.
4819 * The input should not have any local variables already, but
4820 * isl_set_remove_divs is called to make sure it does not.
4822 static isl_stat add_lineality(__isl_take isl_set *set, void *user)
4824 struct isl_exploit_lineality_data *data = user;
4825 isl_basic_set *hull;
4826 isl_size dim;
4827 int n_eq;
4829 set = isl_set_remove_divs(set);
4830 hull = isl_set_unshifted_simple_hull(set);
4831 dim = isl_basic_set_dim(hull, isl_dim_set);
4832 n_eq = isl_basic_set_n_equality(hull);
4833 if (dim < 0)
4834 goto error;
4835 if (dim != n_eq)
4836 return add_non_trivial_lineality(hull, data);
4837 isl_basic_set_free(hull);
4838 return isl_stat_ok;
4839 error:
4840 isl_basic_set_free(hull);
4841 return isl_stat_error;
4844 /* Check if the difference set on intra-node schedule constraints "intra"
4845 * has any non-trivial lineality space.
4846 * If so, then extend the difference set to a difference set
4847 * on equivalent elements. That is, if "intra" is
4849 * { y - x : (x,y) \in V }
4851 * and elements are equivalent if they have the same image under f,
4852 * then return
4854 * { y' - x' : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
4856 * or, since f is linear,
4858 * { y' - x' : (x,y) \in V and f(y - x) = f(y' - x') }
4860 * The results of the search for non-trivial lineality spaces is stored
4861 * in "data".
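 * For example, if f projects onto the first coordinate, then a
 * difference vector (1, 0) in "intra" gives rise to all vectors (1, t)
 * in the result, since f((1, t)) = f((1, 0)).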
4863 static __isl_give isl_union_set *exploit_intra_lineality(
4864 __isl_take isl_union_set *intra,
4865 struct isl_exploit_lineality_data *data)
4867 isl_union_set *lineality;
4868 isl_union_set *uset;
4870 data->any_non_trivial = isl_bool_false;
4871 lineality = isl_union_set_copy(intra);
4872 lineality = isl_union_set_combined_lineality_space(lineality);
4873 if (isl_union_set_foreach_set(lineality, &add_lineality, data) < 0)
4874 data->any_non_trivial = isl_bool_error;
4875 isl_union_set_free(lineality);
4877 if (data->any_non_trivial < 0)
4878 return isl_union_set_free(intra);
4879 if (!data->any_non_trivial)
4880 return intra;
4882 uset = isl_union_set_copy(intra);
4883 intra = isl_union_set_subtract(intra, isl_union_set_copy(data->mask));
4884 uset = isl_union_set_apply(uset, isl_union_map_copy(data->equivalent));
4885 intra = isl_union_set_union(intra, uset);
4887 intra = isl_union_set_remove_divs(intra);
4889 return intra;
4892 /* If the difference set on intra-node schedule constraints was found to have
4893 * any non-trivial lineality space by exploit_intra_lineality,
4894 * as recorded in "data", then extend the inter-node
4895 * schedule constraints "inter" to schedule constraints on equivalent elements.
4896 * That is, if "inter" is V and
4897 * elements are equivalent if they have the same image under f, then return
4899 * { (x', y') : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
4901 static __isl_give isl_union_map *exploit_inter_lineality(
4902 __isl_take isl_union_map *inter,
4903 struct isl_exploit_lineality_data *data)
4905 isl_union_map *umap;
4907 if (data->any_non_trivial < 0)
4908 return isl_union_map_free(inter);
4909 if (!data->any_non_trivial)
4910 return inter;
4912 umap = isl_union_map_copy(inter);
4913 inter = isl_union_map_subtract_range(inter,
4914 isl_union_set_copy(data->mask));
4915 umap = isl_union_map_apply_range(umap,
4916 isl_union_map_copy(data->equivalent));
4917 inter = isl_union_map_union(inter, umap);
4918 umap = isl_union_map_copy(inter);
4919 inter = isl_union_map_subtract_domain(inter,
4920 isl_union_set_copy(data->mask));
4921 umap = isl_union_map_apply_range(isl_union_map_copy(data->equivalent),
4922 umap);
4923 inter = isl_union_map_union(inter, umap);
4925 inter = isl_union_map_remove_divs(inter);
4927 return inter;
4930 /* For each (conditional) validity edge in "graph",
4931 * add the corresponding dependence relation using "add"
4932 * to a collection of dependence relations and return the result.
4933 * If "coincidence" is set, then coincidence edges are considered as well.
4935 static __isl_give isl_union_map *collect_validity(struct isl_sched_graph *graph,
4936 __isl_give isl_union_map *(*add)(__isl_take isl_union_map *umap,
4937 struct isl_sched_edge *edge), int coincidence)
4939 int i;
4940 isl_space *space;
4941 isl_union_map *umap;
4943 space = isl_space_copy(graph->node[0].space);
4944 umap = isl_union_map_empty(space);
4946 for (i = 0; i < graph->n_edge; ++i) {
4947 struct isl_sched_edge *edge = &graph->edge[i];
4949 if (!is_any_validity(edge) &&
4950 (!coincidence || !is_coincidence(edge)))
4951 continue;
4953 umap = add(umap, edge);
4956 return umap;
4959 /* For each dependence relation on a (conditional) validity edge
4960 * from a node to itself,
4961 * construct the set of coefficients of valid constraints for elements
4962 * in that dependence relation and collect the results.
4963 * If "coincidence" is set, then coincidence edges are considered as well.
4965 * In particular, for each dependence relation R, constraints
4966 * on coefficients (c_0, c_x) are constructed such that
4968 * c_0 + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
4970 * If the schedule_treat_coalescing option is set, then some constraints
4971 * that could be exploited to construct coalescing schedules
4972 * are removed before the dual is computed, but after the parameters
4973 * have been projected out.
4974 * The entire computation is essentially the same as that performed
4975 * by intra_coefficients, except that it operates on multiple
4976 * edges together and that the parameters are always projected out.
4978 * Additionally, exploit any non-trivial lineality space
4979 * in the difference set after removing coalescing constraints and
4980 * store the results of the non-trivial lineality space detection in "data".
4981 * The procedure is currently run unconditionally, but it is unlikely
4982 * to find any non-trivial lineality spaces if no coalescing constraints
4983 * have been removed.
4985 * Note that if a dependence relation is a union of basic maps,
4986 * then each basic map needs to be treated individually as it may only
4987 * be possible to carry the dependences expressed by some of those
4988 * basic maps and not all of them.
4989 * The collected validity constraints are therefore not coalesced and
4990 * it is assumed that they are not coalesced automatically.
4991 * Duplicate basic maps can be removed, however.
4992 * In particular, if the same basic map appears as a disjunct
4993 * in multiple edges, then it only needs to be carried once.
4995 static __isl_give isl_basic_set_list *collect_intra_validity(isl_ctx *ctx,
4996 struct isl_sched_graph *graph, int coincidence,
4997 struct isl_exploit_lineality_data *data)
4999 isl_union_map *intra;
5000 isl_union_set *delta;
5001 isl_basic_set_list *list;
5003 intra = collect_validity(graph, &add_intra, coincidence);
5004 delta = isl_union_map_deltas(intra);
5005 delta = isl_union_set_project_out_all_params(delta);
5006 delta = isl_union_set_remove_divs(delta);
5007 if (isl_options_get_schedule_treat_coalescing(ctx))
5008 delta = union_drop_coalescing_constraints(ctx, graph, delta);
5009 delta = exploit_intra_lineality(delta, data);
5010 list = isl_union_set_get_basic_set_list(delta);
5011 isl_union_set_free(delta);
5013 return isl_basic_set_list_coefficients(list);
5016 /* For each dependence relation on a (conditional) validity edge
5017 * from a node to some other node,
5018 * construct the set of coefficients of valid constraints for elements
5019 * in that dependence relation and collect the results.
5020 * If "coincidence" is set, then coincidence edges are considered as well.
5022 * In particular, for each dependence relation R, constraints
5023 * on coefficients (c_0, c_n, c_x, c_y) are constructed such that
5025 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
5027 * This computation is essentially the same as that performed
5028 * by inter_coefficients, except that it operates on multiple
5029 * edges together.
5031 * Additionally, exploit any non-trivial lineality space
5032 * that may have been discovered by collect_intra_validity
5033 * (as stored in "data").
5035 * Note that if a dependence relation is a union of basic maps,
5036 * then each basic map needs to be treated individually as it may only
5037 * be possible to carry the dependences expressed by some of those
5038 * basic maps and not all of them.
5039 * The collected validity constraints are therefore not coalesced and
5040 * it is assumed that they are not coalesced automatically.
5041 * Duplicate basic maps can be removed, however.
5042 * In particular, if the same basic map appears as a disjunct
5043 * in multiple edges, then it only needs to be carried once.
5045 static __isl_give isl_basic_set_list *collect_inter_validity(
5046 struct isl_sched_graph *graph, int coincidence,
5047 struct isl_exploit_lineality_data *data)
5049 isl_union_map *inter;
5050 isl_union_set *wrap;
5051 isl_basic_set_list *list;
5053 inter = collect_validity(graph, &add_inter, coincidence);
5054 inter = exploit_inter_lineality(inter, data);
5055 inter = isl_union_map_remove_divs(inter);
5056 wrap = isl_union_map_wrap(inter);
5057 list = isl_union_set_get_basic_set_list(wrap);
5058 isl_union_set_free(wrap);
5059 return isl_basic_set_list_coefficients(list);
5062 /* Construct an LP problem for finding schedule coefficients
5063 * such that the schedule carries as many of the "n_edge" groups of
5064 * dependences as possible based on the corresponding coefficient
5065 * constraints and return the lexicographically smallest non-trivial solution.
5066 * "intra" is the sequence of coefficient constraints for intra-node edges.
5067 * "inter" is the sequence of coefficient constraints for inter-node edges.
5068 * If "want_integral" is set, then compute an integral solution
5069 * for the coefficients rather than using the numerators
5070 * of a rational solution.
5071 * "carry_inter" indicates whether inter-node edges should be carried or
5072 * only respected.
5074 * If none of the "n_edge" groups can be carried
5075 * then return an empty vector.
5077 static __isl_give isl_vec *compute_carrying_sol_coef(isl_ctx *ctx,
5078 struct isl_sched_graph *graph, int n_edge,
5079 __isl_keep isl_basic_set_list *intra,
5080 __isl_keep isl_basic_set_list *inter, int want_integral,
5081 int carry_inter)
5083 isl_basic_set *lp;
5085 if (setup_carry_lp(ctx, graph, n_edge, intra, inter, carry_inter) < 0)
5086 return NULL;
5088 lp = isl_basic_set_copy(graph->lp);
5089 return non_neg_lexmin(graph, lp, n_edge, want_integral);
5092 /* Construct an LP problem for finding schedule coefficients
5093 * such that the schedule carries as many of the validity dependences
5094 * as possible and
5095 * return the lexicographically smallest non-trivial solution.
5096 * If "fallback" is set, then the carrying is performed as a fallback
5097 * for the Pluto-like scheduler.
5098 * If "coincidence" is set, then try and carry coincidence edges as well.
5100 * The variable "n_edge" stores the number of groups that should be carried.
5101 * If none of the "n_edge" groups can be carried
5102 * then return an empty vector.
5103 * If, moreover, "n_edge" is zero, then the LP problem does not even
5104 * need to be constructed.
5106 * If a fallback solution is being computed, then compute an integral solution
5107 * for the coefficients rather than using the numerators
5108 * of a rational solution.
5110 * If a fallback solution is being computed, if there are any intra-node
5111 * dependences, and if requested by the user, then first try
5112 * to only carry those intra-node dependences.
5113 * If this fails to carry any dependences, then try again
5114 * with the inter-node dependences included.
5116 static __isl_give isl_vec *compute_carrying_sol(isl_ctx *ctx,
5117 struct isl_sched_graph *graph, int fallback, int coincidence)
5119 int n_intra, n_inter;
5120 int n_edge;
5121 struct isl_carry carry = { 0 };
5122 isl_vec *sol;
5124 carry.intra = collect_intra_validity(ctx, graph, coincidence,
5125 &carry.lineality);
5126 carry.inter = collect_inter_validity(graph, coincidence,
5127 &carry.lineality);
5128 if (!carry.intra || !carry.inter)
5129 goto error;
5130 n_intra = isl_basic_set_list_n_basic_set(carry.intra);
5131 n_inter = isl_basic_set_list_n_basic_set(carry.inter);
5133 if (fallback && n_intra > 0 &&
5134 isl_options_get_schedule_carry_self_first(ctx)) {
5135 sol = compute_carrying_sol_coef(ctx, graph, n_intra,
5136 carry.intra, carry.inter, fallback, 0);
5137 if (!sol || sol->size != 0 || n_inter == 0) {
5138 isl_carry_clear(&carry);
5139 return sol;
5141 isl_vec_free(sol);
5144 n_edge = n_intra + n_inter;
5145 if (n_edge == 0) {
5146 isl_carry_clear(&carry);
5147 return isl_vec_alloc(ctx, 0);
5150 sol = compute_carrying_sol_coef(ctx, graph, n_edge,
5151 carry.intra, carry.inter, fallback, 1);
5152 isl_carry_clear(&carry);
5153 return sol;
5154 error:
5155 isl_carry_clear(&carry);
5156 return NULL;
5159 /* Construct a schedule row for each node such that as many validity dependences
5160 * as possible are carried and then continue with the next band.
5161 * If "fallback" is set, then the carrying is performed as a fallback
5162 * for the Pluto-like scheduler.
5163 * If "coincidence" is set, then try and carry coincidence edges as well.
5165 * If there are no validity dependences, then no dependence can be carried and
5166 * the procedure is guaranteed to fail. If there is more than one component,
5167 * then try computing a schedule on each component separately
5168 * to prevent or at least postpone this failure.
5170 * If a schedule row is computed, then check that dependences are carried
5171 * for at least one of the edges.
5173 * If the computed schedule row turns out to be trivial on one or
5174 * more nodes where it should not be trivial, then we throw it away
5175 * and try again on each component separately.
5177 * If there is only one component, then we accept the schedule row anyway,
5178 * but we do not consider it as a complete row and therefore do not
5179 * increment graph->n_row. Note that the ranks of the nodes that
5180 * do get a non-trivial schedule part will get updated regardless and
5181 * graph->maxvar is computed based on these ranks. The test for
5182 * whether more schedule rows are required in compute_schedule_wcc
5183 * is therefore not affected.
5185 * Insert a band corresponding to the schedule row at position "node"
5186 * of the schedule tree and continue with the construction of the schedule.
5187 * This insertion and the continued construction is performed by split_scaled
5188 * after optionally checking for non-trivial common divisors.
5190 static __isl_give isl_schedule_node *carry(__isl_take isl_schedule_node *node,
5191 struct isl_sched_graph *graph, int fallback, int coincidence)
5193 int trivial;
5194 isl_ctx *ctx;
5195 isl_vec *sol;
5197 if (!node)
5198 return NULL;
5200 ctx = isl_schedule_node_get_ctx(node);
5201 sol = compute_carrying_sol(ctx, graph, fallback, coincidence);
5202 if (!sol)
5203 return isl_schedule_node_free(node);
5204 if (sol->size == 0) {
5205 isl_vec_free(sol);
5206 if (graph->scc > 1)
5207 return compute_component_schedule(node, graph, 1);
5208 isl_die(ctx, isl_error_unknown, "unable to carry dependences",
5209 return isl_schedule_node_free(node));
5212 trivial = is_any_trivial(graph, sol);
5213 if (trivial < 0) {
5214 sol = isl_vec_free(sol);
5215 } else if (trivial && graph->scc > 1) {
5216 isl_vec_free(sol);
5217 return compute_component_schedule(node, graph, 1);
5220 if (update_schedule(graph, sol, 0) < 0)
5221 return isl_schedule_node_free(node);
5222 if (trivial)
5223 graph->n_row--;
5225 return split_scaled(node, graph);
5228 /* Construct a schedule row for each node such that as many validity dependences
5229 * as possible are carried and then continue with the next band.
5230 * Do so as a fallback for the Pluto-like scheduler.
5231 * If "coincidence" is set, then try and carry coincidence edges as well.
5233 static __isl_give isl_schedule_node *carry_fallback(
5234 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5235 int coincidence)
5237 return carry(node, graph, 1, coincidence);
5240 /* Construct a schedule row for each node such that as many validity dependences
5241 * as possible are carried and then continue with the next band.
5242 * Do so for the case where the Feautrier scheduler was selected
5243 * by the user.
5245 static __isl_give isl_schedule_node *carry_feautrier(
5246 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5248 return carry(node, graph, 0, 0);
5251 /* Construct a schedule row for each node such that as many validity dependences
5252 * as possible are carried and then continue with the next band.
5253 * Do so as a fallback for the Pluto-like scheduler.
5255 static __isl_give isl_schedule_node *carry_dependences(
5256 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5258 return carry_fallback(node, graph, 0);
5261 /* Construct a schedule row for each node such that as many validity or
5262 * coincidence dependences as possible are carried and
5263 * then continue with the next band.
5264 * Do so as a fallback for the Pluto-like scheduler.
5266 static __isl_give isl_schedule_node *carry_coincidence(
5267 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5269 return carry_fallback(node, graph, 1);
5272 /* Topologically sort statements mapped to the same schedule iteration
5273 * and insert a sequence node in front of "node"
5274 * corresponding to this order.
5275 * If "initialized" is set, then it may be assumed that compute_maxvar
5276 * has been called on the current band. Otherwise, call
5277 * compute_maxvar if (and before) carry_dependences gets called.
5279 * If it turns out to be impossible to sort the statements apart,
5280 * because different dependences impose different orderings
5281 * on the statements, then we extend the schedule such that
5282 * it carries at least one more dependence.
5284 static __isl_give isl_schedule_node *sort_statements(
5285 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5286 int initialized)
5288 isl_ctx *ctx;
5289 isl_union_set_list *filters;
5291 if (!node)
5292 return NULL;
5294 ctx = isl_schedule_node_get_ctx(node);
5295 if (graph->n < 1)
5296 isl_die(ctx, isl_error_internal,
5297 "graph should have at least one node",
5298 return isl_schedule_node_free(node));
5300 if (graph->n == 1)
5301 return node;
5303 if (update_edges(ctx, graph) < 0)
5304 return isl_schedule_node_free(node);
5306 if (graph->n_edge == 0)
5307 return node;
5309 if (detect_sccs(ctx, graph) < 0)
5310 return isl_schedule_node_free(node);
5312 next_band(graph);
5313 if (graph->scc < graph->n) {
5314 if (!initialized && compute_maxvar(graph) < 0)
5315 return isl_schedule_node_free(node);
5316 return carry_dependences(node, graph);
5319 filters = extract_sccs(ctx, graph);
5320 node = isl_schedule_node_insert_sequence(node, filters);
5322 return node;
5325 /* Are there any (non-empty) (conditional) validity edges in the graph?
5327 static int has_validity_edges(struct isl_sched_graph *graph)
5329 int i;
5331 for (i = 0; i < graph->n_edge; ++i) {
5332 int empty;
5334 empty = isl_map_plain_is_empty(graph->edge[i].map);
5335 if (empty < 0)
5336 return -1;
5337 if (empty)
5338 continue;
5339 if (is_any_validity(&graph->edge[i]))
5340 return 1;
5343 return 0;
5346 /* Should we apply a Feautrier step?
5347 * That is, did the user request the Feautrier algorithm and are
5348 * there any validity dependences (left)?
5350 static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
5352 if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
5353 return 0;
5355 return has_validity_edges(graph);
5358 /* Compute a schedule for a connected dependence graph using Feautrier's
5359 * multi-dimensional scheduling algorithm and return the updated schedule node.
5361 * The original algorithm is described in [1].
5362 * The main idea is to minimize the number of scheduling dimensions, by
5363 * trying to satisfy as many dependences as possible per scheduling dimension.
5365 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
5366 * Problem, Part II: Multi-Dimensional Time.
5367 * In Intl. Journal of Parallel Programming, 1992.
5369 static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
5370 isl_schedule_node *node, struct isl_sched_graph *graph)
5372 return carry_feautrier(node, graph);
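/* Illustrative sketch: how user code might request the Feautrier
 * algorithm handled above through isl's public interface.
 * The wrapper itself is hypothetical; isl_options_set_schedule_algorithm,
 * ISL_SCHEDULE_ALGORITHM_FEAUTRIER and
 * isl_schedule_constraints_compute_schedule are existing public entry points.
 */
static __isl_give isl_schedule *compute_feautrier_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);

	/* Select Feautrier's algorithm instead of the default one. */
	isl_options_set_schedule_algorithm(ctx,
		ISL_SCHEDULE_ALGORITHM_FEAUTRIER);
	return isl_schedule_constraints_compute_schedule(sc);
}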
5375 /* Turn off the "local" bit on all (condition) edges.
5377 static void clear_local_edges(struct isl_sched_graph *graph)
5379 int i;
5381 for (i = 0; i < graph->n_edge; ++i)
5382 if (is_condition(&graph->edge[i]))
5383 clear_local(&graph->edge[i]);
5386 /* Does "graph" have both condition and conditional validity edges?
5388 static int need_condition_check(struct isl_sched_graph *graph)
5390 int i;
5391 int any_condition = 0;
5392 int any_conditional_validity = 0;
5394 for (i = 0; i < graph->n_edge; ++i) {
5395 if (is_condition(&graph->edge[i]))
5396 any_condition = 1;
5397 if (is_conditional_validity(&graph->edge[i]))
5398 any_conditional_validity = 1;
5401 return any_condition && any_conditional_validity;
5404 /* Does "graph" contain any coincidence edge?
5406 static int has_any_coincidence(struct isl_sched_graph *graph)
5408 int i;
5410 for (i = 0; i < graph->n_edge; ++i)
5411 if (is_coincidence(&graph->edge[i]))
5412 return 1;
5414 return 0;
5417 /* Extract the final schedule row as a map with the iteration domain
5418 * of "node" as domain.
5420 static __isl_give isl_map *final_row(struct isl_sched_node *node)
5422 isl_multi_aff *ma;
5423 int row;
5425 row = isl_mat_rows(node->sched) - 1;
5426 ma = node_extract_partial_schedule_multi_aff(node, row, 1);
5427 return isl_map_from_multi_aff(ma);
5430 /* Is the conditional validity dependence in the edge with index "edge_index"
5431 * violated by the latest (i.e., final) row of the schedule?
5432 * That is, is i scheduled after j
5433 * for any conditional validity dependence i -> j?
5435 static int is_violated(struct isl_sched_graph *graph, int edge_index)
5437 isl_map *src_sched, *dst_sched, *map;
5438 struct isl_sched_edge *edge = &graph->edge[edge_index];
5439 int empty;
5441 src_sched = final_row(edge->src);
5442 dst_sched = final_row(edge->dst);
5443 map = isl_map_copy(edge->map);
5444 map = isl_map_apply_domain(map, src_sched);
5445 map = isl_map_apply_range(map, dst_sched);
5446 map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
5447 empty = isl_map_is_empty(map);
5448 isl_map_free(map);
5450 if (empty < 0)
5451 return -1;
5453 return !empty;
5456 /* Does "graph" have any satisfied condition edges that
5457 * are adjacent to the conditional validity constraint with
5458 * domain "conditional_source" and range "conditional_sink"?
5460 * A satisfied condition is one that is not local.
5461 * If a condition was forced to be local already (i.e., marked as local)
5462 * then there is no need to check if it is in fact local.
5464 * Additionally, mark all adjacent condition edges found as local.
5466 static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
5467 __isl_keep isl_union_set *conditional_source,
5468 __isl_keep isl_union_set *conditional_sink)
5470 int i;
5471 int any = 0;
5473 for (i = 0; i < graph->n_edge; ++i) {
5474 int adjacent, local;
5475 isl_union_map *condition;
5477 if (!is_condition(&graph->edge[i]))
5478 continue;
5479 if (is_local(&graph->edge[i]))
5480 continue;
5482 condition = graph->edge[i].tagged_condition;
5483 adjacent = domain_intersects(condition, conditional_sink);
5484 if (adjacent >= 0 && !adjacent)
5485 adjacent = range_intersects(condition,
5486 conditional_source);
5487 if (adjacent < 0)
5488 return -1;
5489 if (!adjacent)
5490 continue;
5492 set_local(&graph->edge[i]);
5494 local = is_condition_false(&graph->edge[i]);
5495 if (local < 0)
5496 return -1;
5497 if (!local)
5498 any = 1;
5501 return any;
5504 /* Are there any violated conditional validity dependences with
5505 * adjacent condition dependences that are not local with respect
5506 * to the current schedule?
5507 * That is, is the conditional validity constraint violated?
5509 * Additionally, mark all those adjacent condition dependences as local.
5510 * We also mark those adjacent condition dependences that were not marked
5511 * as local before, but just happened to be local already. This ensures
5512 * that they remain local if the schedule is recomputed.
5514 * We first collect domain and range of all violated conditional validity
5515 * dependences and then check if there are any adjacent non-local
5516 * condition dependences.
5518 static int has_violated_conditional_constraint(isl_ctx *ctx,
5519 struct isl_sched_graph *graph)
5521 int i;
5522 int any = 0;
5523 isl_union_set *source, *sink;
5525 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
5526 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
5527 for (i = 0; i < graph->n_edge; ++i) {
5528 isl_union_set *uset;
5529 isl_union_map *umap;
5530 int violated;
5532 if (!is_conditional_validity(&graph->edge[i]))
5533 continue;
5535 violated = is_violated(graph, i);
5536 if (violated < 0)
5537 goto error;
5538 if (!violated)
5539 continue;
5541 any = 1;
5543 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
5544 uset = isl_union_map_domain(umap);
5545 source = isl_union_set_union(source, uset);
5546 source = isl_union_set_coalesce(source);
5548 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
5549 uset = isl_union_map_range(umap);
5550 sink = isl_union_set_union(sink, uset);
5551 sink = isl_union_set_coalesce(sink);
5554 if (any)
5555 any = has_adjacent_true_conditions(graph, source, sink);
5557 isl_union_set_free(source);
5558 isl_union_set_free(sink);
5559 return any;
5560 error:
5561 isl_union_set_free(source);
5562 isl_union_set_free(sink);
5563 return -1;
5566 /* Examine the current band (the rows between graph->band_start and
5567 * graph->n_total_row), deciding whether to drop it or add it to "node"
5568 * and then continue with the computation of the next band, if any.
5569 * If "initialized" is set, then it may be assumed that compute_maxvar
5570 * has been called on the current band. Otherwise, call
5571 * compute_maxvar if (and before) carry_dependences gets called.
5573 * The caller keeps looking for a new row as long as
5574 * graph->n_row < graph->maxvar. If the latest attempt to find
5575 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
5576 * then we either
5577 * - split between SCCs and start over (assuming we found an interesting
5578 * pair of SCCs between which to split)
5579 * - continue with the next band (assuming the current band has at least
5580 * one row)
5581 * - if there is more than one SCC left, then split along all SCCs
5582 * - if outer coincidence needs to be enforced, then try to carry as many
5583 * validity or coincidence dependences as possible and
5584 * continue with the next band
5585 * - try to carry as many validity dependences as possible and
5586 * continue with the next band
5587 * In each case, we first insert a band node in the schedule tree
5588 * if any rows have been computed.
5590 * If the caller managed to complete the schedule and the current band
5591 * is empty, then finish off by topologically
5592 * sorting the statements based on the remaining dependences.
5593 * If, on the other hand, the current band has at least one row,
5594 * then continue with the next band. Note that this next band
5595 * will necessarily be empty, but the graph may still be split up
5596 * into weakly connected components before arriving back here.
5598 static __isl_give isl_schedule_node *compute_schedule_finish_band(
5599 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5600 int initialized)
5602 int empty;
5604 if (!node)
5605 return NULL;
5607 empty = graph->n_total_row == graph->band_start;
5608 if (graph->n_row < graph->maxvar) {
5609 isl_ctx *ctx;
5611 ctx = isl_schedule_node_get_ctx(node);
5612 if (!ctx->opt->schedule_maximize_band_depth && !empty)
5613 return compute_next_band(node, graph, 1);
5614 if (graph->src_scc >= 0)
5615 return compute_split_schedule(node, graph);
5616 if (!empty)
5617 return compute_next_band(node, graph, 1);
5618 if (graph->scc > 1)
5619 return compute_component_schedule(node, graph, 1);
5620 if (!initialized && compute_maxvar(graph) < 0)
5621 return isl_schedule_node_free(node);
5622 if (isl_options_get_schedule_outer_coincidence(ctx))
5623 return carry_coincidence(node, graph);
5624 return carry_dependences(node, graph);
5627 if (!empty)
5628 return compute_next_band(node, graph, 1);
5629 return sort_statements(node, graph, initialized);
5632 /* Construct a band of schedule rows for a connected dependence graph.
5633 * The caller is responsible for determining the strongly connected
5634 * components and calling compute_maxvar first.
5636 * We try to find a sequence of as many schedule rows as possible that result
5637 * in non-negative dependence distances (independent of the previous rows
5638 * in the sequence, i.e., such that the sequence is tilable), with as
5639 * many of the initial rows as possible satisfying the coincidence constraints.
5640 * The computation stops if we can't find any more rows or if we have found
5641 * all the rows we wanted to find.
5643 * If ctx->opt->schedule_outer_coincidence is set, then we force the
5644 * outermost dimension to satisfy the coincidence constraints. If this
5645 * turns out to be impossible, we fall back on the general scheme above
5646 * and try to carry as many dependences as possible.
5648 * If "graph" contains both condition and conditional validity dependences,
5649 * then we need to check that the conditional schedule constraint
5650 * is satisfied, i.e., there are no violated conditional validity dependences
5651 * that are adjacent to any non-local condition dependences.
5652 * If there are, then we mark all those adjacent condition dependences
5653 * as local and recompute the current band. Those dependences that
5654 * are marked local will then be forced to be local.
5655 * The initial computation is performed with no dependences marked as local.
5656 * If we are lucky, then there will be no violated conditional validity
5657 * dependences adjacent to any non-local condition dependences.
5658 * Otherwise, we mark some additional condition dependences as local and
5659 * recompute. We continue this process until there are no violations left or
5660 * until we are no longer able to compute a schedule.
5661 * Since there are only a finite number of dependences,
5662 * there will only be a finite number of iterations.
5664 static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
5665 struct isl_sched_graph *graph)
5667 int has_coincidence;
5668 int use_coincidence;
5669 int force_coincidence = 0;
5670 int check_conditional;
5672 if (sort_sccs(graph) < 0)
5673 return isl_stat_error;
5675 clear_local_edges(graph);
5676 check_conditional = need_condition_check(graph);
5677 has_coincidence = has_any_coincidence(graph);
5679 if (ctx->opt->schedule_outer_coincidence)
5680 force_coincidence = 1;
5682 use_coincidence = has_coincidence;
5683 while (graph->n_row < graph->maxvar) {
5684 isl_vec *sol;
5685 int violated;
5686 int coincident;
5688 graph->src_scc = -1;
5689 graph->dst_scc = -1;
5691 if (setup_lp(ctx, graph, use_coincidence) < 0)
5692 return isl_stat_error;
5693 sol = solve_lp(ctx, graph);
5694 if (!sol)
5695 return isl_stat_error;
5696 if (sol->size == 0) {
5697 int empty = graph->n_total_row == graph->band_start;
5699 isl_vec_free(sol);
5700 if (use_coincidence && (!force_coincidence || !empty)) {
5701 use_coincidence = 0;
5702 continue;
5704 return isl_stat_ok;
5706 coincident = !has_coincidence || use_coincidence;
5707 if (update_schedule(graph, sol, coincident) < 0)
5708 return isl_stat_error;
5710 if (!check_conditional)
5711 continue;
5712 violated = has_violated_conditional_constraint(ctx, graph);
5713 if (violated < 0)
5714 return isl_stat_error;
5715 if (!violated)
5716 continue;
5717 if (reset_band(graph) < 0)
5718 return isl_stat_error;
5719 use_coincidence = has_coincidence;
5722 return isl_stat_ok;
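/* Illustrative sketch: the two user options consulted above when building
 * and finishing a band, set through isl's public interface. The helper is
 * hypothetical; the isl_options_set_* calls are existing public entry points
 * (the corresponding getters are used in this file).
 */
static void configure_band_options(isl_ctx *ctx)
{
	/* Require the outermost member of each band to be coincident. */
	isl_options_set_schedule_outer_coincidence(ctx, 1);
	/* Try to maximize the number of rows in each band. */
	isl_options_set_schedule_maximize_band_depth(ctx, 1);
}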
5725 /* Compute a schedule for a connected dependence graph by considering
5726 * the graph as a whole and return the updated schedule node.
5728 * The actual schedule rows of the current band are computed by
5729 * compute_schedule_wcc_band. compute_schedule_finish_band takes
5730 * care of integrating the band into "node" and continuing
5731 * the computation.
5733 static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
5734 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5736 isl_ctx *ctx;
5738 if (!node)
5739 return NULL;
5741 ctx = isl_schedule_node_get_ctx(node);
5742 if (compute_schedule_wcc_band(ctx, graph) < 0)
5743 return isl_schedule_node_free(node);
5745 return compute_schedule_finish_band(node, graph, 1);
5748 /* Clustering information used by compute_schedule_wcc_clustering.
5750 * "n" is the number of SCCs in the original dependence graph
5751 * "scc" is an array of "n" elements, each representing an SCC
5752 * of the original dependence graph. All entries in the same cluster
5753 * have the same number of schedule rows.
5754 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
5755 * where each cluster is represented by the index of the first SCC
5756 * in the cluster. Initially, each SCC belongs to a cluster containing
5757 * only that SCC.
5759 * "scc_in_merge" is used by merge_clusters_along_edge to keep
5760 * track of which SCCs need to be merged.
5762 * "cluster" contains the merged clusters of SCCs after the clustering
5763 * has completed.
5765 * "scc_node" is a temporary data structure used inside copy_partial.
5766 * For each SCC, it keeps track of the number of nodes in the SCC
5767 * that have already been copied.
5769 struct isl_clustering {
5770 int n;
5771 struct isl_sched_graph *scc;
5772 struct isl_sched_graph *cluster;
5773 int *scc_cluster;
5774 int *scc_node;
5775 int *scc_in_merge;
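/* For orientation: the structure above is filled in by clustering_init,
 * after which find_proximity repeatedly selects a proximity edge,
 * merge_clusters_along_edge (via mark_merge_sccs and try_merge) merges
 * the corresponding clusters, and extract_clusters and clustering_free
 * wrap up, as suggested by the functions below.
 */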
5778 /* Initialize the clustering data structure "c" from "graph".
5780 * In particular, allocate memory, extract the SCCs from "graph"
5781 * into c->scc, initialize scc_cluster and construct
5782 * a band of schedule rows for each SCC.
5783 * Within each extracted sub-graph, there is only one SCC by definition.
5784 * Each SCC initially belongs to a cluster containing only that SCC.
5786 static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
5787 struct isl_sched_graph *graph)
5789 int i;
5791 c->n = graph->scc;
5792 c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
5793 c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
5794 c->scc_cluster = isl_calloc_array(ctx, int, c->n);
5795 c->scc_node = isl_calloc_array(ctx, int, c->n);
5796 c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
5797 if (!c->scc || !c->cluster ||
5798 !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
5799 return isl_stat_error;
5801 for (i = 0; i < c->n; ++i) {
5802 if (extract_sub_graph(ctx, graph, &node_scc_exactly,
5803 &edge_scc_exactly, i, &c->scc[i]) < 0)
5804 return isl_stat_error;
5805 c->scc[i].scc = 1;
5806 if (compute_maxvar(&c->scc[i]) < 0)
5807 return isl_stat_error;
5808 if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
5809 return isl_stat_error;
5810 c->scc_cluster[i] = i;
5813 return isl_stat_ok;
5816 /* Free all memory allocated for "c".
5818 static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
5820 int i;
5822 if (c->scc)
5823 for (i = 0; i < c->n; ++i)
5824 graph_free(ctx, &c->scc[i]);
5825 free(c->scc);
5826 if (c->cluster)
5827 for (i = 0; i < c->n; ++i)
5828 graph_free(ctx, &c->cluster[i]);
5829 free(c->cluster);
5830 free(c->scc_cluster);
5831 free(c->scc_node);
5832 free(c->scc_in_merge);
5835 /* Should we refrain from merging the cluster in "graph" with
5836 * any other cluster?
5837 * In particular, is its current schedule band empty and incomplete?
5839 static int bad_cluster(struct isl_sched_graph *graph)
5841 return graph->n_row < graph->maxvar &&
5842 graph->n_total_row == graph->band_start;
5845 /* Is "edge" a proximity edge with a non-empty dependence relation?
5847 static isl_bool is_non_empty_proximity(struct isl_sched_edge *edge)
5849 if (!is_proximity(edge))
5850 return isl_bool_false;
5851 return isl_bool_not(isl_map_plain_is_empty(edge->map));
5854 /* Return the index of an edge in "graph" that can be used to merge
5855 * two clusters in "c".
5856 * Return graph->n_edge if no such edge can be found.
5857 * Return -1 on error.
5859 * In particular, return a proximity edge between two clusters
5860 * that is not marked "no_merge" and such that neither of the
5861 * two clusters has an incomplete, empty band.
5863 * If there are multiple such edges, then try and find the most
5864 * appropriate edge to use for merging. In particular, pick the edge
5865 * with the greatest weight. If there are multiple of those,
5866 * then pick one with the shortest distance between
5867 * the two cluster representatives.
5869 static int find_proximity(struct isl_sched_graph *graph,
5870 struct isl_clustering *c)
5872 int i, best = graph->n_edge, best_dist, best_weight;
5874 for (i = 0; i < graph->n_edge; ++i) {
5875 struct isl_sched_edge *edge = &graph->edge[i];
5876 int dist, weight;
5877 isl_bool prox;
5879 prox = is_non_empty_proximity(edge);
5880 if (prox < 0)
5881 return -1;
5882 if (!prox)
5883 continue;
5884 if (edge->no_merge)
5885 continue;
5886 if (bad_cluster(&c->scc[edge->src->scc]) ||
5887 bad_cluster(&c->scc[edge->dst->scc]))
5888 continue;
5889 dist = c->scc_cluster[edge->dst->scc] -
5890 c->scc_cluster[edge->src->scc];
5891 if (dist == 0)
5892 continue;
5893 weight = edge->weight;
5894 if (best < graph->n_edge) {
5895 if (best_weight > weight)
5896 continue;
5897 if (best_weight == weight && best_dist <= dist)
5898 continue;
5900 best = i;
5901 best_dist = dist;
5902 best_weight = weight;
5905 return best;
5908 /* Internal data structure used in mark_merge_sccs.
5910 * "graph" is the dependence graph in which a strongly connected
5911 * component is constructed.
5912 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
5913 * "src" and "dst" are the indices of the nodes that are being merged.
5915 struct isl_mark_merge_sccs_data {
5916 struct isl_sched_graph *graph;
5917 int *scc_cluster;
5918 int src;
5919 int dst;
5922 /* Check whether the cluster containing node "i" depends on the cluster
5923 * containing node "j". If "i" and "j" belong to the same cluster,
5924 * then they are taken to depend on each other to ensure that
5925 * the resulting strongly connected component consists of complete
5926 * clusters. Furthermore, if "i" and "j" are the two nodes that
5927 * are being merged, then they are taken to depend on each other as well.
5928 * Otherwise, check if there is a (conditional) validity dependence
5929 * from node[j] to node[i], forcing node[i] to follow node[j].
5931 static isl_bool cluster_follows(int i, int j, void *user)
5933 struct isl_mark_merge_sccs_data *data = user;
5934 struct isl_sched_graph *graph = data->graph;
5935 int *scc_cluster = data->scc_cluster;
5937 if (data->src == i && data->dst == j)
5938 return isl_bool_true;
5939 if (data->src == j && data->dst == i)
5940 return isl_bool_true;
5941 if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
5942 return isl_bool_true;
5944 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
5947 /* Mark all SCCs that belong to either of the two clusters in "c"
5948 * connected by the edge in "graph" with index "edge", or to any
5949 * of the intermediate clusters.
5950 * The marking is recorded in c->scc_in_merge.
5952 * The given edge has been selected for merging two clusters,
5953 * meaning that there is at least a proximity edge between the two nodes.
5954 * However, there may also be (indirect) validity dependences
5955 * between the two nodes. When merging the two clusters, all clusters
5956 * containing one or more of the intermediate nodes along the
5957 * indirect validity dependences need to be merged in as well.
5959 * First collect all such nodes by computing the strongly connected
5960 * component (SCC) containing the two nodes connected by the edge, where
5961 * the two nodes are considered to depend on each other to make
5962 * sure they end up in the same SCC. Similarly, each node is considered
5963 * to depend on every other node in the same cluster to ensure
5964 * that the SCC consists of complete clusters.
5966 * Then the original SCCs that contain any of these nodes are marked
5967 * in c->scc_in_merge.
5969 static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
5970 int edge, struct isl_clustering *c)
5972 struct isl_mark_merge_sccs_data data;
5973 struct isl_tarjan_graph *g;
5974 int i;
5976 for (i = 0; i < c->n; ++i)
5977 c->scc_in_merge[i] = 0;
5979 data.graph = graph;
5980 data.scc_cluster = c->scc_cluster;
5981 data.src = graph->edge[edge].src - graph->node;
5982 data.dst = graph->edge[edge].dst - graph->node;
5984 g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
5985 &cluster_follows, &data);
5986 if (!g)
5987 goto error;
5989 i = g->op;
5990 if (i < 3)
5991 isl_die(ctx, isl_error_internal,
5992 "expecting at least two nodes in component",
5993 goto error);
5994 if (g->order[--i] != -1)
5995 isl_die(ctx, isl_error_internal,
5996 "expecting end of component marker", goto error);
5998 for (--i; i >= 0 && g->order[i] != -1; --i) {
5999 int scc = graph->node[g->order[i]].scc;
6000 c->scc_in_merge[scc] = 1;
6003 isl_tarjan_graph_free(g);
6004 return isl_stat_ok;
6005 error:
6006 isl_tarjan_graph_free(g);
6007 return isl_stat_error;
6010 /* Construct the identifier "cluster_i".
6012 static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
6014 char name[40];
6016 snprintf(name, sizeof(name), "cluster_%d", i);
6017 return isl_id_alloc(ctx, name, NULL);
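/* For example, cluster_id(ctx, 3) above yields an isl_id named "cluster_3";
 * the 40-byte buffer is large enough for any int value of "i".
 */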
6020 /* Construct the space of the cluster with index "i" containing
6021 * the strongly connected component "scc".
6023 * In particular, construct a space called cluster_i with dimension equal
6024 * to the number of schedule rows in the current band of "scc".
6026 static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
6028 int nvar;
6029 isl_space *space;
6030 isl_id *id;
6032 nvar = scc->n_total_row - scc->band_start;
6033 space = isl_space_copy(scc->node[0].space);
6034 space = isl_space_params(space);
6035 space = isl_space_set_from_params(space);
6036 space = isl_space_add_dims(space, isl_dim_set, nvar);
6037 id = cluster_id(isl_space_get_ctx(space), i);
6038 space = isl_space_set_tuple_id(space, isl_dim_set, id);
6040 return space;
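/* For example, if the current band of "scc" has two schedule rows and
 * "i" is 5, the space constructed above is that of a two-dimensional
 * set "cluster_5" with anonymous set dimensions.
 */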
6043 /* Collect the domain of the graph for merging clusters.
6045 * In particular, for each cluster with first SCC "i", construct
6046 * a set in the space called cluster_i with dimension equal
6047 * to the number of schedule rows in the current band of the cluster.
6049 static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
6050 struct isl_sched_graph *graph, struct isl_clustering *c)
6052 int i;
6053 isl_space *space;
6054 isl_union_set *domain;
6056 space = isl_space_params_alloc(ctx, 0);
6057 domain = isl_union_set_empty(space);
6059 for (i = 0; i < graph->scc; ++i) {
6060 isl_space *space;
6062 if (!c->scc_in_merge[i])
6063 continue;
6064 if (c->scc_cluster[i] != i)
6065 continue;
6066 space = cluster_space(&c->scc[i], i);
6067 domain = isl_union_set_add_set(domain, isl_set_universe(space));
6070 return domain;
6073 /* Construct a map from the original instances to the corresponding
6074 * cluster instance in the current bands of the clusters in "c".
6076 static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
6077 struct isl_sched_graph *graph, struct isl_clustering *c)
6079 int i, j;
6080 isl_space *space;
6081 isl_union_map *cluster_map;
6083 space = isl_space_params_alloc(ctx, 0);
6084 cluster_map = isl_union_map_empty(space);
6085 for (i = 0; i < graph->scc; ++i) {
6086 int start, n;
6087 isl_id *id;
6089 if (!c->scc_in_merge[i])
6090 continue;
6092 id = cluster_id(ctx, c->scc_cluster[i]);
6093 start = c->scc[i].band_start;
6094 n = c->scc[i].n_total_row - start;
6095 for (j = 0; j < c->scc[i].n; ++j) {
6096 isl_multi_aff *ma;
6097 isl_map *map;
6098 struct isl_sched_node *node = &c->scc[i].node[j];
6100 ma = node_extract_partial_schedule_multi_aff(node,
6101 start, n);
6102 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
6103 isl_id_copy(id));
6104 map = isl_map_from_multi_aff(ma);
6105 cluster_map = isl_union_map_add_map(cluster_map, map);
6107 isl_id_free(id);
6110 return cluster_map;
6113 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
6114 * that are not isl_edge_condition or isl_edge_conditional_validity.
6116 static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
6117 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
6118 __isl_take isl_schedule_constraints *sc)
6120 enum isl_edge_type t;
6122 if (!sc)
6123 return NULL;
6125 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
6126 if (t == isl_edge_condition ||
6127 t == isl_edge_conditional_validity)
6128 continue;
6129 if (!is_type(edge, t))
6130 continue;
6131 sc = isl_schedule_constraints_add(sc, t,
6132 isl_union_map_copy(umap));
6135 return sc;
6138 /* Add schedule constraints of types isl_edge_condition and
6139 * isl_edge_conditional_validity to "sc" by applying "umap" to
6140 * the domains of the wrapped relations in domain and range
6141 * of the corresponding tagged constraints of "edge".
6143 static __isl_give isl_schedule_constraints *add_conditional_constraints(
6144 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
6145 __isl_take isl_schedule_constraints *sc)
6147 enum isl_edge_type t;
6148 isl_union_map *tagged;
6150 for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
6151 if (!is_type(edge, t))
6152 continue;
6153 if (t == isl_edge_condition)
6154 tagged = isl_union_map_copy(edge->tagged_condition);
6155 else
6156 tagged = isl_union_map_copy(edge->tagged_validity);
6157 tagged = isl_union_map_zip(tagged);
6158 tagged = isl_union_map_apply_domain(tagged,
6159 isl_union_map_copy(umap));
6160 tagged = isl_union_map_zip(tagged);
6161 sc = isl_schedule_constraints_add(sc, t, tagged);
6162 if (!sc)
6163 return NULL;
6166 return sc;
6169 /* Given a mapping "cluster_map" from the original instances to
6170 * the cluster instances, add schedule constraints on the clusters
6171 * to "sc" corresponding to the original constraints represented by "edge".
6173 * For non-tagged dependence constraints, the cluster constraints
6174 * are obtained by applying "cluster_map" to the edge->map.
6176 * For tagged dependence constraints, "cluster_map" needs to be applied
6177 * to the domains of the wrapped relations in domain and range
6178 * of the tagged dependence constraints. Pick out the mappings
6179 * from these domains from "cluster_map" and construct their product.
6180 * This mapping can then be applied to the pair of domains.
6182 static __isl_give isl_schedule_constraints *collect_edge_constraints(
6183 struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
6184 __isl_take isl_schedule_constraints *sc)
6186 isl_union_map *umap;
6187 isl_space *space;
6188 isl_union_set *uset;
6189 isl_union_map *umap1, *umap2;
6191 if (!sc)
6192 return NULL;
6194 umap = isl_union_map_from_map(isl_map_copy(edge->map));
6195 umap = isl_union_map_apply_domain(umap,
6196 isl_union_map_copy(cluster_map));
6197 umap = isl_union_map_apply_range(umap,
6198 isl_union_map_copy(cluster_map));
6199 sc = add_non_conditional_constraints(edge, umap, sc);
6200 isl_union_map_free(umap);
6202 if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
6203 return sc;
6205 space = isl_space_domain(isl_map_get_space(edge->map));
6206 uset = isl_union_set_from_set(isl_set_universe(space));
6207 umap1 = isl_union_map_copy(cluster_map);
6208 umap1 = isl_union_map_intersect_domain(umap1, uset);
6209 space = isl_space_range(isl_map_get_space(edge->map));
6210 uset = isl_union_set_from_set(isl_set_universe(space));
6211 umap2 = isl_union_map_copy(cluster_map);
6212 umap2 = isl_union_map_intersect_domain(umap2, uset);
6213 umap = isl_union_map_product(umap1, umap2);
6215 sc = add_conditional_constraints(edge, umap, sc);
6217 isl_union_map_free(umap);
6218 return sc;
6221 /* Given a mapping "cluster_map" from the original instances to
6222 * the cluster instances, add schedule constraints on the clusters
6223 * to "sc" corresponding to all edges in "graph" between nodes that
6224 * belong to SCCs that are marked for merging in "scc_in_merge".
6226 static __isl_give isl_schedule_constraints *collect_constraints(
6227 struct isl_sched_graph *graph, int *scc_in_merge,
6228 __isl_keep isl_union_map *cluster_map,
6229 __isl_take isl_schedule_constraints *sc)
6231 int i;
6233 for (i = 0; i < graph->n_edge; ++i) {
6234 struct isl_sched_edge *edge = &graph->edge[i];
6236 if (!scc_in_merge[edge->src->scc])
6237 continue;
6238 if (!scc_in_merge[edge->dst->scc])
6239 continue;
6240 sc = collect_edge_constraints(edge, cluster_map, sc);
6243 return sc;
6246 /* Construct a dependence graph for scheduling clusters with respect
6247 * to each other and store the result in "merge_graph".
6248 * In particular, the nodes of the graph correspond to the schedule
6249 * dimensions of the current bands of those clusters that have been
6250 * marked for merging in "c".
6252 * First construct an isl_schedule_constraints object for this domain
6253 * by transforming the edges in "graph" to the domain.
6254 * Then initialize a dependence graph for scheduling from these
6255 * constraints.
6257 static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
6258 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
6260 isl_union_set *domain;
6261 isl_union_map *cluster_map;
6262 isl_schedule_constraints *sc;
6263 isl_stat r;
6265 domain = collect_domain(ctx, graph, c);
6266 sc = isl_schedule_constraints_on_domain(domain);
6267 if (!sc)
6268 return isl_stat_error;
6269 cluster_map = collect_cluster_map(ctx, graph, c);
6270 sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
6271 isl_union_map_free(cluster_map);
6273 r = graph_init(merge_graph, sc);
6275 isl_schedule_constraints_free(sc);
6277 return r;
6280 /* Compute the maximal number of remaining schedule rows that still need
6281 * to be computed for the nodes that belong to clusters with the maximal
6282 * dimension for the current band (i.e., the band that is to be merged).
6283 * Only clusters that are about to be merged are considered.
6284 * "maxvar" is the maximal dimension for the current band.
6285 * "c" contains information about the clusters.
6287 * Return the maximal number of remaining schedule rows or -1 on error.
6289 static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
6291 int i, j;
6292 int max_slack;
6294 max_slack = 0;
6295 for (i = 0; i < c->n; ++i) {
6296 int nvar;
6297 struct isl_sched_graph *scc;
6299 if (!c->scc_in_merge[i])
6300 continue;
6301 scc = &c->scc[i];
6302 nvar = scc->n_total_row - scc->band_start;
6303 if (nvar != maxvar)
6304 continue;
6305 for (j = 0; j < scc->n; ++j) {
6306 struct isl_sched_node *node = &scc->node[j];
6307 int slack;
6309 if (node_update_vmap(node) < 0)
6310 return -1;
6311 slack = node->nvar - node->rank;
6312 if (slack > max_slack)
6313 max_slack = slack;
6317 return max_slack;
6320 /* If there are any clusters where the dimension of the current band
6321 * (i.e., the band that is to be merged) is smaller than "maxvar" and
6322 * if there are any nodes in such a cluster where the number
6323 * of remaining schedule rows that still need to be computed
6324 * is greater than "max_slack", then return the smallest current band
6325 * dimension of all these clusters. Otherwise return the original value
6326 * of "maxvar". Return -1 in case of any error.
6327 * Only clusters that are about to be merged are considered.
6328 * "c" contains information about the clusters.
6330 static int limit_maxvar_to_slack(int maxvar, int max_slack,
6331 struct isl_clustering *c)
6333 int i, j;
6335 for (i = 0; i < c->n; ++i) {
6336 int nvar;
6337 struct isl_sched_graph *scc;
6339 if (!c->scc_in_merge[i])
6340 continue;
6341 scc = &c->scc[i];
6342 nvar = scc->n_total_row - scc->band_start;
6343 if (nvar >= maxvar)
6344 continue;
6345 for (j = 0; j < scc->n; ++j) {
6346 struct isl_sched_node *node = &scc->node[j];
6347 int slack;
6349 if (node_update_vmap(node) < 0)
6350 return -1;
6351 slack = node->nvar - node->rank;
6352 if (slack > max_slack) {
6353 maxvar = nvar;
6354 break;
6359 return maxvar;
6362 /* Adjust merge_graph->maxvar based on the number of remaining schedule rows
6363 * that still need to be computed. In particular, if there is a node
6364 * in a cluster where the dimension of the current band is smaller
6365 * than merge_graph->maxvar, but the number of remaining schedule rows
6366 * is greater than that of any node in a cluster with the maximal
6367 * dimension for the current band (i.e., merge_graph->maxvar),
6368 * then adjust merge_graph->maxvar to the (smallest) current band dimension
6369 * of those clusters. Without this adjustment, the total number of
6370 * schedule dimensions would be increased, resulting in a skewed view
6371 * of the number of coincident dimensions.
6372 * "c" contains information about the clusters.
6374 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
6375 * then there is no point in attempting any merge since it will be rejected
6376 * anyway. Set merge_graph->maxvar to zero in such cases.
6378 static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
6379 struct isl_sched_graph *merge_graph, struct isl_clustering *c)
6381 int max_slack, maxvar;
6383 max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
6384 if (max_slack < 0)
6385 return isl_stat_error;
6386 maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
6387 if (maxvar < 0)
6388 return isl_stat_error;
6390 if (maxvar < merge_graph->maxvar) {
6391 if (isl_options_get_schedule_maximize_band_depth(ctx))
6392 merge_graph->maxvar = 0;
6393 else
6394 merge_graph->maxvar = maxvar;
6397 return isl_stat_ok;
6400 /* Return the number of coincident dimensions in the current band of "graph",
6401 * where the nodes of "graph" are assumed to be scheduled by a single band.
6403 static int get_n_coincident(struct isl_sched_graph *graph)
6405 int i;
6407 for (i = graph->band_start; i < graph->n_total_row; ++i)
6408 if (!graph->node[0].coincident[i])
6409 break;
6411 return i - graph->band_start;
6414 /* Should the clusters be merged based on the cluster schedule
6415 * in the current (and only) band of "merge_graph", given that
6416 * coincidence should be maximized?
6418 * If the number of coincident schedule dimensions in the merged band
6419 * would be less than the maximal number of coincident schedule dimensions
6420 * in any of the merged clusters, then the clusters should not be merged.
6422 static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
6423 struct isl_sched_graph *merge_graph)
6425 int i;
6426 int n_coincident;
6427 int max_coincident;
6429 max_coincident = 0;
6430 for (i = 0; i < c->n; ++i) {
6431 if (!c->scc_in_merge[i])
6432 continue;
6433 n_coincident = get_n_coincident(&c->scc[i]);
6434 if (n_coincident > max_coincident)
6435 max_coincident = n_coincident;
6438 n_coincident = get_n_coincident(merge_graph);
6440 return n_coincident >= max_coincident;
6443 /* Return the transformation on "node" expressed by the current (and only)
6444 * band of "merge_graph" applied to the clusters in "c".
6446 * First find the representation of "node" in its SCC in "c" and
6447 * extract the transformation expressed by the current band.
6448 * Then extract the transformation applied by "merge_graph"
6449 * to the cluster to which this SCC belongs.
6450 * Combine the two to obtain the complete transformation on the node.
6452 * Note that the range of the first transformation is an anonymous space,
6453 * while the domain of the second is named "cluster_X". The range
6454 * of the former therefore needs to be adjusted before the two
6455 * can be combined.
6457 static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
6458 struct isl_sched_node *node, struct isl_clustering *c,
6459 struct isl_sched_graph *merge_graph)
6461 struct isl_sched_node *scc_node, *cluster_node;
6462 int start, n;
6463 isl_id *id;
6464 isl_space *space;
6465 isl_multi_aff *ma, *ma2;
6467 scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
6468 if (scc_node && !is_node(&c->scc[node->scc], scc_node))
6469 isl_die(ctx, isl_error_internal, "unable to find node",
6470 return NULL);
6471 start = c->scc[node->scc].band_start;
6472 n = c->scc[node->scc].n_total_row - start;
6473 ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
6474 space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
6475 cluster_node = graph_find_node(ctx, merge_graph, space);
6476 if (cluster_node && !is_node(merge_graph, cluster_node))
6477 isl_die(ctx, isl_error_internal, "unable to find cluster",
6478 space = isl_space_free(space));
6479 id = isl_space_get_tuple_id(space, isl_dim_set);
6480 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
6481 isl_space_free(space);
6482 n = merge_graph->n_total_row;
6483 ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
6484 ma = isl_multi_aff_pullback_multi_aff(ma2, ma);
6486 return isl_map_from_multi_aff(ma);
6489 /* Given a set of distances "set", are they bounded by a small constant
6490 * in direction "pos"?
6491 * In practice, check if they are bounded by 2 by checking that there
6492 * are no elements with a value greater than or equal to 3 or
6493 * smaller than or equal to -3.
6495 static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
6497 isl_bool bounded;
6498 isl_set *test;
6500 if (!set)
6501 return isl_bool_error;
6503 test = isl_set_copy(set);
6504 test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
6505 bounded = isl_set_is_empty(test);
6506 isl_set_free(test);
6508 if (bounded < 0 || !bounded)
6509 return bounded;
6511 test = isl_set_copy(set);
6512 test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
6513 bounded = isl_set_is_empty(test);
6514 isl_set_free(test);
6516 return bounded;
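/* Illustrative sketch: the bounded-by-2 test above applied to a concrete
 * distance set. The helper and the set description are hypothetical;
 * the isl_set_* calls are existing public functions.
 */
static isl_bool example_distance_is_bounded(isl_ctx *ctx)
{
	isl_bool bounded;
	isl_set *dist, *test;

	/* Distances between -2 and 2 in the single direction 0. */
	dist = isl_set_read_from_str(ctx, "{ [d] : -2 <= d <= 2 }");

	/* No element with value greater than or equal to 3? */
	test = isl_set_lower_bound_si(isl_set_copy(dist), isl_dim_set, 0, 3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	/* If so, also check for elements smaller than or equal to -3. */
	if (bounded == isl_bool_true) {
		test = isl_set_upper_bound_si(isl_set_copy(dist),
						isl_dim_set, 0, -3);
		bounded = isl_set_is_empty(test);
		isl_set_free(test);
	}
	isl_set_free(dist);

	/* Returns isl_bool_true for this particular set. */
	return bounded;
}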
6519 /* Does the set "set" have a fixed (but possibly parametric) value
6520 * at dimension "pos"?
6522 static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
6524 isl_size n;
6525 isl_bool single;
6527 n = isl_set_dim(set, isl_dim_set);
6528 if (n < 0)
6529 return isl_bool_error;
6530 set = isl_set_copy(set);
6531 set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
6532 set = isl_set_project_out(set, isl_dim_set, 0, pos);
6533 single = isl_set_is_singleton(set);
6534 isl_set_free(set);
6536 return single;
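/* For example, the set { A[i, j] : j = n and 0 <= i < 10 } has the single
 * (parametric) value n at dimension 1, but no single value at dimension 0.
 */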
6539 /* Does "map" have a fixed (but possible parametric) value
6540 * at dimension "pos" of either its domain or its range?
6542 static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
6544 isl_set *set;
6545 isl_bool single;
6547 set = isl_map_domain(isl_map_copy(map));
6548 single = has_single_value(set, pos);
6549 isl_set_free(set);
6551 if (single < 0 || single)
6552 return single;
6554 set = isl_map_range(isl_map_copy(map));
6555 single = has_single_value(set, pos);
6556 isl_set_free(set);
6558 return single;
6561 /* Does the edge "edge" from "graph" have bounded dependence distances
6562 * in the merged graph "merge_graph" of a selection of clusters in "c"?
6564 * Extract the complete transformations of the source and destination
6565 * nodes of the edge, apply them to the edge constraints and
6566 * compute the differences. Finally, check if these differences are bounded
6567 * in each direction.
6569 * If the dimension of the band is greater than the number of
6570 * dimensions that can be expected to be optimized by the edge
6571 * (based on its weight), then also allow the differences to be unbounded
6572 * in the remaining dimensions, but only if either the source or
6573 * the destination has a fixed value in that direction.
6574 * This allows a statement that produces values that are used by
6575 * several instances of another statement to be merged with that
6576 * other statement.
6577 * However, merging such clusters will introduce an inherently
6578 * large proximity distance inside the merged cluster, meaning
6579 * that proximity distances will no longer be optimized in
6580 * subsequent merges. These merges are therefore only allowed
6581 * after all other possible merges have been tried.
6582 * The first time such a merge is encountered, the weight of the edge
6583 * is replaced by a negative weight. The second time (i.e., after
6584 * all merges over edges with a non-negative weight have been tried),
6585 * the merge is allowed.
6587 static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
6588 struct isl_sched_graph *graph, struct isl_clustering *c,
6589 struct isl_sched_graph *merge_graph)
6591 int i, n_slack;
6592 isl_size n;
6593 isl_bool bounded;
6594 isl_map *map, *t;
6595 isl_set *dist;
6597 map = isl_map_copy(edge->map);
6598 t = extract_node_transformation(ctx, edge->src, c, merge_graph);
6599 map = isl_map_apply_domain(map, t);
6600 t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
6601 map = isl_map_apply_range(map, t);
6602 dist = isl_map_deltas(isl_map_copy(map));
6604 bounded = isl_bool_true;
6605 n = isl_set_dim(dist, isl_dim_set);
6606 if (n < 0)
6607 goto error;
6608 n_slack = n - edge->weight;
6609 if (edge->weight < 0)
6610 n_slack -= graph->max_weight + 1;
6611 for (i = 0; i < n; ++i) {
6612 isl_bool bounded_i, singular_i;
6614 bounded_i = distance_is_bounded(dist, i);
6615 if (bounded_i < 0)
6616 goto error;
6617 if (bounded_i)
6618 continue;
6619 if (edge->weight >= 0)
6620 bounded = isl_bool_false;
6621 n_slack--;
6622 if (n_slack < 0)
6623 break;
6624 singular_i = has_singular_src_or_dst(map, i);
6625 if (singular_i < 0)
6626 goto error;
6627 if (singular_i)
6628 continue;
6629 bounded = isl_bool_false;
6630 break;
6632 if (!bounded && i >= n && edge->weight >= 0)
6633 edge->weight -= graph->max_weight + 1;
6634 isl_map_free(map);
6635 isl_set_free(dist);
6637 return bounded;
6638 error:
6639 isl_map_free(map);
6640 isl_set_free(dist);
6641 return isl_bool_error;
6644 /* Should the clusters be merged based on the cluster schedule
6645 * in the current (and only) band of "merge_graph"?
6646 * "graph" is the original dependence graph, while "c" records
6647 * which SCCs are involved in the latest merge.
6649 * In particular, is there at least one proximity constraint
6650 * that is optimized by the merge?
6652 * A proximity constraint is considered to be optimized
6653 * if the dependence distances are small.
6655 static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
6656 struct isl_sched_graph *graph, struct isl_clustering *c,
6657 struct isl_sched_graph *merge_graph)
6659 int i;
6661 for (i = 0; i < graph->n_edge; ++i) {
6662 struct isl_sched_edge *edge = &graph->edge[i];
6663 isl_bool bounded;
6665 if (!is_proximity(edge))
6666 continue;
6667 if (!c->scc_in_merge[edge->src->scc])
6668 continue;
6669 if (!c->scc_in_merge[edge->dst->scc])
6670 continue;
6671 if (c->scc_cluster[edge->dst->scc] ==
6672 c->scc_cluster[edge->src->scc])
6673 continue;
6674 bounded = has_bounded_distances(ctx, edge, graph, c,
6675 merge_graph);
6676 if (bounded < 0 || bounded)
6677 return bounded;
6680 return isl_bool_false;
6683 /* Should the clusters be merged based on the cluster schedule
6684 * in the current (and only) band of "merge_graph"?
6685 * "graph" is the original dependence graph, while "c" records
6686 * which SCCs are involved in the latest merge.
6688 * If the current band is empty, then the clusters should not be merged.
6690 * If the band depth should be maximized and the merge schedule
6691 * is incomplete (meaning that the dimension of some of the schedule
6692 * bands in the original schedule will be reduced), then the clusters
6693 * should not be merged.
6695 * If the schedule_maximize_coincidence option is set, then check that
6696 * the number of coincident schedule dimensions is not reduced.
6698 * Finally, only allow the merge if at least one proximity
6699 * constraint is optimized.
6701 static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
6702 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
6704 if (merge_graph->n_total_row == merge_graph->band_start)
6705 return isl_bool_false;
6707 if (isl_options_get_schedule_maximize_band_depth(ctx) &&
6708 merge_graph->n_total_row < merge_graph->maxvar)
6709 return isl_bool_false;
6711 if (isl_options_get_schedule_maximize_coincidence(ctx)) {
6712 isl_bool ok;
6714 ok = ok_to_merge_coincident(c, merge_graph);
6715 if (ok < 0 || !ok)
6716 return ok;
6719 return ok_to_merge_proximity(ctx, graph, c, merge_graph);
6722 /* Apply the schedule in "t_node" to the "n" rows starting at "first"
6723 * of the schedule in "node" and return the result.
6725 * That is, essentially compute
6727 * T * N(first:first+n-1)
6729 * taking into account the constant term and the parameter coefficients
6730 * in "t_node".
6732 static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
6733 struct isl_sched_node *t_node, struct isl_sched_node *node,
6734 int first, int n)
6736 int i, j;
6737 isl_mat *t;
6738 int n_row, n_col, n_param, n_var;
6740 n_param = node->nparam;
6741 n_var = node->nvar;
6742 n_row = isl_mat_rows(t_node->sched);
6743 n_col = isl_mat_cols(node->sched);
6744 t = isl_mat_alloc(ctx, n_row, n_col);
6745 if (!t)
6746 return NULL;
6747 for (i = 0; i < n_row; ++i) {
6748 isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
6749 isl_seq_clr(t->row[i] + 1 + n_param, n_var);
6750 for (j = 0; j < n; ++j)
6751 isl_seq_addmul(t->row[i],
6752 t_node->sched->row[i][1 + n_param + j],
6753 node->sched->row[first + j],
6754 1 + n_param + n_var);
6756 return t;
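/* Worked example with arbitrary values: with one parameter and two
 * variables, applying a t_node row [2 1 | 3] (constant 2, parameter
 * coefficient 1, coefficient 3 for the single input) to the node row
 * [0 0 | 1 0] at position "first" yields
 *
 *	[2 1 | 0 0] + 3 * [0 0 | 1 0] = [2 1 | 3 0]
 *
 * i.e., the constant and parameter parts of "t_node" plus the
 * corresponding linear combination of the selected rows of "node".
 */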
6759 /* Apply the cluster schedule in "t_node" to the current band
6760 * schedule of the nodes in "graph".
6762 * In particular, replace the rows starting at band_start
6763 * by the result of applying the cluster schedule in "t_node"
6764 * to the original rows.
6766 * The coincidence of the schedule is determined by the coincidence
6767 * of the cluster schedule.
6769 static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
6770 struct isl_sched_node *t_node)
6772 int i, j;
6773 int n_new;
6774 int start, n;
6776 start = graph->band_start;
6777 n = graph->n_total_row - start;
6779 n_new = isl_mat_rows(t_node->sched);
6780 for (i = 0; i < graph->n; ++i) {
6781 struct isl_sched_node *node = &graph->node[i];
6782 isl_mat *t;
6784 t = node_transformation(ctx, t_node, node, start, n);
6785 node->sched = isl_mat_drop_rows(node->sched, start, n);
6786 node->sched = isl_mat_concat(node->sched, t);
6787 node->sched_map = isl_map_free(node->sched_map);
6788 if (!node->sched)
6789 return isl_stat_error;
6790 for (j = 0; j < n_new; ++j)
6791 node->coincident[start + j] = t_node->coincident[j];
6793 graph->n_total_row -= n;
6794 graph->n_row -= n;
6795 graph->n_total_row += n_new;
6796 graph->n_row += n_new;
6798 return isl_stat_ok;
6801 /* Merge the clusters marked for merging in "c" into a single
6802 * cluster using the cluster schedule in the current band of "merge_graph".
6803 * The representative SCC for the new cluster is the SCC with
6804 * the smallest index.
6806 * The current band schedule of each SCC in the new cluster is obtained
6807 * by applying the schedule of the corresponding original cluster
6808 * to the original band schedule.
6809 * All SCCs in the new cluster have the same number of schedule rows.
6811 static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
6812 struct isl_sched_graph *merge_graph)
6814 int i;
6815 int cluster = -1;
6816 isl_space *space;
6818 for (i = 0; i < c->n; ++i) {
6819 struct isl_sched_node *node;
6821 if (!c->scc_in_merge[i])
6822 continue;
6823 if (cluster < 0)
6824 cluster = i;
6825 space = cluster_space(&c->scc[i], c->scc_cluster[i]);
6826 node = graph_find_node(ctx, merge_graph, space);
6827 isl_space_free(space);
6828 if (!node)
6829 return isl_stat_error;
6830 if (!is_node(merge_graph, node))
6831 isl_die(ctx, isl_error_internal,
6832 "unable to find cluster",
6833 return isl_stat_error);
6834 if (transform(ctx, &c->scc[i], node) < 0)
6835 return isl_stat_error;
6836 c->scc_cluster[i] = cluster;
6839 return isl_stat_ok;
6842 /* Try and merge the clusters of SCCs marked in c->scc_in_merge
6843 * by scheduling the current cluster bands with respect to each other.
6845 * Construct a dependence graph with a space for each cluster and
6846 * with the coordinates of each space corresponding to the schedule
6847 * dimensions of the current band of that cluster.
6848 * Construct a cluster schedule in this cluster dependence graph and
6849 * apply it to the current cluster bands if it is applicable
6850 * according to ok_to_merge.
6852 * If the number of remaining schedule dimensions in a cluster
6853 * with a non-maximal current schedule dimension is greater than
6854 * the number of remaining schedule dimensions in clusters
6855 * with a maximal current schedule dimension, then restrict
6856 * the number of rows to be computed in the cluster schedule
6857 * to the minimal such non-maximal current schedule dimension.
6858 * Do this by adjusting merge_graph.maxvar.
6860 * Return isl_bool_true if the clusters have effectively been merged
6861 * into a single cluster.
6863 * Note that since the standard scheduling algorithm minimizes the maximal
6864 * distance over proximity constraints, the proximity constraints between
6865 * the merged clusters may not be optimized any further than what is
6866 * sufficient to bring the distances within the limits of the internal
6867 * proximity constraints inside the individual clusters.
6868 * It may therefore make sense to perform an additional translation step
6869 * to bring the clusters closer to each other, while maintaining
6870 * the linear part of the merging schedule found using the standard
6871 * scheduling algorithm.
6873 static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
6874 struct isl_clustering *c)
6876 struct isl_sched_graph merge_graph = { 0 };
6877 isl_bool merged;
6879 if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
6880 goto error;
6882 if (compute_maxvar(&merge_graph) < 0)
6883 goto error;
6884 if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
6885 goto error;
6886 if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
6887 goto error;
6888 merged = ok_to_merge(ctx, graph, c, &merge_graph);
6889 if (merged && merge(ctx, c, &merge_graph) < 0)
6890 goto error;
6892 graph_free(ctx, &merge_graph);
6893 return merged;
6894 error:
6895 graph_free(ctx, &merge_graph);
6896 return isl_bool_error;
6899 /* Is there any edge marked "no_merge" between two SCCs that are
6900 * about to be merged (i.e., that are set in "scc_in_merge")?
6901 * "merge_edge" is the proximity edge along which the clusters of SCCs
6902 * are going to be merged.
6904 * If there is any edge between two SCCs with a negative weight,
6905 * while the weight of "merge_edge" is non-negative, then this
6906 * means that the edge was postponed. "merge_edge" should then
6907 * also be postponed since merging along the edge with negative weight should
6908 * be postponed until all edges with non-negative weight have been tried.
6909 * Replace the weight of "merge_edge" by a negative weight as well and
6910 * tell the caller not to attempt a merge.
6912 static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
6913 struct isl_sched_edge *merge_edge)
6915 int i;
6917 for (i = 0; i < graph->n_edge; ++i) {
6918 struct isl_sched_edge *edge = &graph->edge[i];
6920 if (!scc_in_merge[edge->src->scc])
6921 continue;
6922 if (!scc_in_merge[edge->dst->scc])
6923 continue;
6924 if (edge->no_merge)
6925 return 1;
6926 if (merge_edge->weight >= 0 && edge->weight < 0) {
6927 merge_edge->weight -= graph->max_weight + 1;
6928 return 1;
6932 return 0;
6935 /* Merge the two clusters in "c" connected by the edge in "graph"
6936 * with index "edge" into a single cluster.
6937 * If it turns out to be impossible to merge these two clusters,
6938 * then mark the edge as "no_merge" such that it will not be
6939 * considered again.
6941 * First mark all SCCs that need to be merged. This includes the SCCs
6942 * in the two clusters, but it may also include the SCCs
6943 * of intermediate clusters.
6944 * If there is already a no_merge edge between any pair of such SCCs,
6945 * then simply mark the current edge as no_merge as well.
6946 * Likewise, if any of those edges was postponed by has_bounded_distances,
6947 * then postpone the current edge as well.
6948 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
6949 * if the clusters did not end up getting merged, unless the non-merge
6950 * is due to the fact that the edge was postponed. This postponement
6951 * can be recognized by a change in weight (from non-negative to negative).
6953 static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
6954 struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
6956 isl_bool merged;
6957 int edge_weight = graph->edge[edge].weight;
6959 if (mark_merge_sccs(ctx, graph, edge, c) < 0)
6960 return isl_stat_error;
6962 if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
6963 merged = isl_bool_false;
6964 else
6965 merged = try_merge(ctx, graph, c);
6966 if (merged < 0)
6967 return isl_stat_error;
6968 if (!merged && edge_weight == graph->edge[edge].weight)
6969 graph->edge[edge].no_merge = 1;
6971 return isl_stat_ok;
6974 /* Does "node" belong to the cluster identified by "cluster"?
6976 static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
6978 return node->cluster == cluster;
6981 /* Does "edge" connect two nodes belonging to the cluster
6982 * identified by "cluster"?
6984 static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
6986 return edge->src->cluster == cluster && edge->dst->cluster == cluster;
6989 /* Swap the schedule of "node1" and "node2".
6990 * Both nodes have been derived from the same node in a common parent graph.
6991 * Since the "coincident" field is shared with that node
6992 * in the parent graph, there is no need to also swap this field.
6994 static void swap_sched(struct isl_sched_node *node1,
6995 struct isl_sched_node *node2)
6997 isl_mat *sched;
6998 isl_map *sched_map;
7000 sched = node1->sched;
7001 node1->sched = node2->sched;
7002 node2->sched = sched;
7004 sched_map = node1->sched_map;
7005 node1->sched_map = node2->sched_map;
7006 node2->sched_map = sched_map;
7009 /* Copy the current band schedule from the SCCs that form the cluster
7010 * with index "pos" to the actual cluster at position "pos".
7011 * By construction, the index of the first SCC that belongs to the cluster
7012 * is also "pos".
7014 * The order of the nodes inside both the SCCs and the cluster
7015 * is assumed to be the same as the order in the original "graph".
7017 * Since the SCC graphs will no longer be used after this function,
7018 * the schedules are actually swapped rather than copied.
7020 static isl_stat copy_partial(struct isl_sched_graph *graph,
7021 struct isl_clustering *c, int pos)
7023 int i, j;
7025 c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
7026 c->cluster[pos].n_row = c->scc[pos].n_row;
7027 c->cluster[pos].maxvar = c->scc[pos].maxvar;
7028 j = 0;
7029 for (i = 0; i < graph->n; ++i) {
7030 int k;
7031 int s;
7033 if (graph->node[i].cluster != pos)
7034 continue;
7035 s = graph->node[i].scc;
7036 k = c->scc_node[s]++;
7037 swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
7038 if (c->scc[s].maxvar > c->cluster[pos].maxvar)
7039 c->cluster[pos].maxvar = c->scc[s].maxvar;
7040 ++j;
7043 return isl_stat_ok;
7046 /* Is there a (conditional) validity dependence from node[j] to node[i],
7047 * forcing node[i] to follow node[j], or do the nodes belong to the same
7048 * cluster?
7050 static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
7052 struct isl_sched_graph *graph = user;
7054 if (graph->node[i].cluster == graph->node[j].cluster)
7055 return isl_bool_true;
7056 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
7059 /* Extract the merged clusters of SCCs in "graph", sort them, and
7060 * store them in c->clusters. Update c->scc_cluster accordingly.
7062 * First record in each node itself the cluster containing the SCC
7063 * to which the node belongs.
7064 * Then extract the clusters into c->clusters, copying the current
7065 * band schedule from the SCCs that belong to the cluster.
7066 * Do this only once per cluster.
7068 * Finally, topologically sort the clusters and update c->scc_cluster
7069 * to match the new scc numbering. While the SCCs were originally
7070 * sorted already, some SCCs that depend on some other SCCs may
7071 * have been merged with SCCs that appear before these other SCCs.
7072 * A reordering may therefore be required.
7074 static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
7075 struct isl_clustering *c)
7077 int i;
7079 for (i = 0; i < graph->n; ++i)
7080 graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];
7082 for (i = 0; i < graph->scc; ++i) {
7083 if (c->scc_cluster[i] != i)
7084 continue;
7085 if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
7086 &edge_cluster_exactly, i, &c->cluster[i]) < 0)
7087 return isl_stat_error;
7088 c->cluster[i].src_scc = -1;
7089 c->cluster[i].dst_scc = -1;
7090 if (copy_partial(graph, c, i) < 0)
7091 return isl_stat_error;
7094 if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
7095 return isl_stat_error;
7096 for (i = 0; i < graph->n; ++i)
7097 c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;
7099 return isl_stat_ok;
7102 /* Compute weights on the proximity edges of "graph" that can
7103 * be used by find_proximity to find the most appropriate
7104 * proximity edge to use to merge two clusters in "c".
7105 * The weights are also used by has_bounded_distances to determine
7106 * whether the merge should be allowed.
7107 * Store the maximum of the computed weights in graph->max_weight.
7109 * The computed weight is a measure of the number of remaining schedule
7110 * dimensions that can still be completely aligned.
7111 * In particular, compute the number of equalities between
7112 * input dimensions and output dimensions in the proximity constraints.
7113 * The directions that are already handled by outer schedule bands
7114 * are projected out prior to determining this number.
7116 * Edges that will never be considered by find_proximity are ignored.
7118 static isl_stat compute_weights(struct isl_sched_graph *graph,
7119 struct isl_clustering *c)
7121 int i;
7123 graph->max_weight = 0;
7125 for (i = 0; i < graph->n_edge; ++i) {
7126 struct isl_sched_edge *edge = &graph->edge[i];
7127 struct isl_sched_node *src = edge->src;
7128 struct isl_sched_node *dst = edge->dst;
7129 isl_basic_map *hull;
7130 isl_bool prox;
7131 isl_size n_in, n_out;
7133 prox = is_non_empty_proximity(edge);
7134 if (prox < 0)
7135 return isl_stat_error;
7136 if (!prox)
7137 continue;
7138 if (bad_cluster(&c->scc[edge->src->scc]) ||
7139 bad_cluster(&c->scc[edge->dst->scc]))
7140 continue;
7141 if (c->scc_cluster[edge->dst->scc] ==
7142 c->scc_cluster[edge->src->scc])
7143 continue;
7145 hull = isl_map_affine_hull(isl_map_copy(edge->map));
7146 hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
7147 isl_mat_copy(src->vmap));
7148 hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
7149 isl_mat_copy(dst->vmap));
7150 hull = isl_basic_map_project_out(hull,
7151 isl_dim_in, 0, src->rank);
7152 hull = isl_basic_map_project_out(hull,
7153 isl_dim_out, 0, dst->rank);
7154 hull = isl_basic_map_remove_divs(hull);
7155 n_in = isl_basic_map_dim(hull, isl_dim_in);
7156 n_out = isl_basic_map_dim(hull, isl_dim_out);
7157 if (n_in < 0 || n_out < 0)
7158 hull = isl_basic_map_free(hull);
7159 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
7160 isl_dim_in, 0, n_in);
7161 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
7162 isl_dim_out, 0, n_out);
7163 if (!hull)
7164 return isl_stat_error;
7165 edge->weight = isl_basic_map_n_equality(hull);
7166 isl_basic_map_free(hull);
7168 if (edge->weight > graph->max_weight)
7169 graph->max_weight = edge->weight;
7172 return isl_stat_ok;
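/* A minimal sketch (not used by the scheduler itself) of the weight
 * measure computed above: the number of equalities between input and
 * output dimensions in the affine hull of a proximity relation.
 * The relation below is made up for the example; the basis change
 * through "vmap" and the projection of directions handled by outer
 * bands are ignored here.  For { S[i,j] -> T[i,k] }, the affine hull
 * contains the single equality relating the first input and output
 * dimensions, so the computed weight would be 1.
 */
static isl_size weight_measure_sketch(isl_ctx *ctx)
{
	isl_basic_map *hull;
	isl_size n_eq;

	hull = isl_map_affine_hull(
		isl_map_read_from_str(ctx, "{ S[i,j] -> T[i,k] }"));
	n_eq = isl_basic_map_n_equality(hull);
	isl_basic_map_free(hull);

	return n_eq;
}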
7175 /* Call compute_schedule_finish_band on each of the clusters in "c"
7176 * in their topological order. This order is determined by the scc
7177 * fields of the nodes in "graph".
7178 * Combine the results in a sequence expressing the topological order.
7180 * If there is only one cluster left, then there is no need to introduce
7181 * a sequence node. Also, in this case, the cluster necessarily contains
7182 * the SCC at position 0 in the original graph and is therefore also
7183 * stored in the first cluster of "c".
7185 static __isl_give isl_schedule_node *finish_bands_clustering(
7186 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
7187 struct isl_clustering *c)
7189 int i;
7190 isl_ctx *ctx;
7191 isl_union_set_list *filters;
7193 if (graph->scc == 1)
7194 return compute_schedule_finish_band(node, &c->cluster[0], 0);
7196 ctx = isl_schedule_node_get_ctx(node);
7198 filters = extract_sccs(ctx, graph);
7199 node = isl_schedule_node_insert_sequence(node, filters);
7201 for (i = 0; i < graph->scc; ++i) {
7202 int j = c->scc_cluster[i];
7203 node = isl_schedule_node_child(node, i);
7204 node = isl_schedule_node_child(node, 0);
7205 node = compute_schedule_finish_band(node, &c->cluster[j], 0);
7206 node = isl_schedule_node_parent(node);
7207 node = isl_schedule_node_parent(node);
7210 return node;
7213 /* Compute a schedule for a connected dependence graph by first considering
7214 * each strongly connected component (SCC) in the graph separately and then
7215 * incrementally combining them into clusters.
7216 * Return the updated schedule node.
7218 * Initially, each cluster consists of a single SCC with its
7219 * own band schedule. The algorithm then tries to merge pairs
7220 * of clusters along a proximity edge until no more suitable
7221 * proximity edges can be found. During this merging, the schedule
7222 * is maintained in the individual SCCs.
7223 * After the merging is completed, the full resulting clusters
7224 * are extracted and, in finish_bands_clustering,
7225 * compute_schedule_finish_band is called on each of them to integrate
7226 * the band into "node" and to continue the computation.
7228 * compute_weights initializes the weights that are used by find_proximity.
7230 static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
7231 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
7233 isl_ctx *ctx;
7234 struct isl_clustering c;
7235 int i;
7237 ctx = isl_schedule_node_get_ctx(node);
7239 if (clustering_init(ctx, &c, graph) < 0)
7240 goto error;
7242 if (compute_weights(graph, &c) < 0)
7243 goto error;
7245 for (;;) {
7246 i = find_proximity(graph, &c);
7247 if (i < 0)
7248 goto error;
7249 if (i >= graph->n_edge)
7250 break;
7251 if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
7252 goto error;
7255 if (extract_clusters(ctx, graph, &c) < 0)
7256 goto error;
7258 node = finish_bands_clustering(node, graph, &c);
7260 clustering_free(ctx, &c);
7261 return node;
7262 error:
7263 clustering_free(ctx, &c);
7264 return isl_schedule_node_free(node);
7267 /* Compute a schedule for a connected dependence graph and return
7268 * the updated schedule node.
7270 * If Feautrier's algorithm is selected, we first recursively try to satisfy
7271 * as many validity dependences as possible. When all validity dependences
7272 * are satisfied, we extend the schedule to a full-dimensional schedule.
7274 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
7275 * depending on whether the user has selected the option to try and
7276 * compute a schedule for the entire (weakly connected) component first.
7277 * If there is only a single strongly connected component (SCC), then
7278 * there is no point in trying to combine SCCs
7279 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
7280 * is called instead.
7282 static __isl_give isl_schedule_node *compute_schedule_wcc(
7283 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
7285 isl_ctx *ctx;
7287 if (!node)
7288 return NULL;
7290 ctx = isl_schedule_node_get_ctx(node);
7291 if (detect_sccs(ctx, graph) < 0)
7292 return isl_schedule_node_free(node);
7294 if (compute_maxvar(graph) < 0)
7295 return isl_schedule_node_free(node);
7297 if (need_feautrier_step(ctx, graph))
7298 return compute_schedule_wcc_feautrier(node, graph);
7300 if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
7301 return compute_schedule_wcc_whole(node, graph);
7302 else
7303 return compute_schedule_wcc_clustering(node, graph);
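/* Hypothetical usage sketch (not part of the scheduler): the choices
 * described above can be influenced through the public scheduling
 * options.  The option and constant names below are part of the
 * public API; the snippet itself is only illustrative.
 */
static void select_wcc_strategy(isl_ctx *ctx)
{
	/* Schedule each weakly connected component as a whole,
	 * bypassing the incremental clustering. */
	isl_options_set_schedule_whole_component(ctx, 1);
	/* Or switch to Feautrier's scheduling algorithm instead. */
	isl_options_set_schedule_algorithm(ctx,
					ISL_SCHEDULE_ALGORITHM_FEAUTRIER);
}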
7306 /* Compute a schedule for each group of nodes identified by node->scc
7307 * separately and then combine them in a sequence node (or as set node
7308 * if graph->weak is set) inserted at position "node" of the schedule tree.
7309 * Return the updated schedule node.
7311 * If "wcc" is set then each of the groups belongs to a single
7312 * weakly connected component in the dependence graph so that
7313 * there is no need for compute_sub_schedule to look for weakly
7314 * connected components.
7316 * If a set node would be introduced and if the number of components
7317 * is equal to the number of nodes, then check if the schedule
7318 * is already complete. If so, a redundant set node would be introduced
7319 * (without any further descendants) stating that the statements
7320 * can be executed in arbitrary order, which is also expressed
7321 * by the absence of any node. Refrain from inserting any nodes
7322 * in this case and simply return.
7324 static __isl_give isl_schedule_node *compute_component_schedule(
7325 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
7326 int wcc)
7328 int component;
7329 isl_ctx *ctx;
7330 isl_union_set_list *filters;
7332 if (!node)
7333 return NULL;
7335 if (graph->weak && graph->scc == graph->n) {
7336 if (compute_maxvar(graph) < 0)
7337 return isl_schedule_node_free(node);
7338 if (graph->n_row >= graph->maxvar)
7339 return node;
7342 ctx = isl_schedule_node_get_ctx(node);
7343 filters = extract_sccs(ctx, graph);
7344 if (graph->weak)
7345 node = isl_schedule_node_insert_set(node, filters);
7346 else
7347 node = isl_schedule_node_insert_sequence(node, filters);
7349 for (component = 0; component < graph->scc; ++component) {
7350 node = isl_schedule_node_child(node, component);
7351 node = isl_schedule_node_child(node, 0);
7352 node = compute_sub_schedule(node, ctx, graph,
7353 &node_scc_exactly,
7354 &edge_scc_exactly, component, wcc);
7355 node = isl_schedule_node_parent(node);
7356 node = isl_schedule_node_parent(node);
7359 return node;
7362 /* Compute a schedule for the given dependence graph and insert it at "node".
7363 * Return the updated schedule node.
7365 * We first check if the graph is connected (through validity and conditional
7366 * validity dependences) and, if not, compute a schedule
7367 * for each component separately.
7368 * If the schedule_serialize_sccs option is set, then we check for strongly
7369 * connected components instead and compute a separate schedule for
7370 * each such strongly connected component.
7372 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
7373 struct isl_sched_graph *graph)
7375 isl_ctx *ctx;
7377 if (!node)
7378 return NULL;
7380 ctx = isl_schedule_node_get_ctx(node);
7381 if (isl_options_get_schedule_serialize_sccs(ctx)) {
7382 if (detect_sccs(ctx, graph) < 0)
7383 return isl_schedule_node_free(node);
7384 } else {
7385 if (detect_wccs(ctx, graph) < 0)
7386 return isl_schedule_node_free(node);
7389 if (graph->scc > 1)
7390 return compute_component_schedule(node, graph, 1);
7392 return compute_schedule_wcc(node, graph);
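/* Hypothetical usage sketch: with the option below set, compute_schedule
 * detects strongly connected components instead of weakly connected
 * components and computes a separate schedule for each of them,
 * combined in a sequence.
 */
static void request_serialized_sccs(isl_ctx *ctx)
{
	isl_options_set_schedule_serialize_sccs(ctx, 1);
}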
7395 /* Compute a schedule on sc->domain that respects the given schedule
7396 * constraints.
7398 * In particular, the schedule respects all the validity dependences.
7399 * If the default isl scheduling algorithm is used, it tries to minimize
7400 * the dependence distances over the proximity dependences.
7401 * If Feautrier's scheduling algorithm is used, the proximity dependence
7402 * distances are only minimized during the extension to a full-dimensional
7403 * schedule.
7405 * If there are any condition and conditional validity dependences,
7406 * then the conditional validity dependences may be violated inside
7407 * a tilable band, provided they have no adjacent non-local
7408 * condition dependences.
7410 __isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
7411 __isl_take isl_schedule_constraints *sc)
7413 isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
7414 struct isl_sched_graph graph = { 0 };
7415 isl_schedule *sched;
7416 isl_schedule_node *node;
7417 isl_union_set *domain;
7419 sc = isl_schedule_constraints_align_params(sc);
7421 domain = isl_schedule_constraints_get_domain(sc);
7422 if (isl_union_set_n_set(domain) == 0) {
7423 isl_schedule_constraints_free(sc);
7424 return isl_schedule_from_domain(domain);
7427 if (graph_init(&graph, sc) < 0)
7428 domain = isl_union_set_free(domain);
7430 node = isl_schedule_node_from_domain(domain);
7431 node = isl_schedule_node_child(node, 0);
7432 if (graph.n > 0)
7433 node = compute_schedule(node, &graph);
7434 sched = isl_schedule_node_get_schedule(node);
7435 isl_schedule_node_free(node);
7437 graph_free(ctx, &graph);
7438 isl_schedule_constraints_free(sc);
7440 return sched;
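/* Illustrative usage sketch of the entry point above (not part of isl;
 * the domain and dependence relation are made up for the example).
 * A producer/consumer dependence between S and T is imposed both as a
 * validity and as a proximity constraint, and the resulting schedule
 * is extracted as a union map.
 */
static void compute_schedule_sketch(isl_ctx *ctx)
{
	isl_union_set *domain;
	isl_union_map *dep, *map;
	isl_schedule_constraints *sc;
	isl_schedule *schedule;

	domain = isl_union_set_read_from_str(ctx,
		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
	dep = isl_union_map_read_from_str(ctx, "{ S[i] -> T[i] }");

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc,
						isl_union_map_copy(dep));
	sc = isl_schedule_constraints_set_proximity(sc, dep);
	schedule = isl_schedule_constraints_compute_schedule(sc);

	map = isl_schedule_get_map(schedule);
	isl_union_map_free(map);
	isl_schedule_free(schedule);
}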
7443 /* Compute a schedule for the given union of domains that respects
7444 * all the validity dependences and minimizes
7445 * the dependence distances over the proximity dependences.
7447 * This function is kept for backward compatibility.
7449 __isl_give isl_schedule *isl_union_set_compute_schedule(
7450 __isl_take isl_union_set *domain,
7451 __isl_take isl_union_map *validity,
7452 __isl_take isl_union_map *proximity)
7454 isl_schedule_constraints *sc;
7456 sc = isl_schedule_constraints_on_domain(domain);
7457 sc = isl_schedule_constraints_set_validity(sc, validity);
7458 sc = isl_schedule_constraints_set_proximity(sc, proximity);
7460 return isl_schedule_constraints_compute_schedule(sc);
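/* Hedged usage sketch of the backward compatibility interface above,
 * with a made-up domain and dependence relation.  It is equivalent to
 * going through isl_schedule_constraints directly, as in the sketch
 * following isl_schedule_constraints_compute_schedule.
 */
static __isl_give isl_schedule *old_interface_sketch(isl_ctx *ctx)
{
	isl_union_set *domain;
	isl_union_map *dep;

	domain = isl_union_set_read_from_str(ctx,
		"{ A[i] : 0 <= i < 10; B[i] : 0 <= i < 10 }");
	dep = isl_union_map_read_from_str(ctx, "{ A[i] -> B[i] }");

	return isl_union_set_compute_schedule(domain,
				isl_union_map_copy(dep), dep);
}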