isl_scheduler.c: inter_coefficients: use isl_map_to_basic_set_try_get
1 /*
2 * Copyright 2011 INRIA Saclay
3 * Copyright 2012-2014 Ecole Normale Superieure
4 * Copyright 2015 Sven Verdoolaege
6 * Use of this software is governed by the MIT license
8 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
9 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
10 * 91893 Orsay, France
11 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
14 #include <isl_ctx_private.h>
15 #include <isl_map_private.h>
16 #include <isl_space_private.h>
17 #include <isl_aff_private.h>
18 #include <isl/hash.h>
19 #include <isl/constraint.h>
20 #include <isl/schedule.h>
21 #include <isl/schedule_node.h>
22 #include <isl_mat_private.h>
23 #include <isl_vec_private.h>
24 #include <isl/set.h>
25 #include <isl/union_set.h>
26 #include <isl_seq.h>
27 #include <isl_tab.h>
28 #include <isl_dim_map.h>
29 #include <isl/map_to_basic_set.h>
30 #include <isl_sort.h>
31 #include <isl_options_private.h>
32 #include <isl_tarjan.h>
33 #include <isl_morph.h>
36 * The scheduling algorithm implemented in this file was inspired by
37 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
38 * Parallelization and Locality Optimization in the Polyhedral Model".
41 enum isl_edge_type {
42 isl_edge_validity = 0,
43 isl_edge_first = isl_edge_validity,
44 isl_edge_coincidence,
45 isl_edge_condition,
46 isl_edge_conditional_validity,
47 isl_edge_proximity,
48 isl_edge_last = isl_edge_proximity,
49 isl_edge_local
52 /* The constraints that need to be satisfied by a schedule on "domain".
54 * "context" specifies extra constraints on the parameters.
56 * "validity" constraints map domain elements i to domain elements
57 * that should be scheduled after i. (Hard constraint)
58 * "proximity" constraints map domain elements i to domains elements
59 * that should be scheduled as early as possible after i (or before i).
60 * (Soft constraint)
62 * "condition" and "conditional_validity" constraints map possibly "tagged"
63 * domain elements i -> s to "tagged" domain elements j -> t.
64 * The elements of the "conditional_validity" constraints, but without the
65 * tags (i.e., the elements i -> j) are treated as validity constraints,
66 * except that during the construction of a tilable band,
67 * the elements of the "conditional_validity" constraints may be violated
68 * provided that all adjacent elements of the "condition" constraints
69 * are local within the band.
70 * A dependence is local within a band if domain and range are mapped
71 * to the same schedule point by the band.
73 struct isl_schedule_constraints {
74 isl_union_set *domain;
75 isl_set *context;
77 isl_union_map *constraint[isl_edge_last + 1];
80 __isl_give isl_schedule_constraints *isl_schedule_constraints_copy(
81 __isl_keep isl_schedule_constraints *sc)
83 isl_ctx *ctx;
84 isl_schedule_constraints *sc_copy;
85 enum isl_edge_type i;
87 ctx = isl_union_set_get_ctx(sc->domain);
88 sc_copy = isl_calloc_type(ctx, struct isl_schedule_constraints);
89 if (!sc_copy)
90 return NULL;
92 sc_copy->domain = isl_union_set_copy(sc->domain);
93 sc_copy->context = isl_set_copy(sc->context);
94 if (!sc_copy->domain || !sc_copy->context)
95 return isl_schedule_constraints_free(sc_copy);
97 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
98 sc_copy->constraint[i] = isl_union_map_copy(sc->constraint[i]);
99 if (!sc_copy->constraint[i])
100 return isl_schedule_constraints_free(sc_copy);
103 return sc_copy;
107 /* Construct an isl_schedule_constraints object for computing a schedule
108 * on "domain". The initial object does not impose any constraints.
110 __isl_give isl_schedule_constraints *isl_schedule_constraints_on_domain(
111 __isl_take isl_union_set *domain)
113 isl_ctx *ctx;
114 isl_space *space;
115 isl_schedule_constraints *sc;
116 isl_union_map *empty;
117 enum isl_edge_type i;
119 if (!domain)
120 return NULL;
122 ctx = isl_union_set_get_ctx(domain);
123 sc = isl_calloc_type(ctx, struct isl_schedule_constraints);
124 if (!sc)
125 goto error;
127 space = isl_union_set_get_space(domain);
128 sc->domain = domain;
129 sc->context = isl_set_universe(isl_space_copy(space));
130 empty = isl_union_map_empty(space);
131 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
132 sc->constraint[i] = isl_union_map_copy(empty);
133 if (!sc->constraint[i])
134 sc->domain = isl_union_set_free(sc->domain);
136 isl_union_map_free(empty);
138 if (!sc->domain || !sc->context)
139 return isl_schedule_constraints_free(sc);
141 return sc;
142 error:
143 isl_union_set_free(domain);
144 return NULL;
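/* As an illustration, a typical client builds up the schedule constraints
 * using the functions above and below and then computes a schedule
 * by calling isl_schedule_constraints_compute_schedule (defined further
 * down in this file), e.g.,
 *
 *	sc = isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc, validity);
 *	sc = isl_schedule_constraints_set_proximity(sc, proximity);
 *	schedule = isl_schedule_constraints_compute_schedule(sc);
 */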
147 /* Replace the context of "sc" by "context".
149 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_context(
150 __isl_take isl_schedule_constraints *sc, __isl_take isl_set *context)
152 if (!sc || !context)
153 goto error;
155 isl_set_free(sc->context);
156 sc->context = context;
158 return sc;
159 error:
160 isl_schedule_constraints_free(sc);
161 isl_set_free(context);
162 return NULL;
165 /* Replace the validity constraints of "sc" by "validity".
167 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_validity(
168 __isl_take isl_schedule_constraints *sc,
169 __isl_take isl_union_map *validity)
171 if (!sc || !validity)
172 goto error;
174 isl_union_map_free(sc->constraint[isl_edge_validity]);
175 sc->constraint[isl_edge_validity] = validity;
177 return sc;
178 error:
179 isl_schedule_constraints_free(sc);
180 isl_union_map_free(validity);
181 return NULL;
184 /* Replace the coincidence constraints of "sc" by "coincidence".
186 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_coincidence(
187 __isl_take isl_schedule_constraints *sc,
188 __isl_take isl_union_map *coincidence)
190 if (!sc || !coincidence)
191 goto error;
193 isl_union_map_free(sc->constraint[isl_edge_coincidence]);
194 sc->constraint[isl_edge_coincidence] = coincidence;
196 return sc;
197 error:
198 isl_schedule_constraints_free(sc);
199 isl_union_map_free(coincidence);
200 return NULL;
203 /* Replace the proximity constraints of "sc" by "proximity".
205 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_proximity(
206 __isl_take isl_schedule_constraints *sc,
207 __isl_take isl_union_map *proximity)
209 if (!sc || !proximity)
210 goto error;
212 isl_union_map_free(sc->constraint[isl_edge_proximity]);
213 sc->constraint[isl_edge_proximity] = proximity;
215 return sc;
216 error:
217 isl_schedule_constraints_free(sc);
218 isl_union_map_free(proximity);
219 return NULL;
222 /* Replace the conditional validity constraints of "sc" by "condition"
223 * and "validity".
225 __isl_give isl_schedule_constraints *
226 isl_schedule_constraints_set_conditional_validity(
227 __isl_take isl_schedule_constraints *sc,
228 __isl_take isl_union_map *condition,
229 __isl_take isl_union_map *validity)
231 if (!sc || !condition || !validity)
232 goto error;
234 isl_union_map_free(sc->constraint[isl_edge_condition]);
235 sc->constraint[isl_edge_condition] = condition;
236 isl_union_map_free(sc->constraint[isl_edge_conditional_validity]);
237 sc->constraint[isl_edge_conditional_validity] = validity;
239 return sc;
240 error:
241 isl_schedule_constraints_free(sc);
242 isl_union_map_free(condition);
243 isl_union_map_free(validity);
244 return NULL;
247 __isl_null isl_schedule_constraints *isl_schedule_constraints_free(
248 __isl_take isl_schedule_constraints *sc)
250 enum isl_edge_type i;
252 if (!sc)
253 return NULL;
255 isl_union_set_free(sc->domain);
256 isl_set_free(sc->context);
257 for (i = isl_edge_first; i <= isl_edge_last; ++i)
258 isl_union_map_free(sc->constraint[i]);
260 free(sc);
262 return NULL;
265 isl_ctx *isl_schedule_constraints_get_ctx(
266 __isl_keep isl_schedule_constraints *sc)
268 return sc ? isl_union_set_get_ctx(sc->domain) : NULL;
271 /* Return the domain of "sc".
273 __isl_give isl_union_set *isl_schedule_constraints_get_domain(
274 __isl_keep isl_schedule_constraints *sc)
276 if (!sc)
277 return NULL;
279 return isl_union_set_copy(sc->domain);
282 /* Return the validity constraints of "sc".
284 __isl_give isl_union_map *isl_schedule_constraints_get_validity(
285 __isl_keep isl_schedule_constraints *sc)
287 if (!sc)
288 return NULL;
290 return isl_union_map_copy(sc->constraint[isl_edge_validity]);
293 /* Return the coincidence constraints of "sc".
295 __isl_give isl_union_map *isl_schedule_constraints_get_coincidence(
296 __isl_keep isl_schedule_constraints *sc)
298 if (!sc)
299 return NULL;
301 return isl_union_map_copy(sc->constraint[isl_edge_coincidence]);
304 /* Return the conditional validity constraints of "sc".
306 __isl_give isl_union_map *isl_schedule_constraints_get_conditional_validity(
307 __isl_keep isl_schedule_constraints *sc)
309 if (!sc)
310 return NULL;
312 return
313 isl_union_map_copy(sc->constraint[isl_edge_conditional_validity]);
316 /* Return the conditions for the conditional validity constraints of "sc".
318 __isl_give isl_union_map *
319 isl_schedule_constraints_get_conditional_validity_condition(
320 __isl_keep isl_schedule_constraints *sc)
322 if (!sc)
323 return NULL;
325 return isl_union_map_copy(sc->constraint[isl_edge_condition]);
328 void isl_schedule_constraints_dump(__isl_keep isl_schedule_constraints *sc)
330 if (!sc)
331 return;
333 fprintf(stderr, "domain: ");
334 isl_union_set_dump(sc->domain);
335 fprintf(stderr, "context: ");
336 isl_set_dump(sc->context);
337 fprintf(stderr, "validity: ");
338 isl_union_map_dump(sc->constraint[isl_edge_validity]);
339 fprintf(stderr, "proximity: ");
340 isl_union_map_dump(sc->constraint[isl_edge_proximity]);
341 fprintf(stderr, "coincidence: ");
342 isl_union_map_dump(sc->constraint[isl_edge_coincidence]);
343 fprintf(stderr, "condition: ");
344 isl_union_map_dump(sc->constraint[isl_edge_condition]);
345 fprintf(stderr, "conditional_validity: ");
346 isl_union_map_dump(sc->constraint[isl_edge_conditional_validity]);
349 /* Align the parameters of the fields of "sc".
351 static __isl_give isl_schedule_constraints *
352 isl_schedule_constraints_align_params(__isl_take isl_schedule_constraints *sc)
354 isl_space *space;
355 enum isl_edge_type i;
357 if (!sc)
358 return NULL;
360 space = isl_union_set_get_space(sc->domain);
361 space = isl_space_align_params(space, isl_set_get_space(sc->context));
362 for (i = isl_edge_first; i <= isl_edge_last; ++i)
363 space = isl_space_align_params(space,
364 isl_union_map_get_space(sc->constraint[i]));
366 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
367 sc->constraint[i] = isl_union_map_align_params(
368 sc->constraint[i], isl_space_copy(space));
369 if (!sc->constraint[i])
370 space = isl_space_free(space);
372 sc->context = isl_set_align_params(sc->context, isl_space_copy(space));
373 sc->domain = isl_union_set_align_params(sc->domain, space);
374 if (!sc->context || !sc->domain)
375 return isl_schedule_constraints_free(sc);
377 return sc;
380 /* Return the total number of isl_maps in the constraints of "sc".
382 static int isl_schedule_constraints_n_map(
383 __isl_keep isl_schedule_constraints *sc)
385 enum isl_edge_type i;
386 int n = 0;
388 for (i = isl_edge_first; i <= isl_edge_last; ++i)
389 n += isl_union_map_n_map(sc->constraint[i]);
391 return n;
394 /* Internal information about a node that is used during the construction
395 * of a schedule.
396 * space represents the space in which the domain lives
397 * sched is a matrix representation of the schedule being constructed
398 * for this node; if compressed is set, then this schedule is
399 * defined over the compressed domain space
400 * sched_map is an isl_map representation of the same (partial) schedule
401 * sched_map may be NULL; if compressed is set, then this map
402 * is defined over the uncompressed domain space
403 * rank is the number of linearly independent rows in the linear part
404 * of sched
405 * the columns of cmap represent a change of basis for the schedule
406 * coefficients; the first rank columns span the linear part of
407 * the schedule rows
408 * cinv is the inverse of cmap.
409 * ctrans is the transpose of cmap.
410 * start is the first variable in the LP problem in the sequences that
411 * represents the schedule coefficients of this node
412 * nvar is the dimension of the domain
413 * nparam is the number of parameters or 0 if we are not constructing
414 * a parametric schedule
416 * If compressed is set, then hull represents the constraints
417 * that were used to derive the compression, while compress and
418 * decompress map the original space to the compressed space and
419 * vice versa.
421 * scc is the index of SCC (or WCC) this node belongs to
423 * "cluster" is only used inside extract_clusters and identifies
424 * the cluster of SCCs that the node belongs to.
426 * coincident contains a boolean for each of the rows of the schedule,
427 * indicating whether the corresponding scheduling dimension satisfies
428 * the coincidence constraints in the sense that the corresponding
429 * dependence distances are zero.
431 struct isl_sched_node {
432 isl_space *space;
433 int compressed;
434 isl_set *hull;
435 isl_multi_aff *compress;
436 isl_multi_aff *decompress;
437 isl_mat *sched;
438 isl_map *sched_map;
439 int rank;
440 isl_mat *cmap;
441 isl_mat *cinv;
442 isl_mat *ctrans;
443 int start;
444 int nvar;
445 int nparam;
447 int scc;
448 int cluster;
450 int *coincident;
453 static int node_has_space(const void *entry, const void *val)
455 struct isl_sched_node *node = (struct isl_sched_node *)entry;
456 isl_space *dim = (isl_space *)val;
458 return isl_space_is_equal(node->space, dim);
461 static int node_scc_exactly(struct isl_sched_node *node, int scc)
463 return node->scc == scc;
466 static int node_scc_at_most(struct isl_sched_node *node, int scc)
468 return node->scc <= scc;
471 static int node_scc_at_least(struct isl_sched_node *node, int scc)
473 return node->scc >= scc;
476 /* An edge in the dependence graph. An edge may be used to
477 * ensure validity of the generated schedule, to minimize the dependence
478 * distance or both
480 * map is the dependence relation, with i -> j in the map if j depends on i
481 * tagged_condition and tagged_validity contain the union of all tagged
482 * condition or conditional validity dependence relations that
483 * specialize the dependence relation "map"; that is,
484 * if (i -> a) -> (j -> b) is an element of "tagged_condition"
485 * or "tagged_validity", then i -> j is an element of "map".
486 * If these fields are NULL, then they represent the empty relation.
487 * src is the source node
488 * dst is the sink node
490 * types is a bit vector containing the types of this edge.
491 * validity is set if the edge is used to ensure correctness
492 * coincidence is used to enforce zero dependence distances
493 * proximity is set if the edge is used to minimize dependence distances
494 * condition is set if the edge represents a condition
495 * for a conditional validity schedule constraint
496 * local can only be set for condition edges and indicates that
497 * the dependence distance over the edge should be zero
498 * conditional_validity is set if the edge is used to conditionally
499 * ensure correctness
501 * For validity edges, start and end mark the sequence of inequality
502 * constraints in the LP problem that encode the validity constraint
503 * corresponding to this edge.
505 * During clustering, an edge may be marked "no_merge" if it should
506 * not be used to merge clusters.
507 * The weight is also only used during clustering and it is
508 * an indication of how many schedule dimensions on either side
509 * of the schedule constraints can be aligned.
510 * If the weight is negative, then this means that this edge was postponed
511 * by has_bounded_distances or any_no_merge. The original weight can
512 * be retrieved by adding 1 + graph->max_weight, with "graph"
513 * the graph containing this edge.
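 *
 * For example, if graph->max_weight is 3, then a postponed edge with
 * original weight 2 is stored with weight 2 - (1 + 3) = -2, from which
 * the original weight can be recovered as -2 + 1 + 3 = 2.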
515 struct isl_sched_edge {
516 isl_map *map;
517 isl_union_map *tagged_condition;
518 isl_union_map *tagged_validity;
520 struct isl_sched_node *src;
521 struct isl_sched_node *dst;
523 unsigned types;
525 int start;
526 int end;
528 int no_merge;
529 int weight;
532 /* Is "edge" marked as being of type "type"?
534 static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
536 return ISL_FL_ISSET(edge->types, 1 << type);
539 /* Mark "edge" as being of type "type".
541 static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
543 ISL_FL_SET(edge->types, 1 << type);
546 /* No longer mark "edge" as being of type "type".
548 static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
550 ISL_FL_CLR(edge->types, 1 << type);
553 /* Is "edge" marked as a validity edge?
555 static int is_validity(struct isl_sched_edge *edge)
557 return is_type(edge, isl_edge_validity);
560 /* Mark "edge" as a validity edge.
562 static void set_validity(struct isl_sched_edge *edge)
564 set_type(edge, isl_edge_validity);
567 /* Is "edge" marked as a proximity edge?
569 static int is_proximity(struct isl_sched_edge *edge)
571 return is_type(edge, isl_edge_proximity);
574 /* Is "edge" marked as a local edge?
576 static int is_local(struct isl_sched_edge *edge)
578 return is_type(edge, isl_edge_local);
581 /* Mark "edge" as a local edge.
583 static void set_local(struct isl_sched_edge *edge)
585 set_type(edge, isl_edge_local);
588 /* No longer mark "edge" as a local edge.
590 static void clear_local(struct isl_sched_edge *edge)
592 clear_type(edge, isl_edge_local);
595 /* Is "edge" marked as a coincidence edge?
597 static int is_coincidence(struct isl_sched_edge *edge)
599 return is_type(edge, isl_edge_coincidence);
602 /* Is "edge" marked as a condition edge?
604 static int is_condition(struct isl_sched_edge *edge)
606 return is_type(edge, isl_edge_condition);
609 /* Is "edge" marked as a conditional validity edge?
611 static int is_conditional_validity(struct isl_sched_edge *edge)
613 return is_type(edge, isl_edge_conditional_validity);
616 /* Internal information about the dependence graph used during
617 * the construction of the schedule.
619 * intra_hmap is a cache, mapping dependence relations to their dual,
620 * for dependences from a node to itself
621 * inter_hmap is a cache, mapping dependence relations to their dual,
622 * for dependences between distinct nodes
623 * if compression is involved then the key for these maps
624 * is the original, uncompressed dependence relation, while
625 * the value is the dual of the compressed dependence relation.
627 * n is the number of nodes
628 * node is the list of nodes
629 * maxvar is the maximal number of variables over all nodes
630 * max_row is the allocated number of rows in the schedule
631 * n_row is the current (maximal) number of linearly independent
632 * rows in the node schedules
633 * n_total_row is the current number of rows in the node schedules
634 * band_start is the starting row in the node schedules of the current band
635 * root is set if this graph is the original dependence graph,
636 * without any splitting
638 * sorted contains a list of node indices sorted according to the
639 * SCC to which a node belongs
641 * n_edge is the number of edges
642 * edge is the list of edges
643 * max_edge contains the maximal number of edges of each type;
644 * in particular, it contains the number of edges in the initial graph.
645 * edge_table contains pointers into the edge array, hashed on the source
646 * and sink spaces; there is one such table for each type;
647 * a given edge may be referenced from more than one table
648 * if the corresponding relation appears in more than one of the
649 * sets of dependences; however, for each type there is only
650 * a single edge between a given pair of source and sink space
651 * in the entire graph
653 * node_table contains pointers into the node array, hashed on the space
655 * region contains a list of variable sequences that should be non-trivial
657 * lp contains the (I)LP problem used to obtain new schedule rows
659 * src_scc and dst_scc are the source and sink SCCs of an edge with
660 * conflicting constraints
662 * scc represents the number of components
663 * weak is set if the components are weakly connected
665 * max_weight is used during clustering and represents the maximal
666 * weight of the relevant proximity edges.
668 struct isl_sched_graph {
669 isl_map_to_basic_set *intra_hmap;
670 isl_map_to_basic_set *inter_hmap;
672 struct isl_sched_node *node;
673 int n;
674 int maxvar;
675 int max_row;
676 int n_row;
678 int *sorted;
680 int n_total_row;
681 int band_start;
683 int root;
685 struct isl_sched_edge *edge;
686 int n_edge;
687 int max_edge[isl_edge_last + 1];
688 struct isl_hash_table *edge_table[isl_edge_last + 1];
690 struct isl_hash_table *node_table;
691 struct isl_region *region;
693 isl_basic_set *lp;
695 int src_scc;
696 int dst_scc;
698 int scc;
699 int weak;
701 int max_weight;
704 /* Initialize node_table based on the list of nodes.
706 static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
708 int i;
710 graph->node_table = isl_hash_table_alloc(ctx, graph->n);
711 if (!graph->node_table)
712 return -1;
714 for (i = 0; i < graph->n; ++i) {
715 struct isl_hash_table_entry *entry;
716 uint32_t hash;
718 hash = isl_space_get_hash(graph->node[i].space);
719 entry = isl_hash_table_find(ctx, graph->node_table, hash,
720 &node_has_space,
721 graph->node[i].space, 1);
722 if (!entry)
723 return -1;
724 entry->data = &graph->node[i];
727 return 0;
730 /* Return a pointer to the node that lives within the given space,
731 * or NULL if there is no such node.
733 static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
734 struct isl_sched_graph *graph, __isl_keep isl_space *dim)
736 struct isl_hash_table_entry *entry;
737 uint32_t hash;
739 hash = isl_space_get_hash(dim);
740 entry = isl_hash_table_find(ctx, graph->node_table, hash,
741 &node_has_space, dim, 0);
743 return entry ? entry->data : NULL;
746 static int edge_has_src_and_dst(const void *entry, const void *val)
748 const struct isl_sched_edge *edge = entry;
749 const struct isl_sched_edge *temp = val;
751 return edge->src == temp->src && edge->dst == temp->dst;
754 /* Add the given edge to graph->edge_table[type].
756 static isl_stat graph_edge_table_add(isl_ctx *ctx,
757 struct isl_sched_graph *graph, enum isl_edge_type type,
758 struct isl_sched_edge *edge)
760 struct isl_hash_table_entry *entry;
761 uint32_t hash;
763 hash = isl_hash_init();
764 hash = isl_hash_builtin(hash, edge->src);
765 hash = isl_hash_builtin(hash, edge->dst);
766 entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
767 &edge_has_src_and_dst, edge, 1);
768 if (!entry)
769 return isl_stat_error;
770 entry->data = edge;
772 return isl_stat_ok;
775 /* Allocate the edge_tables based on the maximal number of edges of
776 * each type.
778 static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
780 int i;
782 for (i = 0; i <= isl_edge_last; ++i) {
783 graph->edge_table[i] = isl_hash_table_alloc(ctx,
784 graph->max_edge[i]);
785 if (!graph->edge_table[i])
786 return -1;
789 return 0;
792 /* If graph->edge_table[type] contains an edge from the given source
793 * to the given destination, then return the hash table entry of this edge.
794 * Otherwise, return NULL.
796 static struct isl_hash_table_entry *graph_find_edge_entry(
797 struct isl_sched_graph *graph,
798 enum isl_edge_type type,
799 struct isl_sched_node *src, struct isl_sched_node *dst)
801 isl_ctx *ctx = isl_space_get_ctx(src->space);
802 uint32_t hash;
803 struct isl_sched_edge temp = { .src = src, .dst = dst };
805 hash = isl_hash_init();
806 hash = isl_hash_builtin(hash, temp.src);
807 hash = isl_hash_builtin(hash, temp.dst);
808 return isl_hash_table_find(ctx, graph->edge_table[type], hash,
809 &edge_has_src_and_dst, &temp, 0);
813 /* If graph->edge_table[type] contains an edge from the given source
814 * to the given destination, then return this edge.
815 * Otherwise, return NULL.
817 static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
818 enum isl_edge_type type,
819 struct isl_sched_node *src, struct isl_sched_node *dst)
821 struct isl_hash_table_entry *entry;
823 entry = graph_find_edge_entry(graph, type, src, dst);
824 if (!entry)
825 return NULL;
827 return entry->data;
830 /* Check whether the dependence graph has an edge of the given type
831 * between the given two nodes.
833 static isl_bool graph_has_edge(struct isl_sched_graph *graph,
834 enum isl_edge_type type,
835 struct isl_sched_node *src, struct isl_sched_node *dst)
837 struct isl_sched_edge *edge;
838 isl_bool empty;
840 edge = graph_find_edge(graph, type, src, dst);
841 if (!edge)
842 return isl_bool_false;
844 empty = isl_map_plain_is_empty(edge->map);
845 if (empty < 0)
846 return isl_bool_error;
848 return !empty;
851 /* Look for any edge with the same src, dst and map fields as "model".
853 * Return the matching edge if one can be found.
854 * Return "model" if no matching edge is found.
855 * Return NULL on error.
857 static struct isl_sched_edge *graph_find_matching_edge(
858 struct isl_sched_graph *graph, struct isl_sched_edge *model)
860 enum isl_edge_type i;
861 struct isl_sched_edge *edge;
863 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
864 int is_equal;
866 edge = graph_find_edge(graph, i, model->src, model->dst);
867 if (!edge)
868 continue;
869 is_equal = isl_map_plain_is_equal(model->map, edge->map);
870 if (is_equal < 0)
871 return NULL;
872 if (is_equal)
873 return edge;
876 return model;
879 /* Remove the given edge from all the edge_tables that refer to it.
881 static void graph_remove_edge(struct isl_sched_graph *graph,
882 struct isl_sched_edge *edge)
884 isl_ctx *ctx = isl_map_get_ctx(edge->map);
885 enum isl_edge_type i;
887 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
888 struct isl_hash_table_entry *entry;
890 entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
891 if (!entry)
892 continue;
893 if (entry->data != edge)
894 continue;
895 isl_hash_table_remove(ctx, graph->edge_table[i], entry);
899 /* Check whether the dependence graph has any edge
900 * between the given two nodes.
902 static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
903 struct isl_sched_node *src, struct isl_sched_node *dst)
905 enum isl_edge_type i;
906 isl_bool r;
908 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
909 r = graph_has_edge(graph, i, src, dst);
910 if (r < 0 || r)
911 return r;
914 return r;
917 /* Check whether the dependence graph has a validity edge
918 * between the given two nodes.
920 * Conditional validity edges are essentially validity edges that
921 * can be ignored if the corresponding condition edges are iteration private.
922 * Here, we are only checking for the presence of validity
923 * edges, so we need to consider the conditional validity edges too.
924 * In particular, this function is used during the detection
925 * of strongly connected components and we cannot ignore
926 * conditional validity edges during this detection.
928 static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
929 struct isl_sched_node *src, struct isl_sched_node *dst)
931 isl_bool r;
933 r = graph_has_edge(graph, isl_edge_validity, src, dst);
934 if (r < 0 || r)
935 return r;
937 return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
940 static int graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
941 int n_node, int n_edge)
943 int i;
945 graph->n = n_node;
946 graph->n_edge = n_edge;
947 graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
948 graph->sorted = isl_calloc_array(ctx, int, graph->n);
949 graph->region = isl_alloc_array(ctx, struct isl_region, graph->n);
950 graph->edge = isl_calloc_array(ctx,
951 struct isl_sched_edge, graph->n_edge);
953 graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
954 graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
956 if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
957 !graph->sorted)
958 return -1;
960 for (i = 0; i < graph->n; ++i)
961 graph->sorted[i] = i;
963 return 0;
966 static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
968 int i;
970 isl_map_to_basic_set_free(graph->intra_hmap);
971 isl_map_to_basic_set_free(graph->inter_hmap);
973 if (graph->node)
974 for (i = 0; i < graph->n; ++i) {
975 isl_space_free(graph->node[i].space);
976 isl_set_free(graph->node[i].hull);
977 isl_multi_aff_free(graph->node[i].compress);
978 isl_multi_aff_free(graph->node[i].decompress);
979 isl_mat_free(graph->node[i].sched);
980 isl_map_free(graph->node[i].sched_map);
981 isl_mat_free(graph->node[i].cmap);
982 isl_mat_free(graph->node[i].cinv);
983 isl_mat_free(graph->node[i].ctrans);
984 if (graph->root)
985 free(graph->node[i].coincident);
987 free(graph->node);
988 free(graph->sorted);
989 if (graph->edge)
990 for (i = 0; i < graph->n_edge; ++i) {
991 isl_map_free(graph->edge[i].map);
992 isl_union_map_free(graph->edge[i].tagged_condition);
993 isl_union_map_free(graph->edge[i].tagged_validity);
995 free(graph->edge);
996 free(graph->region);
997 for (i = 0; i <= isl_edge_last; ++i)
998 isl_hash_table_free(ctx, graph->edge_table[i]);
999 isl_hash_table_free(ctx, graph->node_table);
1000 isl_basic_set_free(graph->lp);
1003 /* For each "set" on which this function is called, increment
1004 * graph->n by one and update graph->maxvar.
1006 static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
1008 struct isl_sched_graph *graph = user;
1009 int nvar = isl_set_dim(set, isl_dim_set);
1011 graph->n++;
1012 if (nvar > graph->maxvar)
1013 graph->maxvar = nvar;
1015 isl_set_free(set);
1017 return isl_stat_ok;
1020 /* Add the number of basic maps in "map" to *n.
1022 static isl_stat add_n_basic_map(__isl_take isl_map *map, void *user)
1024 int *n = user;
1026 *n += isl_map_n_basic_map(map);
1027 isl_map_free(map);
1029 return isl_stat_ok;
1032 /* Compute the number of rows that should be allocated for the schedule.
1033 * In particular, we need one row for each variable or one row
1034 * for each basic map in the dependences.
1035 * Note that it is practically impossible to exhaust both
1036 * the number of dependences and the number of variables.
1038 static int compute_max_row(struct isl_sched_graph *graph,
1039 __isl_keep isl_schedule_constraints *sc)
1041 enum isl_edge_type i;
1042 int n_edge;
1044 graph->n = 0;
1045 graph->maxvar = 0;
1046 if (isl_union_set_foreach_set(sc->domain, &init_n_maxvar, graph) < 0)
1047 return -1;
1048 n_edge = 0;
1049 for (i = isl_edge_first; i <= isl_edge_last; ++i)
1050 if (isl_union_map_foreach_map(sc->constraint[i],
1051 &add_n_basic_map, &n_edge) < 0)
1052 return -1;
1053 graph->max_row = n_edge + graph->maxvar;
1055 return 0;
1058 /* Does "bset" have any defining equalities for its set variables?
1060 static int has_any_defining_equality(__isl_keep isl_basic_set *bset)
1062 int i, n;
1064 if (!bset)
1065 return -1;
1067 n = isl_basic_set_dim(bset, isl_dim_set);
1068 for (i = 0; i < n; ++i) {
1069 int has;
1071 has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
1072 NULL);
1073 if (has < 0 || has)
1074 return has;
1077 return 0;
1080 /* Add a new node to the graph representing the given space.
1081 * "nvar" is the (possibly compressed) number of variables and
1082 * may be smaller than the number of set variables in "space"
1083 * if "compressed" is set.
1084 * If "compressed" is set, then "hull" represents the constraints
1085 * that were used to derive the compression, while "compress" and
1086 * "decompress" map the original space to the compressed space and
1087 * vice versa.
1088 * If "compressed" is not set, then "hull", "compress" and "decompress"
1089 * should be NULL.
1091 static isl_stat add_node(struct isl_sched_graph *graph,
1092 __isl_take isl_space *space, int nvar, int compressed,
1093 __isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
1094 __isl_take isl_multi_aff *decompress)
1096 int nparam;
1097 isl_ctx *ctx;
1098 isl_mat *sched;
1099 int *coincident;
1101 if (!space)
1102 return isl_stat_error;
1104 ctx = isl_space_get_ctx(space);
1105 nparam = isl_space_dim(space, isl_dim_param);
1106 if (!ctx->opt->schedule_parametric)
1107 nparam = 0;
1108 sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
1109 graph->node[graph->n].space = space;
1110 graph->node[graph->n].nvar = nvar;
1111 graph->node[graph->n].nparam = nparam;
1112 graph->node[graph->n].sched = sched;
1113 graph->node[graph->n].sched_map = NULL;
1114 coincident = isl_calloc_array(ctx, int, graph->max_row);
1115 graph->node[graph->n].coincident = coincident;
1116 graph->node[graph->n].compressed = compressed;
1117 graph->node[graph->n].hull = hull;
1118 graph->node[graph->n].compress = compress;
1119 graph->node[graph->n].decompress = decompress;
1120 graph->n++;
1122 if (!space || !sched || (graph->max_row && !coincident))
1123 return isl_stat_error;
1124 if (compressed && (!hull || !compress || !decompress))
1125 return isl_stat_error;
1127 return isl_stat_ok;
1130 /* Add a new node to the graph representing the given set.
1132 * If any of the set variables is defined by an equality, then
1133 * we perform variable compression such that we can perform
1134 * the scheduling on the compressed domain.
1136 static isl_stat extract_node(__isl_take isl_set *set, void *user)
1138 int nvar;
1139 int has_equality;
1140 isl_space *space;
1141 isl_basic_set *hull;
1142 isl_set *hull_set;
1143 isl_morph *morph;
1144 isl_multi_aff *compress, *decompress;
1145 struct isl_sched_graph *graph = user;
1147 space = isl_set_get_space(set);
1148 hull = isl_set_affine_hull(set);
1149 hull = isl_basic_set_remove_divs(hull);
1150 nvar = isl_space_dim(space, isl_dim_set);
1151 has_equality = has_any_defining_equality(hull);
1153 if (has_equality < 0)
1154 goto error;
1155 if (!has_equality) {
1156 isl_basic_set_free(hull);
1157 return add_node(graph, space, nvar, 0, NULL, NULL, NULL);
1160 morph = isl_basic_set_variable_compression(hull, isl_dim_set);
1161 nvar = isl_morph_ran_dim(morph, isl_dim_set);
1162 compress = isl_morph_get_var_multi_aff(morph);
1163 morph = isl_morph_inverse(morph);
1164 decompress = isl_morph_get_var_multi_aff(morph);
1165 isl_morph_free(morph);
1167 hull_set = isl_set_from_basic_set(hull);
1168 return add_node(graph, space, nvar, 1, hull_set, compress, decompress);
1169 error:
1170 isl_basic_set_free(hull);
1171 isl_space_free(space);
1172 return isl_stat_error;
1175 struct isl_extract_edge_data {
1176 enum isl_edge_type type;
1177 struct isl_sched_graph *graph;
1180 /* Merge edge2 into edge1, freeing the contents of edge2.
1181 * Return 0 on success and -1 on failure.
1183 * edge1 and edge2 are assumed to have the same value for the map field.
1185 static int merge_edge(struct isl_sched_edge *edge1,
1186 struct isl_sched_edge *edge2)
1188 edge1->types |= edge2->types;
1189 isl_map_free(edge2->map);
1191 if (is_condition(edge2)) {
1192 if (!edge1->tagged_condition)
1193 edge1->tagged_condition = edge2->tagged_condition;
1194 else
1195 edge1->tagged_condition =
1196 isl_union_map_union(edge1->tagged_condition,
1197 edge2->tagged_condition);
1200 if (is_conditional_validity(edge2)) {
1201 if (!edge1->tagged_validity)
1202 edge1->tagged_validity = edge2->tagged_validity;
1203 else
1204 edge1->tagged_validity =
1205 isl_union_map_union(edge1->tagged_validity,
1206 edge2->tagged_validity);
1209 if (is_condition(edge2) && !edge1->tagged_condition)
1210 return -1;
1211 if (is_conditional_validity(edge2) && !edge1->tagged_validity)
1212 return -1;
1214 return 0;
1217 /* Insert dummy tags in domain and range of "map".
1219 * In particular, if "map" is of the form
1221 * A -> B
1223 * then return
1225 * [A -> dummy_tag] -> [B -> dummy_tag]
1227 * where the dummy_tags are identical and equal to any dummy tags
1228 * introduced by any other call to this function.
1230 static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
1232 static char dummy;
1233 isl_ctx *ctx;
1234 isl_id *id;
1235 isl_space *space;
1236 isl_set *domain, *range;
1238 ctx = isl_map_get_ctx(map);
1240 id = isl_id_alloc(ctx, NULL, &dummy);
1241 space = isl_space_params(isl_map_get_space(map));
1242 space = isl_space_set_from_params(space);
1243 space = isl_space_set_tuple_id(space, isl_dim_set, id);
1244 space = isl_space_map_from_set(space);
1246 domain = isl_map_wrap(map);
1247 range = isl_map_wrap(isl_map_universe(space));
1248 map = isl_map_from_domain_and_range(domain, range);
1249 map = isl_map_zip(map);
1251 return map;
1254 /* Given that at least one of "src" or "dst" is compressed, return
1255 * a map between the spaces of these nodes restricted to the affine
1256 * hull that was used in the compression.
1258 static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
1259 struct isl_sched_node *dst)
1261 isl_set *dom, *ran;
1263 if (src->compressed)
1264 dom = isl_set_copy(src->hull);
1265 else
1266 dom = isl_set_universe(isl_space_copy(src->space));
1267 if (dst->compressed)
1268 ran = isl_set_copy(dst->hull);
1269 else
1270 ran = isl_set_universe(isl_space_copy(dst->space));
1272 return isl_map_from_domain_and_range(dom, ran);
1275 /* Intersect the domains of the nested relations in domain and range
1276 * of "tagged" with "map".
1278 static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
1279 __isl_keep isl_map *map)
1281 isl_set *set;
1283 tagged = isl_map_zip(tagged);
1284 set = isl_map_wrap(isl_map_copy(map));
1285 tagged = isl_map_intersect_domain(tagged, set);
1286 tagged = isl_map_zip(tagged);
1287 return tagged;
1290 /* Return a pointer to the node that lives in the domain space of "map"
1291 * or NULL if there is no such node.
1293 static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
1294 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1296 struct isl_sched_node *node;
1297 isl_space *space;
1299 space = isl_space_domain(isl_map_get_space(map));
1300 node = graph_find_node(ctx, graph, space);
1301 isl_space_free(space);
1303 return node;
1306 /* Return a pointer to the node that lives in the range space of "map"
1307 * or NULL if there is no such node.
1309 static struct isl_sched_node *find_range_node(isl_ctx *ctx,
1310 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1312 struct isl_sched_node *node;
1313 isl_space *space;
1315 space = isl_space_range(isl_map_get_space(map));
1316 node = graph_find_node(ctx, graph, space);
1317 isl_space_free(space);
1319 return node;
1322 /* Add a new edge to the graph based on the given map
1323 * and add it to data->graph->edge_table[data->type].
1324 * If a dependence relation of a given type happens to be identical
1325 * to one of the dependence relations of a type that was added before,
1326 * then we don't create a new edge, but instead mark the original edge
1327 * as also representing a dependence of the current type.
1329 * Edges of type isl_edge_condition or isl_edge_conditional_validity
1330 * may be specified as "tagged" dependence relations. That is, "map"
1331 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1332 * the dependence on iterations and a and b are tags.
1333 * edge->map is set to the relation containing the elements i -> j,
1334 * while edge->tagged_condition and edge->tagged_validity contain
1335 * the union of all the "map" relations
1336 * for which extract_edge is called that result in the same edge->map.
1338 * If the source or the destination node is compressed, then
1339 * intersect both "map" and "tagged" with the constraints that
1340 * were used to construct the compression.
1341 * This ensures that there are no schedule constraints defined
1342 * outside of these domains, while the scheduler no longer has
1343 * any control over those outside parts.
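 *
 * For example, a tagged relation { [S[i] -> a[]] -> [T[i] -> b[]] }
 * results in an edge with edge->map equal to { S[i] -> T[i] },
 * while the tagged relation itself is added to edge->tagged_condition
 * or edge->tagged_validity, depending on data->type.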
1345 static isl_stat extract_edge(__isl_take isl_map *map, void *user)
1347 isl_ctx *ctx = isl_map_get_ctx(map);
1348 struct isl_extract_edge_data *data = user;
1349 struct isl_sched_graph *graph = data->graph;
1350 struct isl_sched_node *src, *dst;
1351 struct isl_sched_edge *edge;
1352 isl_map *tagged = NULL;
1354 if (data->type == isl_edge_condition ||
1355 data->type == isl_edge_conditional_validity) {
1356 if (isl_map_can_zip(map)) {
1357 tagged = isl_map_copy(map);
1358 map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
1359 } else {
1360 tagged = insert_dummy_tags(isl_map_copy(map));
1364 src = find_domain_node(ctx, graph, map);
1365 dst = find_range_node(ctx, graph, map);
1367 if (!src || !dst) {
1368 isl_map_free(map);
1369 isl_map_free(tagged);
1370 return isl_stat_ok;
1373 if (src->compressed || dst->compressed) {
1374 isl_map *hull;
1375 hull = extract_hull(src, dst);
1376 if (tagged)
1377 tagged = map_intersect_domains(tagged, hull);
1378 map = isl_map_intersect(map, hull);
1381 graph->edge[graph->n_edge].src = src;
1382 graph->edge[graph->n_edge].dst = dst;
1383 graph->edge[graph->n_edge].map = map;
1384 graph->edge[graph->n_edge].types = 0;
1385 graph->edge[graph->n_edge].tagged_condition = NULL;
1386 graph->edge[graph->n_edge].tagged_validity = NULL;
1387 set_type(&graph->edge[graph->n_edge], data->type);
1388 if (data->type == isl_edge_condition)
1389 graph->edge[graph->n_edge].tagged_condition =
1390 isl_union_map_from_map(tagged);
1391 if (data->type == isl_edge_conditional_validity)
1392 graph->edge[graph->n_edge].tagged_validity =
1393 isl_union_map_from_map(tagged);
1395 edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
1396 if (!edge) {
1397 graph->n_edge++;
1398 return isl_stat_error;
1400 if (edge == &graph->edge[graph->n_edge])
1401 return graph_edge_table_add(ctx, graph, data->type,
1402 &graph->edge[graph->n_edge++]);
1404 if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
1405 return isl_stat_error;
1407 return graph_edge_table_add(ctx, graph, data->type, edge);
1410 /* Initialize the schedule graph "graph" from the schedule constraints "sc".
1412 * The context is included in the domain before the nodes of
1413 * the graph are extracted in order to be able to exploit
1414 * any possible additional equalities.
1415 * Note that this intersection is only performed locally here.
1417 static isl_stat graph_init(struct isl_sched_graph *graph,
1418 __isl_keep isl_schedule_constraints *sc)
1420 isl_ctx *ctx;
1421 isl_union_set *domain;
1422 struct isl_extract_edge_data data;
1423 enum isl_edge_type i;
1424 isl_stat r;
1426 if (!sc)
1427 return isl_stat_error;
1429 ctx = isl_schedule_constraints_get_ctx(sc);
1431 domain = isl_schedule_constraints_get_domain(sc);
1432 graph->n = isl_union_set_n_set(domain);
1433 isl_union_set_free(domain);
1435 if (graph_alloc(ctx, graph, graph->n,
1436 isl_schedule_constraints_n_map(sc)) < 0)
1437 return isl_stat_error;
1439 if (compute_max_row(graph, sc) < 0)
1440 return isl_stat_error;
1441 graph->root = 1;
1442 graph->n = 0;
1443 domain = isl_schedule_constraints_get_domain(sc);
1444 domain = isl_union_set_intersect_params(domain,
1445 isl_set_copy(sc->context));
1446 r = isl_union_set_foreach_set(domain, &extract_node, graph);
1447 isl_union_set_free(domain);
1448 if (r < 0)
1449 return isl_stat_error;
1450 if (graph_init_table(ctx, graph) < 0)
1451 return isl_stat_error;
1452 for (i = isl_edge_first; i <= isl_edge_last; ++i)
1453 graph->max_edge[i] = isl_union_map_n_map(sc->constraint[i]);
1454 if (graph_init_edge_tables(ctx, graph) < 0)
1455 return isl_stat_error;
1456 graph->n_edge = 0;
1457 data.graph = graph;
1458 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1459 data.type = i;
1460 if (isl_union_map_foreach_map(sc->constraint[i],
1461 &extract_edge, &data) < 0)
1462 return isl_stat_error;
1465 return isl_stat_ok;
1468 /* Check whether there is any dependence from node[j] to node[i]
1469 * or from node[i] to node[j].
1471 static isl_bool node_follows_weak(int i, int j, void *user)
1473 isl_bool f;
1474 struct isl_sched_graph *graph = user;
1476 f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
1477 if (f < 0 || f)
1478 return f;
1479 return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
1482 /* Check whether there is a (conditional) validity dependence from node[j]
1483 * to node[i], forcing node[i] to follow node[j].
1485 static isl_bool node_follows_strong(int i, int j, void *user)
1487 struct isl_sched_graph *graph = user;
1489 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
1492 /* Use Tarjan's algorithm for computing the strongly connected components
1493 * in the dependence graph only considering those edges defined by "follows".
1495 static int detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
1496 isl_bool (*follows)(int i, int j, void *user))
1498 int i, n;
1499 struct isl_tarjan_graph *g = NULL;
1501 g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
1502 if (!g)
1503 return -1;
1505 graph->scc = 0;
1506 i = 0;
1507 n = graph->n;
1508 while (n) {
1509 while (g->order[i] != -1) {
1510 graph->node[g->order[i]].scc = graph->scc;
1511 --n;
1512 ++i;
1514 ++i;
1515 graph->scc++;
1518 isl_tarjan_graph_free(g);
1520 return 0;
1523 /* Apply Tarjan's algorithm to detect the strongly connected components
1524 * in the dependence graph.
1525 * Only consider the (conditional) validity dependences and clear "weak".
1527 static int detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1529 graph->weak = 0;
1530 return detect_ccs(ctx, graph, &node_follows_strong);
1533 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1534 * in the dependence graph.
1535 * Consider all dependences and set "weak".
1537 static int detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1539 graph->weak = 1;
1540 return detect_ccs(ctx, graph, &node_follows_weak);
1543 static int cmp_scc(const void *a, const void *b, void *data)
1545 struct isl_sched_graph *graph = data;
1546 const int *i1 = a;
1547 const int *i2 = b;
1549 return graph->node[*i1].scc - graph->node[*i2].scc;
1552 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1554 static int sort_sccs(struct isl_sched_graph *graph)
1556 return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
1559 /* Given a dependence relation R from "node" to itself,
1560 * construct the set of coefficients of valid constraints for elements
1561 * in that dependence relation.
1562 * In particular, the result contains tuples of coefficients
1563 * c_0, c_n, c_x such that
1565 * c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1567 * or, equivalently,
1569 * c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1571 * We choose here to compute the dual of delta R.
1572 * Alternatively, we could have computed the dual of R, resulting
1573 * in a set of tuples c_0, c_n, c_x, c_y, and then
1574 * plugged in (c_0, c_n, c_x, -c_x).
1576 * If "node" has been compressed, then the dependence relation
1577 * is also compressed before the set of coefficients is computed.
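 *
 * For example, if R = { S[i] -> S[i + 1] } and there are no parameters,
 * then delta R = { 1 } and the result consists of all coefficients
 * (c_0, c_x) satisfying c_0 + c_x >= 0.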
1579 static __isl_give isl_basic_set *intra_coefficients(
1580 struct isl_sched_graph *graph, struct isl_sched_node *node,
1581 __isl_take isl_map *map)
1583 isl_set *delta;
1584 isl_map *key;
1585 isl_basic_set *coef;
1586 isl_maybe_isl_basic_set m;
1588 m = isl_map_to_basic_set_try_get(graph->intra_hmap, map);
1589 if (m.valid < 0 || m.valid) {
1590 isl_map_free(map);
1591 return m.value;
1594 key = isl_map_copy(map);
1595 if (node->compressed) {
1596 map = isl_map_preimage_domain_multi_aff(map,
1597 isl_multi_aff_copy(node->decompress));
1598 map = isl_map_preimage_range_multi_aff(map,
1599 isl_multi_aff_copy(node->decompress));
1601 delta = isl_set_remove_divs(isl_map_deltas(map));
1602 coef = isl_set_coefficients(delta);
1603 graph->intra_hmap = isl_map_to_basic_set_set(graph->intra_hmap, key,
1604 isl_basic_set_copy(coef));
1606 return coef;
1609 /* Given a dependence relation R, construct the set of coefficients
1610 * of valid constraints for elements in that dependence relation.
1611 * In particular, the result contains tuples of coefficients
1612 * c_0, c_n, c_x, c_y such that
1614 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1616 * If the source or destination nodes of "edge" have been compressed,
1617 * then the dependence relation is also compressed before
1618 * the set of coefficients is computed.
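 *
 * For example, if R = { S[i] -> T[i] : 0 <= i <= 10 } and there are
 * no parameters, then the result consists of all coefficients
 * (c_0, c_x, c_y) satisfying c_0 >= 0 and c_0 + 10 c_x + 10 c_y >= 0.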
1620 static __isl_give isl_basic_set *inter_coefficients(
1621 struct isl_sched_graph *graph, struct isl_sched_edge *edge,
1622 __isl_take isl_map *map)
1624 isl_set *set;
1625 isl_map *key;
1626 isl_basic_set *coef;
1627 isl_maybe_isl_basic_set m;
1629 m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
1630 if (m.valid < 0 || m.valid) {
1631 isl_map_free(map);
1632 return m.value;
1635 key = isl_map_copy(map);
1636 if (edge->src->compressed)
1637 map = isl_map_preimage_domain_multi_aff(map,
1638 isl_multi_aff_copy(edge->src->decompress));
1639 if (edge->dst->compressed)
1640 map = isl_map_preimage_range_multi_aff(map,
1641 isl_multi_aff_copy(edge->dst->decompress));
1642 set = isl_map_wrap(isl_map_remove_divs(map));
1643 coef = isl_set_coefficients(set);
1644 graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
1645 isl_basic_set_copy(coef));
1647 return coef;
1650 /* Add constraints to graph->lp that force validity for the given
1651 * dependence from a node i to itself.
1652 * That is, add constraints that enforce
1654 * (c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
1655 * = c_i_x (y - x) >= 0
1657 * for each (x,y) in R.
1658 * We obtain general constraints on coefficients (c_0, c_n, c_x)
1659 * of valid constraints for (y - x) and then plug in (0, 0, c_i_x^+ - c_i_x^-),
1660 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
1661 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1663 * Actually, we do not construct constraints for the c_i_x themselves,
1664 * but for the coefficients of c_i_x written as a linear combination
1665 * of the columns in node->cmap.
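 *
 * For example, for R = { S[i] -> S[i + 1] }, the valid coefficients
 * (c_0, c_n, c_x) satisfy c_0 + c_x >= 0, so plugging in
 * (0, 0, c_i_x^+ - c_i_x^-) simply enforces c_i_x >= 0
 * (up to the change of basis through node->cmap).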
1667 static int add_intra_validity_constraints(struct isl_sched_graph *graph,
1668 struct isl_sched_edge *edge)
1670 unsigned total;
1671 isl_map *map = isl_map_copy(edge->map);
1672 isl_ctx *ctx = isl_map_get_ctx(map);
1673 isl_space *dim;
1674 isl_dim_map *dim_map;
1675 isl_basic_set *coef;
1676 struct isl_sched_node *node = edge->src;
1678 coef = intra_coefficients(graph, node, map);
1680 dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));
1682 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1683 isl_space_dim(dim, isl_dim_set), isl_mat_copy(node->cmap));
1684 if (!coef)
1685 goto error;
1687 total = isl_basic_set_total_dim(graph->lp);
1688 dim_map = isl_dim_map_alloc(ctx, total);
1689 isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
1690 isl_space_dim(dim, isl_dim_set), 1,
1691 node->nvar, -1);
1692 isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
1693 isl_space_dim(dim, isl_dim_set), 1,
1694 node->nvar, 1);
1695 graph->lp = isl_basic_set_extend_constraints(graph->lp,
1696 coef->n_eq, coef->n_ineq);
1697 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
1698 coef, dim_map);
1699 isl_space_free(dim);
1701 return 0;
1702 error:
1703 isl_space_free(dim);
1704 return -1;
1707 /* Add constraints to graph->lp that force validity for the given
1708 * dependence from node i to node j.
1709 * That is, add constraints that enforce
1711 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
1713 * for each (x,y) in R.
1714 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1715 * of valid constraints for R and then plug in
1716 * (c_j_0 - c_i_0, c_j_n^+ - c_j_n^- - (c_i_n^+ - c_i_n^-),
1717 * c_j_x^+ - c_j_x^- - (c_i_x^+ - c_i_x^-)),
1718 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
1719 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1721 * Actually, we do not construct constraints for the c_*_x themselves,
1722 * but for the coefficients of c_*_x written as a linear combination
1723 * of the columns in node->cmap.
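 *
 * For example, for R = { S[k] -> T[k] }, with S corresponding to node i
 * and T to node j, the constraints enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x k) - (c_i_0 + c_i_n n + c_i_x k) >= 0
 *
 * for each value of k (again up to the change of basis through
 * the cmap of each node).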
1725 static int add_inter_validity_constraints(struct isl_sched_graph *graph,
1726 struct isl_sched_edge *edge)
1728 unsigned total;
1729 isl_map *map = isl_map_copy(edge->map);
1730 isl_ctx *ctx = isl_map_get_ctx(map);
1731 isl_space *dim;
1732 isl_dim_map *dim_map;
1733 isl_basic_set *coef;
1734 struct isl_sched_node *src = edge->src;
1735 struct isl_sched_node *dst = edge->dst;
1737 coef = inter_coefficients(graph, edge, map);
1739 dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));
1741 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1742 isl_space_dim(dim, isl_dim_set), isl_mat_copy(src->cmap));
1743 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1744 isl_space_dim(dim, isl_dim_set) + src->nvar,
1745 isl_mat_copy(dst->cmap));
1746 if (!coef)
1747 goto error;
1749 total = isl_basic_set_total_dim(graph->lp);
1750 dim_map = isl_dim_map_alloc(ctx, total);
1752 isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, 1);
1753 isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -1);
1754 isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, 1);
1755 isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
1756 isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
1757 dst->nvar, -1);
1758 isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
1759 isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
1760 dst->nvar, 1);
1762 isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -1);
1763 isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, 1);
1764 isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -1);
1765 isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
1766 isl_space_dim(dim, isl_dim_set), 1,
1767 src->nvar, 1);
1768 isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
1769 isl_space_dim(dim, isl_dim_set), 1,
1770 src->nvar, -1);
1772 edge->start = graph->lp->n_ineq;
1773 graph->lp = isl_basic_set_extend_constraints(graph->lp,
1774 coef->n_eq, coef->n_ineq);
1775 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
1776 coef, dim_map);
1777 if (!graph->lp)
1778 goto error;
1779 isl_space_free(dim);
1780 edge->end = graph->lp->n_ineq;
1782 return 0;
1783 error:
1784 isl_space_free(dim);
1785 return -1;
1788 /* Add constraints to graph->lp that bound the dependence distance for the given
1789 * dependence from a node i to itself.
1790 * If s = 1, we add the constraint
1792 * c_i_x (y - x) <= m_0 + m_n n
1794 * or
1796 * -c_i_x (y - x) + m_0 + m_n n >= 0
1798 * for each (x,y) in R.
1799 * If s = -1, we add the constraint
1801 * -c_i_x (y - x) <= m_0 + m_n n
1803 * or
1805 * c_i_x (y - x) + m_0 + m_n n >= 0
1807 * for each (x,y) in R.
1808 * We obtain general constraints on coefficients (c_0, c_n, c_x)
1809 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
1810 * with each coefficient (except m_0) represented as a pair of non-negative
1811 * coefficients.
1813 * Actually, we do not construct constraints for the c_i_x themselves,
1814 * but for the coefficients of c_i_x written as a linear combination
1815 * of the columns in node->cmap.
1818 * If "local" is set, then we add constraints
1820 * c_i_x (y - x) <= 0
1822 * or
1824 * -c_i_x (y - x) <= 0
1826 * instead, forcing the dependence distance to be (less than or) equal to 0.
1827 * That is, we plug in (0, 0, -s * c_i_x).
1828 * Note that dependences marked local are treated as validity constraints
1829 * by add_all_validity_constraints and therefore also have
1830 * their distances bounded by 0 from below.
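 *
 * For example, for R = { S[i] -> S[i + 1] } and s = 1, the added
 * constraint is
 *
 *	-c_i_x + m_0 + m_n n >= 0
 *
 * i.e., c_i_x <= m_0 + m_n n (up to the change of basis through
 * node->cmap).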
1832 static int add_intra_proximity_constraints(struct isl_sched_graph *graph,
1833 struct isl_sched_edge *edge, int s, int local)
1835 unsigned total;
1836 unsigned nparam;
1837 isl_map *map = isl_map_copy(edge->map);
1838 isl_ctx *ctx = isl_map_get_ctx(map);
1839 isl_space *dim;
1840 isl_dim_map *dim_map;
1841 isl_basic_set *coef;
1842 struct isl_sched_node *node = edge->src;
1844 coef = intra_coefficients(graph, node, map);
1846 dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));
1848 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1849 isl_space_dim(dim, isl_dim_set), isl_mat_copy(node->cmap));
1850 if (!coef)
1851 goto error;
1853 nparam = isl_space_dim(node->space, isl_dim_param);
1854 total = isl_basic_set_total_dim(graph->lp);
1855 dim_map = isl_dim_map_alloc(ctx, total);
1857 if (!local) {
1858 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
1859 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
1860 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
1862 isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
1863 isl_space_dim(dim, isl_dim_set), 1,
1864 node->nvar, s);
1865 isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
1866 isl_space_dim(dim, isl_dim_set), 1,
1867 node->nvar, -s);
1868 graph->lp = isl_basic_set_extend_constraints(graph->lp,
1869 coef->n_eq, coef->n_ineq);
1870 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
1871 coef, dim_map);
1872 isl_space_free(dim);
1874 return 0;
1875 error:
1876 isl_space_free(dim);
1877 return -1;
1880 /* Add constraints to graph->lp that bound the dependence distance for the given
1881 * dependence from node i to node j.
1882 * If s = 1, we add the constraint
1884 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
1885 * <= m_0 + m_n n
1887 * or
1889 * -(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
1890 * m_0 + m_n n >= 0
1892 * for each (x,y) in R.
1893 * If s = -1, we add the constraint
1895 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
1896 * <= m_0 + m_n n
1898 * or
1900 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
1901 * m_0 + m_n n >= 0
1903 * for each (x,y) in R.
1904 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1905 * of valid constraints for R and then plug in
1906 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
1907 * -s*c_j_x+s*c_i_x)
1908 * with each coefficient (except m_0, c_j_0 and c_i_0)
1909 * represented as a pair of non-negative coefficients.
1911 * Actually, we do not construct constraints for the c_*_x themselves,
1912 * but for the coefficients of c_*_x written as a linear combination
1913 * of the columns in node->cmap.
1916 * If "local" is set, then we add constraints
1918 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
1920 * or
1922 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) <= 0
1924 * instead, forcing the dependence distance to be (less than or) equal to 0.
1925 * That is, we plug in
1926 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, -s*c_j_x+s*c_i_x).
1927 * Note that dependences marked local are treated as validity constraints
1928 * by add_all_validity_constraints and therefore also have
1929 * their distances bounded by 0 from below.
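 *
 * For instance (hypothetical data, only for illustration), if R were
 * { S[i] -> T[i + 1] } with single schedule coefficients c_i_x for S
 * and c_j_x for T, the s = 1 case would enforce
 *
 * (c_j_0 + c_j_n n + c_j_x (i + 1)) - (c_i_0 + c_i_n n + c_i_x i)
 * <= m_0 + m_n n
 *
 * for every i with S[i] -> T[i + 1] in R, and the "local" case would
 * use 0 as the right-hand side instead.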
1931 static int add_inter_proximity_constraints(struct isl_sched_graph *graph,
1932 struct isl_sched_edge *edge, int s, int local)
1934 unsigned total;
1935 unsigned nparam;
1936 isl_map *map = isl_map_copy(edge->map);
1937 isl_ctx *ctx = isl_map_get_ctx(map);
1938 isl_space *dim;
1939 isl_dim_map *dim_map;
1940 isl_basic_set *coef;
1941 struct isl_sched_node *src = edge->src;
1942 struct isl_sched_node *dst = edge->dst;
1944 coef = inter_coefficients(graph, edge, map);
1946 dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));
1948 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1949 isl_space_dim(dim, isl_dim_set), isl_mat_copy(src->cmap));
1950 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1951 isl_space_dim(dim, isl_dim_set) + src->nvar,
1952 isl_mat_copy(dst->cmap));
1953 if (!coef)
1954 goto error;
1956 nparam = isl_space_dim(src->space, isl_dim_param);
1957 total = isl_basic_set_total_dim(graph->lp);
1958 dim_map = isl_dim_map_alloc(ctx, total);
1960 if (!local) {
1961 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
1962 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
1963 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
1966 isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, -s);
1967 isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, s);
1968 isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, -s);
1969 isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
1970 isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
1971 dst->nvar, s);
1972 isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
1973 isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
1974 dst->nvar, -s);
1976 isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, s);
1977 isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, -s);
1978 isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, s);
1979 isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
1980 isl_space_dim(dim, isl_dim_set), 1,
1981 src->nvar, -s);
1982 isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
1983 isl_space_dim(dim, isl_dim_set), 1,
1984 src->nvar, s);
1986 graph->lp = isl_basic_set_extend_constraints(graph->lp,
1987 coef->n_eq, coef->n_ineq);
1988 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
1989 coef, dim_map);
1990 isl_space_free(dim);
1992 return 0;
1993 error:
1994 isl_space_free(dim);
1995 return -1;
1998 /* Add all validity constraints to graph->lp.
2000 * An edge that is forced to be local needs to have its dependence
2001 * distances equal to zero. We take care of bounding them by 0 from below
2002 * here. add_all_proximity_constraints takes care of bounding them by 0
2003 * from above.
2005 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2006 * Otherwise, we ignore them.
2008 static int add_all_validity_constraints(struct isl_sched_graph *graph,
2009 int use_coincidence)
2011 int i;
2013 for (i = 0; i < graph->n_edge; ++i) {
2014 struct isl_sched_edge *edge = &graph->edge[i];
2015 int local;
2017 local = is_local(edge) ||
2018 (is_coincidence(edge) && use_coincidence);
2019 if (!is_validity(edge) && !local)
2020 continue;
2021 if (edge->src != edge->dst)
2022 continue;
2023 if (add_intra_validity_constraints(graph, edge) < 0)
2024 return -1;
2027 for (i = 0; i < graph->n_edge; ++i) {
2028 struct isl_sched_edge *edge = &graph->edge[i];
2029 int local;
2031 local = is_local(edge) ||
2032 (is_coincidence(edge) && use_coincidence);
2033 if (!is_validity(edge) && !local)
2034 continue;
2035 if (edge->src == edge->dst)
2036 continue;
2037 if (add_inter_validity_constraints(graph, edge) < 0)
2038 return -1;
2041 return 0;
2044 /* Add constraints to graph->lp that bound the dependence distance
2045 * for all dependence relations.
2046 * If a given proximity dependence is identical to a validity
2047 * dependence, then the dependence distance is already bounded
2048 * from below (by zero), so we only need to bound the distance
2049 * from above. (This includes the case of "local" dependences
2050 * which are treated as validity dependences by add_all_validity_constraints.)
2051 * Otherwise, we need to bound the distance both from above and from below.
2053 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2054 * Otherwise, we ignore them.
2056 static int add_all_proximity_constraints(struct isl_sched_graph *graph,
2057 int use_coincidence)
2059 int i;
2061 for (i = 0; i < graph->n_edge; ++i) {
2062 struct isl_sched_edge *edge = &graph->edge[i];
2063 int local;
2065 local = is_local(edge) ||
2066 (is_coincidence(edge) && use_coincidence);
2067 if (!is_proximity(edge) && !local)
2068 continue;
2069 if (edge->src == edge->dst &&
2070 add_intra_proximity_constraints(graph, edge, 1, local) < 0)
2071 return -1;
2072 if (edge->src != edge->dst &&
2073 add_inter_proximity_constraints(graph, edge, 1, local) < 0)
2074 return -1;
2075 if (is_validity(edge) || local)
2076 continue;
2077 if (edge->src == edge->dst &&
2078 add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
2079 return -1;
2080 if (edge->src != edge->dst &&
2081 add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
2082 return -1;
2085 return 0;
2088 /* Compute a basis for the rows in the linear part of the schedule
2089 * and extend this basis to a full basis. The remaining rows
2090 * can then be used to force linear independence from the rows
2091 * in the schedule.
2093 * In particular, given the schedule rows S, we compute
2095 * S = H Q
2096 * S U = H
2098 * with H the Hermite normal form of S. That is, all but the
2099 * first rank columns of H are zero and so each row in S is
2100 * a linear combination of the first rank rows of Q.
2101 * The matrix Q is then transposed because we will write the
2102 * coefficients of the next schedule row as a column vector s
2103 * and express this s as a linear combination s = Q c of the
2104 * computed basis.
2105 * Similarly, the matrix U is transposed such that we can
2106 * compute the coefficients c = U s from a schedule row s.
2108 static int node_update_cmap(struct isl_sched_node *node)
2110 isl_mat *H, *U, *Q;
2111 int n_row = isl_mat_rows(node->sched);
2113 H = isl_mat_sub_alloc(node->sched, 0, n_row,
2114 1 + node->nparam, node->nvar);
2116 H = isl_mat_left_hermite(H, 0, &U, &Q);
2117 isl_mat_free(node->cmap);
2118 isl_mat_free(node->cinv);
2119 isl_mat_free(node->ctrans);
2120 node->ctrans = isl_mat_copy(Q);
2121 node->cmap = isl_mat_transpose(Q);
2122 node->cinv = isl_mat_transpose(U);
2123 node->rank = isl_mat_initial_non_zero_cols(H);
2124 isl_mat_free(H);
2126 if (!node->cmap || !node->cinv || !node->ctrans || node->rank < 0)
2127 return -1;
2128 return 0;
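/* As a small illustration of node_update_cmap (with made-up numbers):
 * for a node with nvar = 2 whose schedule so far has a single row with
 * linear part S = [ 1 1 ], one possible outcome is
 *
 * H = [ 1 0 ]  Q = [ 1 1 ]  cmap = Q^T = [ 1 0 ]
 *                  [ 0 1 ]               [ 1 1 ]
 *
 * so that node->rank = 1.  Writing the next row as c_i_x = cmap t_i_x
 * gives (t_1, t_1 + t_2), which is linearly independent of S exactly
 * when the last nvar - rank = 1 component t_2 is non-zero; this is the
 * condition that the non-triviality region in solve_lp imposes.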
2131 /* How many times should we count the constraints in "edge"?
2133 * If carry is set, then we are counting the number of
2134 * (validity or conditional validity) constraints that will be added
2135 * in setup_carry_lp and we count each edge exactly once.
2137 * Otherwise, we count as follows
2138 * validity -> 1 (>= 0)
2139 * validity+proximity -> 2 (>= 0 and upper bound)
2140 * proximity -> 2 (lower and upper bound)
2141 * local(+any) -> 2 (>= 0 and <= 0)
2143 * If an edge is only marked conditional_validity then it counts
2144 * as zero since it is only checked afterwards.
2146 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2147 * Otherwise, we ignore them.
2149 static int edge_multiplicity(struct isl_sched_edge *edge, int carry,
2150 int use_coincidence)
2152 if (carry && !is_validity(edge) && !is_conditional_validity(edge))
2153 return 0;
2154 if (carry)
2155 return 1;
2156 if (is_proximity(edge) || is_local(edge))
2157 return 2;
2158 if (use_coincidence && is_coincidence(edge))
2159 return 2;
2160 if (is_validity(edge))
2161 return 1;
2162 return 0;
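/* For example, an edge that is both validity and proximity is counted
 * twice: once for the lower bound (>= 0) added by
 * add_all_validity_constraints and once for the upper bound added by
 * add_all_proximity_constraints.  An edge that is only marked
 * conditional_validity adds nothing to the main LP and therefore
 * counts as zero, unless "carry" is set.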
2165 /* Count the number of equality and inequality constraints
2166 * that will be added for the given map.
2168 * "use_coincidence" is set if we should take into account coincidence edges.
2170 static int count_map_constraints(struct isl_sched_graph *graph,
2171 struct isl_sched_edge *edge, __isl_take isl_map *map,
2172 int *n_eq, int *n_ineq, int carry, int use_coincidence)
2174 isl_basic_set *coef;
2175 int f = edge_multiplicity(edge, carry, use_coincidence);
2177 if (f == 0) {
2178 isl_map_free(map);
2179 return 0;
2182 if (edge->src == edge->dst)
2183 coef = intra_coefficients(graph, edge->src, map);
2184 else
2185 coef = inter_coefficients(graph, edge, map);
2186 if (!coef)
2187 return -1;
2188 *n_eq += f * coef->n_eq;
2189 *n_ineq += f * coef->n_ineq;
2190 isl_basic_set_free(coef);
2192 return 0;
2195 /* Count the number of equality and inequality constraints
2196 * that will be added to the main lp problem.
2197 * We count as follows
2198 * validity -> 1 (>= 0)
2199 * validity+proximity -> 2 (>= 0 and upper bound)
2200 * proximity -> 2 (lower and upper bound)
2201 * local(+any) -> 2 (>= 0 and <= 0)
2203 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2204 * Otherwise, we ignore them.
2206 static int count_constraints(struct isl_sched_graph *graph,
2207 int *n_eq, int *n_ineq, int use_coincidence)
2209 int i;
2211 *n_eq = *n_ineq = 0;
2212 for (i = 0; i < graph->n_edge; ++i) {
2213 struct isl_sched_edge *edge = &graph->edge[i];
2214 isl_map *map = isl_map_copy(edge->map);
2216 if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
2217 0, use_coincidence) < 0)
2218 return -1;
2221 return 0;
2224 /* Count the number of constraints that will be added by
2225 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
2226 * accordingly.
2228 * In practice, add_bound_coefficient_constraints only adds inequalities.
2230 static int count_bound_coefficient_constraints(isl_ctx *ctx,
2231 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2233 int i;
2235 if (ctx->opt->schedule_max_coefficient == -1)
2236 return 0;
2238 for (i = 0; i < graph->n; ++i)
2239 *n_ineq += 2 * graph->node[i].nparam + 2 * graph->node[i].nvar;
2241 return 0;
2244 /* Add constraints that bound the values of the variable and parameter
2245 * coefficients of the schedule.
2247 * The maximal value of the coefficients is defined by the option
2248 * 'schedule_max_coefficient'.
2250 static int add_bound_coefficient_constraints(isl_ctx *ctx,
2251 struct isl_sched_graph *graph)
2253 int i, j, k;
2254 int max_coefficient;
2255 int total;
2257 max_coefficient = ctx->opt->schedule_max_coefficient;
2259 if (max_coefficient == -1)
2260 return 0;
2262 total = isl_basic_set_total_dim(graph->lp);
2264 for (i = 0; i < graph->n; ++i) {
2265 struct isl_sched_node *node = &graph->node[i];
2266 for (j = 0; j < 2 * node->nparam + 2 * node->nvar; ++j) {
2267 int dim;
2268 k = isl_basic_set_alloc_inequality(graph->lp);
2269 if (k < 0)
2270 return -1;
2271 dim = 1 + node->start + 1 + j;
2272 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2273 isl_int_set_si(graph->lp->ineq[k][dim], -1);
2274 isl_int_set_si(graph->lp->ineq[k][0], max_coefficient);
2278 return 0;
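/* For example, with schedule_max_coefficient set to 4, each of the
 * 2 * nparam + 2 * nvar non-negative coefficient parts c of a node
 * (the constant term c_i_0 is skipped) receives the inequality
 * 4 - c >= 0, i.e., an upper bound of 4 on that part.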
2281 /* Construct an ILP problem for finding schedule coefficients
2282 * that result in non-negative, but small dependence distances
2283 * over all dependences.
2284 * In particular, the dependence distances over proximity edges
2285 * are bounded by m_0 + m_n n and we compute schedule coefficients
2286 * with small values (preferably zero) of m_n and m_0.
2288 * All variables of the ILP are non-negative. The actual coefficients
2289 * may be negative, so each coefficient is represented as the difference
2290 * of two non-negative variables. The negative part always appears
2291 * immediately before the positive part.
2292 * Other than that, the variables have the following order
2294 * - sum of positive and negative parts of m_n coefficients
2295 * - m_0
2296 * - sum of positive and negative parts of all c_n coefficients
2297 * (unconstrained when computing non-parametric schedules)
2298 * - sum of positive and negative parts of all c_x coefficients
2299 * - positive and negative parts of m_n coefficients
2300 * - for each node
2301 * - c_i_0
2302 * - positive and negative parts of c_i_n (if parametric)
2303 * - positive and negative parts of c_i_x
2305 * The c_i_x are not represented directly, but through the columns of
2306 * node->cmap. That is, the computed values are for variable t_i_x
2307 * such that c_i_x = Q t_i_x with Q equal to node->cmap.
2309 * The constraints are those from the edges plus two or three equalities
2310 * to express the sums.
2312 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2313 * Otherwise, we ignore them.
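 *
 * As a concrete illustration (with made-up sizes), for a single node
 * with nparam = 1 and nvar = 2, param_pos = 4 and the variables are,
 * in order,
 *
 * 0: sum of the m_n parts	1: m_0
 * 2: sum of the c_n parts	3: sum of the c_x parts
 * 4: m_n^-	5: m_n^+
 * 6: c_i_0 (node->start = 6)
 * 7: c_i_n^-	8: c_i_n^+
 * 9-12: negative and positive parts of the two t_i_x coordinates
 *
 * for a total of 13 variables.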
2315 static int setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
2316 int use_coincidence)
2318 int i, j;
2319 int k;
2320 unsigned nparam;
2321 unsigned total;
2322 isl_space *dim;
2323 int parametric;
2324 int param_pos;
2325 int n_eq, n_ineq;
2326 int max_constant_term;
2328 max_constant_term = ctx->opt->schedule_max_constant_term;
2330 parametric = ctx->opt->schedule_parametric;
2331 nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
2332 param_pos = 4;
2333 total = param_pos + 2 * nparam;
2334 for (i = 0; i < graph->n; ++i) {
2335 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
2336 if (node_update_cmap(node) < 0)
2337 return -1;
2338 node->start = total;
2339 total += 1 + 2 * (node->nparam + node->nvar);
2342 if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
2343 return -1;
2344 if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2345 return -1;
2347 dim = isl_space_set_alloc(ctx, 0, total);
2348 isl_basic_set_free(graph->lp);
2349 n_eq += 2 + parametric;
2350 if (max_constant_term != -1)
2351 n_ineq += graph->n;
2353 graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
2355 k = isl_basic_set_alloc_equality(graph->lp);
2356 if (k < 0)
2357 return -1;
2358 isl_seq_clr(graph->lp->eq[k], 1 + total);
2359 isl_int_set_si(graph->lp->eq[k][1], -1);
2360 for (i = 0; i < 2 * nparam; ++i)
2361 isl_int_set_si(graph->lp->eq[k][1 + param_pos + i], 1);
2363 if (parametric) {
2364 k = isl_basic_set_alloc_equality(graph->lp);
2365 if (k < 0)
2366 return -1;
2367 isl_seq_clr(graph->lp->eq[k], 1 + total);
2368 isl_int_set_si(graph->lp->eq[k][3], -1);
2369 for (i = 0; i < graph->n; ++i) {
2370 int pos = 1 + graph->node[i].start + 1;
2372 for (j = 0; j < 2 * graph->node[i].nparam; ++j)
2373 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2377 k = isl_basic_set_alloc_equality(graph->lp);
2378 if (k < 0)
2379 return -1;
2380 isl_seq_clr(graph->lp->eq[k], 1 + total);
2381 isl_int_set_si(graph->lp->eq[k][4], -1);
2382 for (i = 0; i < graph->n; ++i) {
2383 struct isl_sched_node *node = &graph->node[i];
2384 int pos = 1 + node->start + 1 + 2 * node->nparam;
2386 for (j = 0; j < 2 * node->nvar; ++j)
2387 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2390 if (max_constant_term != -1)
2391 for (i = 0; i < graph->n; ++i) {
2392 struct isl_sched_node *node = &graph->node[i];
2393 k = isl_basic_set_alloc_inequality(graph->lp);
2394 if (k < 0)
2395 return -1;
2396 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2397 isl_int_set_si(graph->lp->ineq[k][1 + node->start], -1);
2398 isl_int_set_si(graph->lp->ineq[k][0], max_constant_term);
2401 if (add_bound_coefficient_constraints(ctx, graph) < 0)
2402 return -1;
2403 if (add_all_validity_constraints(graph, use_coincidence) < 0)
2404 return -1;
2405 if (add_all_proximity_constraints(graph, use_coincidence) < 0)
2406 return -1;
2408 return 0;
2411 /* Analyze the conflicting constraint found by
2412 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
2413 * constraint of one of the edges between distinct nodes, living, moreover,
2414 * in distinct SCCs, then record the source and sink SCC as this may
2415 * be a good place to cut between SCCs.
2417 static int check_conflict(int con, void *user)
2419 int i;
2420 struct isl_sched_graph *graph = user;
2422 if (graph->src_scc >= 0)
2423 return 0;
2425 con -= graph->lp->n_eq;
2427 if (con >= graph->lp->n_ineq)
2428 return 0;
2430 for (i = 0; i < graph->n_edge; ++i) {
2431 if (!is_validity(&graph->edge[i]))
2432 continue;
2433 if (graph->edge[i].src == graph->edge[i].dst)
2434 continue;
2435 if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
2436 continue;
2437 if (graph->edge[i].start > con)
2438 continue;
2439 if (graph->edge[i].end <= con)
2440 continue;
2441 graph->src_scc = graph->edge[i].src->scc;
2442 graph->dst_scc = graph->edge[i].dst->scc;
2445 return 0;
2448 /* Check whether the next schedule row of the given node needs to be
2449 * non-trivial. Lower-dimensional domains may have some trivial rows,
2450 * but as soon as the number of remaining required non-trivial rows
2451 * is as large as the number of remaining rows to be computed,
2452 * all remaining rows need to be non-trivial.
2454 static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
2456 return node->nvar - node->rank >= graph->maxvar - graph->n_row;
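/* For instance, if graph->maxvar = 3 and graph->n_row = 2, then one
 * more non-trivial row is still required overall.  A node with
 * nvar = 2 and rank = 1 satisfies 2 - 1 >= 3 - 2, so its next row must
 * be non-trivial, while a node that already has rank = 2 does not
 * (0 >= 1 fails).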
2459 /* Solve the ILP problem constructed in setup_lp.
2460 * For each node such that all the remaining rows of its schedule
2461 * need to be non-trivial, we construct a non-triviality region.
2462 * This region imposes that the next row is independent of previous rows.
2463 * In particular the coefficients c_i_x are represented by t_i_x
2464 * variables with c_i_x = Q t_i_x and Q a unimodular matrix such that
2465 * its first columns span the rows of the previously computed part
2466 * of the schedule. The non-triviality region enforces that at least
2467 * one of the remaining components of t_i_x is non-zero, i.e.,
2468 * that the new schedule row depends on at least one of the remaining
2469 * columns of Q.
2471 static __isl_give isl_vec *solve_lp(struct isl_sched_graph *graph)
2473 int i;
2474 isl_vec *sol;
2475 isl_basic_set *lp;
2477 for (i = 0; i < graph->n; ++i) {
2478 struct isl_sched_node *node = &graph->node[i];
2479 int skip = node->rank;
2480 graph->region[i].pos = node->start + 1 + 2*(node->nparam+skip);
2481 if (needs_row(graph, node))
2482 graph->region[i].len = 2 * (node->nvar - skip);
2483 else
2484 graph->region[i].len = 0;
2486 lp = isl_basic_set_copy(graph->lp);
2487 sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
2488 graph->region, &check_conflict, graph);
2489 return sol;
2492 /* Update the schedules of all nodes based on the given solution
2493 * of the LP problem.
2494 * The new row is added to the current band.
2495 * All possibly negative coefficients are encoded as a difference
2496 * of two non-negative variables, so we need to perform the subtraction
2497 * here. Moreover, if use_cmap is set, then the solution does
2498 * not refer to the actual coefficients c_i_x, but instead to variables
2499 * t_i_x such that c_i_x = Q t_i_x and Q is equal to node->cmap.
2500 * In this case, we then also need to perform this multiplication
2501 * to obtain the values of c_i_x.
2503 * If coincident is set, then the caller guarantees that the new
2504 * row satisfies the coincidence constraints.
2506 static int update_schedule(struct isl_sched_graph *graph,
2507 __isl_take isl_vec *sol, int use_cmap, int coincident)
2509 int i, j;
2510 isl_vec *csol = NULL;
2512 if (!sol)
2513 goto error;
2514 if (sol->size == 0)
2515 isl_die(sol->ctx, isl_error_internal,
2516 "no solution found", goto error);
2517 if (graph->n_total_row >= graph->max_row)
2518 isl_die(sol->ctx, isl_error_internal,
2519 "too many schedule rows", goto error);
2521 for (i = 0; i < graph->n; ++i) {
2522 struct isl_sched_node *node = &graph->node[i];
2523 int pos = node->start;
2524 int row = isl_mat_rows(node->sched);
2526 isl_vec_free(csol);
2527 csol = isl_vec_alloc(sol->ctx, node->nvar);
2528 if (!csol)
2529 goto error;
2531 isl_map_free(node->sched_map);
2532 node->sched_map = NULL;
2533 node->sched = isl_mat_add_rows(node->sched, 1);
2534 if (!node->sched)
2535 goto error;
2536 node->sched = isl_mat_set_element(node->sched, row, 0,
2537 sol->el[1 + pos]);
2538 for (j = 0; j < node->nparam + node->nvar; ++j)
2539 isl_int_sub(sol->el[1 + pos + 1 + 2 * j + 1],
2540 sol->el[1 + pos + 1 + 2 * j + 1],
2541 sol->el[1 + pos + 1 + 2 * j]);
2542 for (j = 0; j < node->nparam; ++j)
2543 node->sched = isl_mat_set_element(node->sched,
2544 row, 1 + j, sol->el[1+pos+1+2*j+1]);
2545 for (j = 0; j < node->nvar; ++j)
2546 isl_int_set(csol->el[j],
2547 sol->el[1+pos+1+2*(node->nparam+j)+1]);
2548 if (use_cmap)
2549 csol = isl_mat_vec_product(isl_mat_copy(node->cmap),
2550 csol);
2551 if (!csol)
2552 goto error;
2553 for (j = 0; j < node->nvar; ++j)
2554 node->sched = isl_mat_set_element(node->sched,
2555 row, 1 + node->nparam + j, csol->el[j]);
2556 node->coincident[graph->n_total_row] = coincident;
2558 isl_vec_free(sol);
2559 isl_vec_free(csol);
2561 graph->n_row++;
2562 graph->n_total_row++;
2564 return 0;
2565 error:
2566 isl_vec_free(sol);
2567 isl_vec_free(csol);
2568 return -1;
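/* For example, if the solution assigns 0 to the negative part and 2 to
 * the positive part of some coefficient, the in-place subtraction above
 * leaves 2 - 0 = 2 in the positive slot, which is then copied into the
 * new schedule row; a pair with negative part 1 and positive part 0
 * similarly decodes to -1.  With use_cmap set, the decoded entries form
 * t_i_x and the row stores cmap t_i_x instead.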
2571 /* Convert row "row" of node->sched into an isl_aff living in "ls"
2572 * and return this isl_aff.
2574 static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
2575 struct isl_sched_node *node, int row)
2577 int j;
2578 isl_int v;
2579 isl_aff *aff;
2581 isl_int_init(v);
2583 aff = isl_aff_zero_on_domain(ls);
2584 isl_mat_get_element(node->sched, row, 0, &v);
2585 aff = isl_aff_set_constant(aff, v);
2586 for (j = 0; j < node->nparam; ++j) {
2587 isl_mat_get_element(node->sched, row, 1 + j, &v);
2588 aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
2590 for (j = 0; j < node->nvar; ++j) {
2591 isl_mat_get_element(node->sched, row, 1 + node->nparam + j, &v);
2592 aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
2595 isl_int_clear(v);
2597 return aff;
2600 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
2601 * and return this multi_aff.
2603 * The result is defined over the uncompressed node domain.
2605 static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
2606 struct isl_sched_node *node, int first, int n)
2608 int i;
2609 isl_space *space;
2610 isl_local_space *ls;
2611 isl_aff *aff;
2612 isl_multi_aff *ma;
2613 int nrow;
2615 if (!node)
2616 return NULL;
2617 nrow = isl_mat_rows(node->sched);
2618 if (node->compressed)
2619 space = isl_multi_aff_get_domain_space(node->decompress);
2620 else
2621 space = isl_space_copy(node->space);
2622 ls = isl_local_space_from_space(isl_space_copy(space));
2623 space = isl_space_from_domain(space);
2624 space = isl_space_add_dims(space, isl_dim_out, n);
2625 ma = isl_multi_aff_zero(space);
2627 for (i = first; i < first + n; ++i) {
2628 aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
2629 ma = isl_multi_aff_set_aff(ma, i - first, aff);
2632 isl_local_space_free(ls);
2634 if (node->compressed)
2635 ma = isl_multi_aff_pullback_multi_aff(ma,
2636 isl_multi_aff_copy(node->compress));
2638 return ma;
2641 /* Convert node->sched into a multi_aff and return this multi_aff.
2643 * The result is defined over the uncompressed node domain.
2645 static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
2646 struct isl_sched_node *node)
2648 int nrow;
2650 nrow = isl_mat_rows(node->sched);
2651 return node_extract_partial_schedule_multi_aff(node, 0, nrow);
2654 /* Convert node->sched into a map and return this map.
2656 * The result is cached in node->sched_map, which needs to be released
2657 * whenever node->sched is updated.
2658 * It is defined over the uncompressed node domain.
2660 static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
2662 if (!node->sched_map) {
2663 isl_multi_aff *ma;
2665 ma = node_extract_schedule_multi_aff(node);
2666 node->sched_map = isl_map_from_multi_aff(ma);
2669 return isl_map_copy(node->sched_map);
2672 /* Construct a map that can be used to update a dependence relation
2673 * based on the current schedule.
2674 * That is, construct a map expressing that source and sink
2675 * are executed within the same iteration of the current schedule.
2676 * This map can then be intersected with the dependence relation.
2677 * This is not the most efficient way, but this shouldn't be a critical
2678 * operation.
2680 static __isl_give isl_map *specializer(struct isl_sched_node *src,
2681 struct isl_sched_node *dst)
2683 isl_map *src_sched, *dst_sched;
2685 src_sched = node_extract_schedule(src);
2686 dst_sched = node_extract_schedule(dst);
2687 return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
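/* For example (with made-up schedules), if src is currently scheduled
 * by { S[i] -> [i] } and dst by { T[j] -> [j + 1] }, then the map
 * returned by specializer is { S[i] -> T[j] : i = j + 1 }, relating
 * exactly the instances that the current schedule maps to the same
 * point.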
2690 /* Intersect the domains of the nested relations in domain and range
2691 * of "umap" with "map".
2693 static __isl_give isl_union_map *intersect_domains(
2694 __isl_take isl_union_map *umap, __isl_keep isl_map *map)
2696 isl_union_set *uset;
2698 umap = isl_union_map_zip(umap);
2699 uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
2700 umap = isl_union_map_intersect_domain(umap, uset);
2701 umap = isl_union_map_zip(umap);
2702 return umap;
2705 /* Update the dependence relation of the given edge based
2706 * on the current schedule.
2707 * If the dependence is carried completely by the current schedule, then
2708 * it is removed from the edge_tables. It is kept in the list of edges
2709 * as otherwise all edge_tables would have to be recomputed.
2711 static int update_edge(struct isl_sched_graph *graph,
2712 struct isl_sched_edge *edge)
2714 int empty;
2715 isl_map *id;
2717 id = specializer(edge->src, edge->dst);
2718 edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
2719 if (!edge->map)
2720 goto error;
2722 if (edge->tagged_condition) {
2723 edge->tagged_condition =
2724 intersect_domains(edge->tagged_condition, id);
2725 if (!edge->tagged_condition)
2726 goto error;
2728 if (edge->tagged_validity) {
2729 edge->tagged_validity =
2730 intersect_domains(edge->tagged_validity, id);
2731 if (!edge->tagged_validity)
2732 goto error;
2735 empty = isl_map_plain_is_empty(edge->map);
2736 if (empty < 0)
2737 goto error;
2738 if (empty)
2739 graph_remove_edge(graph, edge);
2741 isl_map_free(id);
2742 return 0;
2743 error:
2744 isl_map_free(id);
2745 return -1;
2748 /* Does the domain of "umap" intersect "uset"?
2750 static int domain_intersects(__isl_keep isl_union_map *umap,
2751 __isl_keep isl_union_set *uset)
2753 int empty;
2755 umap = isl_union_map_copy(umap);
2756 umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
2757 empty = isl_union_map_is_empty(umap);
2758 isl_union_map_free(umap);
2760 return empty < 0 ? -1 : !empty;
2763 /* Does the range of "umap" intersect "uset"?
2765 static int range_intersects(__isl_keep isl_union_map *umap,
2766 __isl_keep isl_union_set *uset)
2768 int empty;
2770 umap = isl_union_map_copy(umap);
2771 umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
2772 empty = isl_union_map_is_empty(umap);
2773 isl_union_map_free(umap);
2775 return empty < 0 ? -1 : !empty;
2778 /* Are the condition dependences of "edge" local with respect to
2779 * the current schedule?
2781 * That is, are domain and range of the condition dependences mapped
2782 * to the same point?
2784 * In other words, is the condition false?
2786 static int is_condition_false(struct isl_sched_edge *edge)
2788 isl_union_map *umap;
2789 isl_map *map, *sched, *test;
2790 int empty, local;
2792 empty = isl_union_map_is_empty(edge->tagged_condition);
2793 if (empty < 0 || empty)
2794 return empty;
2796 umap = isl_union_map_copy(edge->tagged_condition);
2797 umap = isl_union_map_zip(umap);
2798 umap = isl_union_set_unwrap(isl_union_map_domain(umap));
2799 map = isl_map_from_union_map(umap);
2801 sched = node_extract_schedule(edge->src);
2802 map = isl_map_apply_domain(map, sched);
2803 sched = node_extract_schedule(edge->dst);
2804 map = isl_map_apply_range(map, sched);
2806 test = isl_map_identity(isl_map_get_space(map));
2807 local = isl_map_is_subset(map, test);
2808 isl_map_free(map);
2809 isl_map_free(test);
2811 return local;
2814 /* For each conditional validity constraint that is adjacent
2815 * to a condition with domain in condition_source or range in condition_sink,
2816 * turn it into an unconditional validity constraint.
2818 static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
2819 __isl_take isl_union_set *condition_source,
2820 __isl_take isl_union_set *condition_sink)
2822 int i;
2824 condition_source = isl_union_set_coalesce(condition_source);
2825 condition_sink = isl_union_set_coalesce(condition_sink);
2827 for (i = 0; i < graph->n_edge; ++i) {
2828 int adjacent;
2829 isl_union_map *validity;
2831 if (!is_conditional_validity(&graph->edge[i]))
2832 continue;
2833 if (is_validity(&graph->edge[i]))
2834 continue;
2836 validity = graph->edge[i].tagged_validity;
2837 adjacent = domain_intersects(validity, condition_sink);
2838 if (adjacent >= 0 && !adjacent)
2839 adjacent = range_intersects(validity, condition_source);
2840 if (adjacent < 0)
2841 goto error;
2842 if (!adjacent)
2843 continue;
2845 set_validity(&graph->edge[i]);
2848 isl_union_set_free(condition_source);
2849 isl_union_set_free(condition_sink);
2850 return 0;
2851 error:
2852 isl_union_set_free(condition_source);
2853 isl_union_set_free(condition_sink);
2854 return -1;
2857 /* Update the dependence relations of all edges based on the current schedule
2858 * and enforce conditional validity constraints that are adjacent
2859 * to satisfied condition constraints.
2861 * First check if any of the condition constraints are satisfied
2862 * (i.e., not local to the outer schedule) and keep track of
2863 * their domain and range.
2864 * Then update all dependence relations (which removes the non-local
2865 * constraints).
2866 * Finally, if any condition constraints turned out to be satisfied,
2867 * then turn all adjacent conditional validity constraints into
2868 * unconditional validity constraints.
2870 static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
2872 int i;
2873 int any = 0;
2874 isl_union_set *source, *sink;
2876 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
2877 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
2878 for (i = 0; i < graph->n_edge; ++i) {
2879 int local;
2880 isl_union_set *uset;
2881 isl_union_map *umap;
2883 if (!is_condition(&graph->edge[i]))
2884 continue;
2885 if (is_local(&graph->edge[i]))
2886 continue;
2887 local = is_condition_false(&graph->edge[i]);
2888 if (local < 0)
2889 goto error;
2890 if (local)
2891 continue;
2893 any = 1;
2895 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
2896 uset = isl_union_map_domain(umap);
2897 source = isl_union_set_union(source, uset);
2899 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
2900 uset = isl_union_map_range(umap);
2901 sink = isl_union_set_union(sink, uset);
2904 for (i = graph->n_edge - 1; i >= 0; --i) {
2905 if (update_edge(graph, &graph->edge[i]) < 0)
2906 goto error;
2909 if (any)
2910 return unconditionalize_adjacent_validity(graph, source, sink);
2912 isl_union_set_free(source);
2913 isl_union_set_free(sink);
2914 return 0;
2915 error:
2916 isl_union_set_free(source);
2917 isl_union_set_free(sink);
2918 return -1;
2921 static void next_band(struct isl_sched_graph *graph)
2923 graph->band_start = graph->n_total_row;
2926 /* Return the union of the universe domains of the nodes in "graph"
2927 * that satisfy "pred".
2929 static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
2930 struct isl_sched_graph *graph,
2931 int (*pred)(struct isl_sched_node *node, int data), int data)
2933 int i;
2934 isl_set *set;
2935 isl_union_set *dom;
2937 for (i = 0; i < graph->n; ++i)
2938 if (pred(&graph->node[i], data))
2939 break;
2941 if (i >= graph->n)
2942 isl_die(ctx, isl_error_internal,
2943 "empty component", return NULL);
2945 set = isl_set_universe(isl_space_copy(graph->node[i].space));
2946 dom = isl_union_set_from_set(set);
2948 for (i = i + 1; i < graph->n; ++i) {
2949 if (!pred(&graph->node[i], data))
2950 continue;
2951 set = isl_set_universe(isl_space_copy(graph->node[i].space));
2952 dom = isl_union_set_union(dom, isl_union_set_from_set(set));
2955 return dom;
2958 /* Return a list of unions of universe domains, where each element
2959 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
2961 static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
2962 struct isl_sched_graph *graph)
2964 int i;
2965 isl_union_set_list *filters;
2967 filters = isl_union_set_list_alloc(ctx, graph->scc);
2968 for (i = 0; i < graph->scc; ++i) {
2969 isl_union_set *dom;
2971 dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
2972 filters = isl_union_set_list_add(filters, dom);
2975 return filters;
2978 /* Return a list of two unions of universe domains, one for the SCCs up
2979 * to and including graph->src_scc and another for the other SCCs.
2981 static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
2982 struct isl_sched_graph *graph)
2984 isl_union_set *dom;
2985 isl_union_set_list *filters;
2987 filters = isl_union_set_list_alloc(ctx, 2);
2988 dom = isl_sched_graph_domain(ctx, graph,
2989 &node_scc_at_most, graph->src_scc);
2990 filters = isl_union_set_list_add(filters, dom);
2991 dom = isl_sched_graph_domain(ctx, graph,
2992 &node_scc_at_least, graph->src_scc + 1);
2993 filters = isl_union_set_list_add(filters, dom);
2995 return filters;
2998 /* Copy nodes that satisfy node_pred from the src dependence graph
2999 * to the dst dependence graph.
3001 static int copy_nodes(struct isl_sched_graph *dst, struct isl_sched_graph *src,
3002 int (*node_pred)(struct isl_sched_node *node, int data), int data)
3004 int i;
3006 dst->n = 0;
3007 for (i = 0; i < src->n; ++i) {
3008 int j;
3010 if (!node_pred(&src->node[i], data))
3011 continue;
3013 j = dst->n;
3014 dst->node[j].space = isl_space_copy(src->node[i].space);
3015 dst->node[j].compressed = src->node[i].compressed;
3016 dst->node[j].hull = isl_set_copy(src->node[i].hull);
3017 dst->node[j].compress =
3018 isl_multi_aff_copy(src->node[i].compress);
3019 dst->node[j].decompress =
3020 isl_multi_aff_copy(src->node[i].decompress);
3021 dst->node[j].nvar = src->node[i].nvar;
3022 dst->node[j].nparam = src->node[i].nparam;
3023 dst->node[j].sched = isl_mat_copy(src->node[i].sched);
3024 dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
3025 dst->node[j].coincident = src->node[i].coincident;
3026 dst->n++;
3028 if (!dst->node[j].space || !dst->node[j].sched)
3029 return -1;
3030 if (dst->node[j].compressed &&
3031 (!dst->node[j].hull || !dst->node[j].compress ||
3032 !dst->node[j].decompress))
3033 return -1;
3036 return 0;
3039 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3040 * to the dst dependence graph.
3041 * If the source or destination node of the edge is not in the destination
3042 * graph, then it must be a backward proximity edge and it should simply
3043 * be ignored.
3045 static int copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
3046 struct isl_sched_graph *src,
3047 int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
3049 int i;
3050 enum isl_edge_type t;
3052 dst->n_edge = 0;
3053 for (i = 0; i < src->n_edge; ++i) {
3054 struct isl_sched_edge *edge = &src->edge[i];
3055 isl_map *map;
3056 isl_union_map *tagged_condition;
3057 isl_union_map *tagged_validity;
3058 struct isl_sched_node *dst_src, *dst_dst;
3060 if (!edge_pred(edge, data))
3061 continue;
3063 if (isl_map_plain_is_empty(edge->map))
3064 continue;
3066 dst_src = graph_find_node(ctx, dst, edge->src->space);
3067 dst_dst = graph_find_node(ctx, dst, edge->dst->space);
3068 if (!dst_src || !dst_dst) {
3069 if (is_validity(edge) || is_conditional_validity(edge))
3070 isl_die(ctx, isl_error_internal,
3071 "backward (conditional) validity edge",
3072 return -1);
3073 continue;
3076 map = isl_map_copy(edge->map);
3077 tagged_condition = isl_union_map_copy(edge->tagged_condition);
3078 tagged_validity = isl_union_map_copy(edge->tagged_validity);
3080 dst->edge[dst->n_edge].src = dst_src;
3081 dst->edge[dst->n_edge].dst = dst_dst;
3082 dst->edge[dst->n_edge].map = map;
3083 dst->edge[dst->n_edge].tagged_condition = tagged_condition;
3084 dst->edge[dst->n_edge].tagged_validity = tagged_validity;
3085 dst->edge[dst->n_edge].types = edge->types;
3086 dst->n_edge++;
3088 if (edge->tagged_condition && !tagged_condition)
3089 return -1;
3090 if (edge->tagged_validity && !tagged_validity)
3091 return -1;
3093 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
3094 if (edge !=
3095 graph_find_edge(src, t, edge->src, edge->dst))
3096 continue;
3097 if (graph_edge_table_add(ctx, dst, t,
3098 &dst->edge[dst->n_edge - 1]) < 0)
3099 return -1;
3103 return 0;
3106 /* Compute the maximal number of variables over all nodes.
3107 * This is the maximal number of linearly independent schedule
3108 * rows that we need to compute.
3109 * Just in case we end up in a part of the dependence graph
3110 * with only lower-dimensional domains, we make sure we will
3111 * compute the required amount of extra linearly independent rows.
3113 static int compute_maxvar(struct isl_sched_graph *graph)
3115 int i;
3117 graph->maxvar = 0;
3118 for (i = 0; i < graph->n; ++i) {
3119 struct isl_sched_node *node = &graph->node[i];
3120 int nvar;
3122 if (node_update_cmap(node) < 0)
3123 return -1;
3124 nvar = node->nvar + graph->n_row - node->rank;
3125 if (nvar > graph->maxvar)
3126 graph->maxvar = nvar;
3129 return 0;
3132 * Extract the subgraph of "graph" that consists of the nodes satisfying
3133 * "node_pred" and the edges satisfying "edge_pred" and store
3134 * the result in "sub".
3136 static int extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
3137 int (*node_pred)(struct isl_sched_node *node, int data),
3138 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3139 int data, struct isl_sched_graph *sub)
3141 int i, n = 0, n_edge = 0;
3142 int t;
3144 for (i = 0; i < graph->n; ++i)
3145 if (node_pred(&graph->node[i], data))
3146 ++n;
3147 for (i = 0; i < graph->n_edge; ++i)
3148 if (edge_pred(&graph->edge[i], data))
3149 ++n_edge;
3150 if (graph_alloc(ctx, sub, n, n_edge) < 0)
3151 return -1;
3152 if (copy_nodes(sub, graph, node_pred, data) < 0)
3153 return -1;
3154 if (graph_init_table(ctx, sub) < 0)
3155 return -1;
3156 for (t = 0; t <= isl_edge_last; ++t)
3157 sub->max_edge[t] = graph->max_edge[t];
3158 if (graph_init_edge_tables(ctx, sub) < 0)
3159 return -1;
3160 if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
3161 return -1;
3162 sub->n_row = graph->n_row;
3163 sub->max_row = graph->max_row;
3164 sub->n_total_row = graph->n_total_row;
3165 sub->band_start = graph->band_start;
3167 return 0;
3170 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
3171 struct isl_sched_graph *graph);
3172 static __isl_give isl_schedule_node *compute_schedule_wcc(
3173 isl_schedule_node *node, struct isl_sched_graph *graph);
3175 /* Compute a schedule for a subgraph of "graph". In particular, for
3176 * the graph composed of nodes that satisfy node_pred and edges
3177 * that satisfy edge_pred.
3178 * If the subgraph is known to consist of a single component, then wcc should
3179 * be set and then we call compute_schedule_wcc on the constructed subgraph.
3180 * Otherwise, we call compute_schedule, which will check whether the subgraph
3181 * is connected.
3183 * The schedule is inserted at "node" and the updated schedule node
3184 * is returned.
3186 static __isl_give isl_schedule_node *compute_sub_schedule(
3187 __isl_take isl_schedule_node *node, isl_ctx *ctx,
3188 struct isl_sched_graph *graph,
3189 int (*node_pred)(struct isl_sched_node *node, int data),
3190 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3191 int data, int wcc)
3193 struct isl_sched_graph split = { 0 };
3195 if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
3196 &split) < 0)
3197 goto error;
3199 if (wcc)
3200 node = compute_schedule_wcc(node, &split);
3201 else
3202 node = compute_schedule(node, &split);
3204 graph_free(ctx, &split);
3205 return node;
3206 error:
3207 graph_free(ctx, &split);
3208 return isl_schedule_node_free(node);
3211 static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
3213 return edge->src->scc == scc && edge->dst->scc == scc;
3216 static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
3218 return edge->dst->scc <= scc;
3221 static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
3223 return edge->src->scc >= scc;
3226 /* Reset the current band by dropping all its schedule rows.
3228 static int reset_band(struct isl_sched_graph *graph)
3230 int i;
3231 int drop;
3233 drop = graph->n_total_row - graph->band_start;
3234 graph->n_total_row -= drop;
3235 graph->n_row -= drop;
3237 for (i = 0; i < graph->n; ++i) {
3238 struct isl_sched_node *node = &graph->node[i];
3240 isl_map_free(node->sched_map);
3241 node->sched_map = NULL;
3243 node->sched = isl_mat_drop_rows(node->sched,
3244 graph->band_start, drop);
3246 if (!node->sched)
3247 return -1;
3250 return 0;
3253 /* Split the current graph into two parts and compute a schedule for each
3254 * part individually. In particular, one part consists of all SCCs up
3255 * to and including graph->src_scc, while the other part contains the other
3256 * SCCs. The split is enforced by a sequence node inserted at position "node"
3257 * in the schedule tree. Return the updated schedule node.
3258 * If either of these two parts consists of a sequence, then it is spliced
3259 * into the sequence containing the two parts.
3261 * The current band is reset. It would be possible to reuse
3262 * the previously computed rows as the first rows in the next
3263 * band, but recomputing them may result in better rows as we are looking
3264 * at a smaller part of the dependence graph.
3266 static __isl_give isl_schedule_node *compute_split_schedule(
3267 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3269 int is_seq;
3270 isl_ctx *ctx;
3271 isl_union_set_list *filters;
3273 if (!node)
3274 return NULL;
3276 if (reset_band(graph) < 0)
3277 return isl_schedule_node_free(node);
3279 next_band(graph);
3281 ctx = isl_schedule_node_get_ctx(node);
3282 filters = extract_split(ctx, graph);
3283 node = isl_schedule_node_insert_sequence(node, filters);
3284 node = isl_schedule_node_child(node, 1);
3285 node = isl_schedule_node_child(node, 0);
3287 node = compute_sub_schedule(node, ctx, graph,
3288 &node_scc_at_least, &edge_src_scc_at_least,
3289 graph->src_scc + 1, 0);
3290 is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3291 node = isl_schedule_node_parent(node);
3292 node = isl_schedule_node_parent(node);
3293 if (is_seq)
3294 node = isl_schedule_node_sequence_splice_child(node, 1);
3295 node = isl_schedule_node_child(node, 0);
3296 node = isl_schedule_node_child(node, 0);
3297 node = compute_sub_schedule(node, ctx, graph,
3298 &node_scc_at_most, &edge_dst_scc_at_most,
3299 graph->src_scc, 0);
3300 is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3301 node = isl_schedule_node_parent(node);
3302 node = isl_schedule_node_parent(node);
3303 if (is_seq)
3304 node = isl_schedule_node_sequence_splice_child(node, 0);
3306 return node;
3309 /* Insert a band node at position "node" in the schedule tree corresponding
3310 * to the current band in "graph". Mark the band node permutable
3311 * if "permutable" is set.
3312 * The partial schedules and the coincidence property are extracted
3313 * from the graph nodes.
3314 * Return the updated schedule node.
3316 static __isl_give isl_schedule_node *insert_current_band(
3317 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3318 int permutable)
3320 int i;
3321 int start, end, n;
3322 isl_multi_aff *ma;
3323 isl_multi_pw_aff *mpa;
3324 isl_multi_union_pw_aff *mupa;
3326 if (!node)
3327 return NULL;
3329 if (graph->n < 1)
3330 isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
3331 "graph should have at least one node",
3332 return isl_schedule_node_free(node));
3334 start = graph->band_start;
3335 end = graph->n_total_row;
3336 n = end - start;
3338 ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
3339 mpa = isl_multi_pw_aff_from_multi_aff(ma);
3340 mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
3342 for (i = 1; i < graph->n; ++i) {
3343 isl_multi_union_pw_aff *mupa_i;
3345 ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
3346 start, n);
3347 mpa = isl_multi_pw_aff_from_multi_aff(ma);
3348 mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
3349 mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
3351 node = isl_schedule_node_insert_partial_schedule(node, mupa);
3353 for (i = 0; i < n; ++i)
3354 node = isl_schedule_node_band_member_set_coincident(node, i,
3355 graph->node[0].coincident[start + i]);
3356 node = isl_schedule_node_band_set_permutable(node, permutable);
3358 return node;
3361 /* Update the dependence relations based on the current schedule,
3362 * add the current band to "node" and then continue with the computation
3363 * of the next band.
3364 * Return the updated schedule node.
3366 static __isl_give isl_schedule_node *compute_next_band(
3367 __isl_take isl_schedule_node *node,
3368 struct isl_sched_graph *graph, int permutable)
3370 isl_ctx *ctx;
3372 if (!node)
3373 return NULL;
3375 ctx = isl_schedule_node_get_ctx(node);
3376 if (update_edges(ctx, graph) < 0)
3377 return isl_schedule_node_free(node);
3378 node = insert_current_band(node, graph, permutable);
3379 next_band(graph);
3381 node = isl_schedule_node_child(node, 0);
3382 node = compute_schedule(node, graph);
3383 node = isl_schedule_node_parent(node);
3385 return node;
3388 /* Add constraints to graph->lp that force the dependence "map" (which
3389 * is part of the dependence relation of "edge")
3390 * to be respected and attempt to carry it, where the edge is one from
3391 * a node j to itself. "pos" is the sequence number of the given map.
3392 * That is, add constraints that enforce
3394 * (c_j_0 + c_j_n n + c_j_x y) - (c_j_0 + c_j_n n + c_j_x x)
3395 * = c_j_x (y - x) >= e_i
3397 * for each (x,y) in R.
3398 * We obtain general constraints on coefficients (c_0, c_n, c_x)
3399 * of valid constraints for (y - x) and then plug in (-e_i, 0, c_j_x),
3400 * with each coefficient in c_j_x represented as a pair of non-negative
3401 * coefficients.
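 *
 * For instance (a hypothetical self-dependence, only for illustration),
 * if the basic map were { S[i] -> S[i + 1] } and node j had a single
 * coefficient c_j_x, the added constraints would enforce c_j_x >= e_i,
 * so this part of the dependence can only be carried (e_i = 1) by a
 * schedule row that makes the distance at least 1.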
3403 static int add_intra_constraints(struct isl_sched_graph *graph,
3404 struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
3406 unsigned total;
3407 isl_ctx *ctx = isl_map_get_ctx(map);
3408 isl_space *dim;
3409 isl_dim_map *dim_map;
3410 isl_basic_set *coef;
3411 struct isl_sched_node *node = edge->src;
3413 coef = intra_coefficients(graph, node, map);
3414 if (!coef)
3415 return -1;
3417 dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));
3419 total = isl_basic_set_total_dim(graph->lp);
3420 dim_map = isl_dim_map_alloc(ctx, total);
3421 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
3422 isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
3423 isl_space_dim(dim, isl_dim_set), 1,
3424 node->nvar, -1);
3425 isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
3426 isl_space_dim(dim, isl_dim_set), 1,
3427 node->nvar, 1);
3428 graph->lp = isl_basic_set_extend_constraints(graph->lp,
3429 coef->n_eq, coef->n_ineq);
3430 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
3431 coef, dim_map);
3432 isl_space_free(dim);
3434 return 0;
3437 /* Add constraints to graph->lp that force the dependence "map" (which
3438 * is part of the dependence relation of "edge")
3439 * to be respected and attempt to carry it, where the edge is one from
3440 * node j to node k. "pos" is the sequence number of the given map.
3441 * That is, add constraints that enforce
3443 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
3445 * for each (x,y) in R.
3446 * We obtain general constraints on coefficients (c_0, c_n, c_x)
3447 * of valid constraints for R and then plug in
3448 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, c_k_x - c_j_x)
3449 * with each coefficient (except e_i, c_k_0 and c_j_0)
3450 * represented as a pair of non-negative coefficients.
3452 static int add_inter_constraints(struct isl_sched_graph *graph,
3453 struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
3455 unsigned total;
3456 isl_ctx *ctx = isl_map_get_ctx(map);
3457 isl_space *dim;
3458 isl_dim_map *dim_map;
3459 isl_basic_set *coef;
3460 struct isl_sched_node *src = edge->src;
3461 struct isl_sched_node *dst = edge->dst;
3463 coef = inter_coefficients(graph, edge, map);
3464 if (!coef)
3465 return -1;
3467 dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));
3469 total = isl_basic_set_total_dim(graph->lp);
3470 dim_map = isl_dim_map_alloc(ctx, total);
3472 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
3474 isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, 1);
3475 isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -1);
3476 isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, 1);
3477 isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
3478 isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
3479 dst->nvar, -1);
3480 isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
3481 isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
3482 dst->nvar, 1);
3484 isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -1);
3485 isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, 1);
3486 isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -1);
3487 isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
3488 isl_space_dim(dim, isl_dim_set), 1,
3489 src->nvar, 1);
3490 isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
3491 isl_space_dim(dim, isl_dim_set), 1,
3492 src->nvar, -1);
3494 graph->lp = isl_basic_set_extend_constraints(graph->lp,
3495 coef->n_eq, coef->n_ineq);
3496 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
3497 coef, dim_map);
3498 isl_space_free(dim);
3500 return 0;
3503 /* Add constraints to graph->lp that force all (conditional) validity
3504 * dependences to be respected and attempt to carry them.
3506 static int add_all_constraints(struct isl_sched_graph *graph)
3508 int i, j;
3509 int pos;
3511 pos = 0;
3512 for (i = 0; i < graph->n_edge; ++i) {
3513 struct isl_sched_edge *edge = &graph->edge[i];
3515 if (!is_validity(edge) && !is_conditional_validity(edge))
3516 continue;
3518 for (j = 0; j < edge->map->n; ++j) {
3519 isl_basic_map *bmap;
3520 isl_map *map;
3522 bmap = isl_basic_map_copy(edge->map->p[j]);
3523 map = isl_map_from_basic_map(bmap);
3525 if (edge->src == edge->dst &&
3526 add_intra_constraints(graph, edge, map, pos) < 0)
3527 return -1;
3528 if (edge->src != edge->dst &&
3529 add_inter_constraints(graph, edge, map, pos) < 0)
3530 return -1;
3531 ++pos;
3535 return 0;
3538 /* Count the number of equality and inequality constraints
3539 * that will be added to the carry_lp problem.
3540 * We count each edge exactly once.
3542 static int count_all_constraints(struct isl_sched_graph *graph,
3543 int *n_eq, int *n_ineq)
3545 int i, j;
3547 *n_eq = *n_ineq = 0;
3548 for (i = 0; i < graph->n_edge; ++i) {
3549 struct isl_sched_edge *edge = &graph->edge[i];
3550 for (j = 0; j < edge->map->n; ++j) {
3551 isl_basic_map *bmap;
3552 isl_map *map;
3554 bmap = isl_basic_map_copy(edge->map->p[j]);
3555 map = isl_map_from_basic_map(bmap);
3557 if (count_map_constraints(graph, edge, map,
3558 n_eq, n_ineq, 1, 0) < 0)
3559 return -1;
3563 return 0;
3566 /* Construct an LP problem for finding schedule coefficients
3567 * such that the schedule carries as many dependences as possible.
3568 * In particular, for each dependence i, we bound the dependence distance
3569 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
3570 * of all e_i's. Dependences with e_i = 0 in the solution are simply
3571 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
3572 * Note that if the dependence relation is a union of basic maps,
3573 * then we have to consider each basic map individually as it may only
3574 * be possible to carry the dependences expressed by some of those
3575 * basic maps and not all of them.
3576 * Below, we consider each of those basic maps as a separate "edge".
3578 * All variables of the LP are non-negative. The actual coefficients
3579 * may be negative, so each coefficient is represented as the difference
3580 * of two non-negative variables. The negative part always appears
3581 * immediately before the positive part.
3582 * Other than that, the variables have the following order
3584 * - sum of (1 - e_i) over all edges
3585 * - sum of positive and negative parts of all c_n coefficients
3586 * (unconstrained when computing non-parametric schedules)
3587 * - sum of positive and negative parts of all c_x coefficients
3588 * - for each edge
3589 * - e_i
3590 * - for each node
3591 * - c_i_0
3592 * - positive and negative parts of c_i_n (if parametric)
3593 * - positive and negative parts of c_i_x
3595 * The constraints are those from the (validity) edges plus three equalities
3596 * to express the sums and n_edge inequalities to express e_i <= 1.
3598 static int setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
3600 int i, j;
3601 int k;
3602 isl_space *dim;
3603 unsigned total;
3604 int n_eq, n_ineq;
3605 int n_edge;
3607 n_edge = 0;
3608 for (i = 0; i < graph->n_edge; ++i)
3609 n_edge += graph->edge[i].map->n;
3611 total = 3 + n_edge;
3612 for (i = 0; i < graph->n; ++i) {
3613 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
3614 node->start = total;
3615 total += 1 + 2 * (node->nparam + node->nvar);
3618 if (count_all_constraints(graph, &n_eq, &n_ineq) < 0)
3619 return -1;
3621 dim = isl_space_set_alloc(ctx, 0, total);
3622 isl_basic_set_free(graph->lp);
3623 n_eq += 3;
3624 n_ineq += n_edge;
3625 graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
3626 graph->lp = isl_basic_set_set_rational(graph->lp);
3628 k = isl_basic_set_alloc_equality(graph->lp);
3629 if (k < 0)
3630 return -1;
3631 isl_seq_clr(graph->lp->eq[k], 1 + total);
3632 isl_int_set_si(graph->lp->eq[k][0], -n_edge);
3633 isl_int_set_si(graph->lp->eq[k][1], 1);
3634 for (i = 0; i < n_edge; ++i)
3635 isl_int_set_si(graph->lp->eq[k][4 + i], 1);
3637 k = isl_basic_set_alloc_equality(graph->lp);
3638 if (k < 0)
3639 return -1;
3640 isl_seq_clr(graph->lp->eq[k], 1 + total);
3641 isl_int_set_si(graph->lp->eq[k][2], -1);
3642 for (i = 0; i < graph->n; ++i) {
3643 int pos = 1 + graph->node[i].start + 1;
3645 for (j = 0; j < 2 * graph->node[i].nparam; ++j)
3646 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
3649 k = isl_basic_set_alloc_equality(graph->lp);
3650 if (k < 0)
3651 return -1;
3652 isl_seq_clr(graph->lp->eq[k], 1 + total);
3653 isl_int_set_si(graph->lp->eq[k][3], -1);
3654 for (i = 0; i < graph->n; ++i) {
3655 struct isl_sched_node *node = &graph->node[i];
3656 int pos = 1 + node->start + 1 + 2 * node->nparam;
3658 for (j = 0; j < 2 * node->nvar; ++j)
3659 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
3662 for (i = 0; i < n_edge; ++i) {
3663 k = isl_basic_set_alloc_inequality(graph->lp);
3664 if (k < 0)
3665 return -1;
3666 isl_seq_clr(graph->lp->ineq[k], 1 + total);
3667 isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
3668 isl_int_set_si(graph->lp->ineq[k][0], 1);
3671 if (add_all_constraints(graph) < 0)
3672 return -1;
3674 return 0;
3677 static __isl_give isl_schedule_node *compute_component_schedule(
3678 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3679 int wcc);
3681 /* Comparison function for sorting the statements based on
3682 * the corresponding value in "r".
3684 static int smaller_value(const void *a, const void *b, void *data)
3686 isl_vec *r = data;
3687 const int *i1 = a;
3688 const int *i2 = b;
3690 return isl_int_cmp(r->el[*i1], r->el[*i2]);
3693 /* If the schedule_split_scaled option is set and if the linear
3694 * parts of the scheduling rows for all nodes in the graph have
3695 * a non-trivial common divisor, then split off the remainder of the
3696 * constant term modulo this common divisor from the linear part.
3697 * Otherwise, insert a band node directly and continue with
3698 * the construction of the schedule.
3700 * If a non-trivial common divisor is found, then
3701 * the linear part is reduced and the remainder is enforced
3702 * by a sequence node with the children placed in the order
3703 * of this remainder.
3704 * In particular, we assign an scc index based on the remainder and
3705 * then rely on compute_component_schedule to insert the sequence and
3706 * to continue the schedule construction on each part.
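 * A made-up example of this splitting (not taken from any actual run):
 * if the final rows of two nodes are 2 * i and 2 * j + 3, then the
 * common divisor of the linear parts is 2, the remainders of the
 * constant terms are 0 and 1, the rows are reduced to i and j + 1, and
 * the two nodes are placed in a sequence ordered by their remainder,
 * the first node before the second.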
3708 static __isl_give isl_schedule_node *split_scaled(
3709 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3711 int i;
3712 int row;
3713 int scc;
3714 isl_ctx *ctx;
3715 isl_int gcd, gcd_i;
3716 isl_vec *r;
3717 int *order;
3719 if (!node)
3720 return NULL;
3722 ctx = isl_schedule_node_get_ctx(node);
3723 if (!ctx->opt->schedule_split_scaled)
3724 return compute_next_band(node, graph, 0);
3725 if (graph->n <= 1)
3726 return compute_next_band(node, graph, 0);
3728 isl_int_init(gcd);
3729 isl_int_init(gcd_i);
3731 isl_int_set_si(gcd, 0);
3733 row = isl_mat_rows(graph->node[0].sched) - 1;
3735 for (i = 0; i < graph->n; ++i) {
3736 struct isl_sched_node *node = &graph->node[i];
3737 int cols = isl_mat_cols(node->sched);
3739 isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
3740 isl_int_gcd(gcd, gcd, gcd_i);
3743 isl_int_clear(gcd_i);
3745 if (isl_int_cmp_si(gcd, 1) <= 0) {
3746 isl_int_clear(gcd);
3747 return compute_next_band(node, graph, 0);
3750 r = isl_vec_alloc(ctx, graph->n);
3751 order = isl_calloc_array(ctx, int, graph->n);
3752 if (!r || !order)
3753 goto error;
3755 for (i = 0; i < graph->n; ++i) {
3756 struct isl_sched_node *node = &graph->node[i];
3758 order[i] = i;
3759 isl_int_fdiv_r(r->el[i], node->sched->row[row][0], gcd);
3760 isl_int_fdiv_q(node->sched->row[row][0],
3761 node->sched->row[row][0], gcd);
3762 isl_int_mul(node->sched->row[row][0],
3763 node->sched->row[row][0], gcd);
3764 node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
3765 if (!node->sched)
3766 goto error;
3769 if (isl_sort(order, graph->n, sizeof(order[0]), &smaller_value, r) < 0)
3770 goto error;
3772 scc = 0;
3773 for (i = 0; i < graph->n; ++i) {
3774 if (i > 0 && isl_int_ne(r->el[order[i - 1]], r->el[order[i]]))
3775 ++scc;
3776 graph->node[order[i]].scc = scc;
3778 graph->scc = ++scc;
3779 graph->weak = 0;
3781 isl_int_clear(gcd);
3782 isl_vec_free(r);
3783 free(order);
3785 if (update_edges(ctx, graph) < 0)
3786 return isl_schedule_node_free(node);
3787 node = insert_current_band(node, graph, 0);
3788 next_band(graph);
3790 node = isl_schedule_node_child(node, 0);
3791 node = compute_component_schedule(node, graph, 0);
3792 node = isl_schedule_node_parent(node);
3794 return node;
3795 error:
3796 isl_vec_free(r);
3797 free(order);
3798 isl_int_clear(gcd);
3799 return isl_schedule_node_free(node);
3802 /* Is the schedule row "sol" trivial on node "node"?
3803 * That is, is the solution zero on the dimensions orthogonal to
3804 * the previously found solutions?
3805 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
3807 * Each coefficient is represented as the difference between
3808 * two non-negative values in "sol". "sol" has been computed
3809 * in terms of the original iterators (i.e., without use of cmap).
3810 * We construct the schedule row s and write it as a linear
3811 * combination of (linear combinations of) previously computed schedule rows.
3812 * s = Q c or c = U s.
3813 * If the final entries of c are all zero, then the solution is trivial.
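 * As a hypothetical example (values invented for illustration):
 * for a node with nvar = 3 and rank = 2, the three coefficient
 * differences are multiplied by node->cinv and the row is trivial
 * exactly when the final nvar - rank = 1 entry of the result is zero.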
3815 static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
3817 int i;
3818 int pos;
3819 int trivial;
3820 isl_ctx *ctx;
3821 isl_vec *node_sol;
3823 if (!sol)
3824 return -1;
3825 if (node->nvar == node->rank)
3826 return 0;
3828 ctx = isl_vec_get_ctx(sol);
3829 node_sol = isl_vec_alloc(ctx, node->nvar);
3830 if (!node_sol)
3831 return -1;
3833 pos = 1 + node->start + 1 + 2 * node->nparam;
3835 for (i = 0; i < node->nvar; ++i)
3836 isl_int_sub(node_sol->el[i],
3837 sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);
3839 node_sol = isl_mat_vec_product(isl_mat_copy(node->cinv), node_sol);
3841 if (!node_sol)
3842 return -1;
3844 trivial = isl_seq_first_non_zero(node_sol->el + node->rank,
3845 node->nvar - node->rank) == -1;
3847 isl_vec_free(node_sol);
3849 return trivial;
3852 /* Is the schedule row "sol" trivial on any node where it should
3853 * not be trivial?
3854 * "sol" has been computed in terms of the original iterators
3855 * (i.e., without use of cmap).
3856 * Return 1 if the solution is trivial on any such node, 0 if it is not, and -1 on error.
3858 static int is_any_trivial(struct isl_sched_graph *graph,
3859 __isl_keep isl_vec *sol)
3861 int i;
3863 for (i = 0; i < graph->n; ++i) {
3864 struct isl_sched_node *node = &graph->node[i];
3865 int trivial;
3867 if (!needs_row(graph, node))
3868 continue;
3869 trivial = is_trivial(node, sol);
3870 if (trivial < 0 || trivial)
3871 return trivial;
3874 return 0;
3877 /* Construct a schedule row for each node such that as many dependences
3878 * as possible are carried and then continue with the next band.
3880 * Note that despite the fact that the problem is solved using a rational
3881 * solver, the solution is guaranteed to be integral.
3882 * Specifically, the dependence distance lower bounds e_i (and therefore
3883 * also their sum) are integers. See Lemma 5 of [1].
3885 * If the computed schedule row turns out to be trivial on one or
3886 * more nodes where it should not be trivial, then we throw it away
3887 * and try again on each component separately.
3889 * If there is only one component, then we accept the schedule row anyway,
3890 * but we do not consider it as a complete row and therefore do not
3891 * increment graph->n_row. Note that the ranks of the nodes that
3892 * do get a non-trivial schedule part will get updated regardless and
3893 * graph->maxvar is computed based on these ranks. The test for
3894 * whether more schedule rows are required in compute_schedule_wcc
3895 * is therefore not affected.
3897 * Insert a band corresponding to the schedule row at position "node"
3898 * of the schedule tree and continue with the construction of the schedule.
3899 * This insertion and the continued construction is performed by split_scaled
3900 * after optionally checking for non-trivial common divisors.
3902 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
3903 * Problem, Part II: Multi-Dimensional Time.
3904 * In Intl. Journal of Parallel Programming, 1992.
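 * A small made-up example of the check performed below: with n_edge = 3
 * and a solution that carries two of the three dependences
 * (e = (1, 1, 0)), the sum of (1 - e_i) stored in sol->el[1] is 1,
 * which is smaller than n_edge, so at least one dependence is carried.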
3906 static __isl_give isl_schedule_node *carry_dependences(
3907 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3909 int i;
3910 int n_edge;
3911 int trivial;
3912 isl_ctx *ctx;
3913 isl_vec *sol;
3914 isl_basic_set *lp;
3916 if (!node)
3917 return NULL;
3919 n_edge = 0;
3920 for (i = 0; i < graph->n_edge; ++i)
3921 n_edge += graph->edge[i].map->n;
3923 ctx = isl_schedule_node_get_ctx(node);
3924 if (setup_carry_lp(ctx, graph) < 0)
3925 return isl_schedule_node_free(node);
3927 lp = isl_basic_set_copy(graph->lp);
3928 sol = isl_tab_basic_set_non_neg_lexmin(lp);
3929 if (!sol)
3930 return isl_schedule_node_free(node);
3932 if (sol->size == 0) {
3933 isl_vec_free(sol);
3934 isl_die(ctx, isl_error_internal,
3935 "error in schedule construction",
3936 return isl_schedule_node_free(node));
3939 isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
3940 if (isl_int_cmp_si(sol->el[1], n_edge) >= 0) {
3941 isl_vec_free(sol);
3942 isl_die(ctx, isl_error_unknown,
3943 "unable to carry dependences",
3944 return isl_schedule_node_free(node));
3947 trivial = is_any_trivial(graph, sol);
3948 if (trivial < 0) {
3949 sol = isl_vec_free(sol);
3950 } else if (trivial && graph->scc > 1) {
3951 isl_vec_free(sol);
3952 return compute_component_schedule(node, graph, 1);
3955 if (update_schedule(graph, sol, 0, 0) < 0)
3956 return isl_schedule_node_free(node);
3957 if (trivial)
3958 graph->n_row--;
3960 return split_scaled(node, graph);
3963 /* Topologically sort statements mapped to the same schedule iteration
3964 * and insert a sequence node in front of "node"
3965 * corresponding to this order.
3966 * If "initialized" is set, then it may be assumed that compute_maxvar
3967 * has been called on the current band. Otherwise, call
3968 * compute_maxvar before carry_dependences, in case carry_dependences gets called.
3970 * If it turns out to be impossible to sort the statements apart,
3971 * because different dependences impose different orderings
3972 * on the statements, then we extend the schedule such that
3973 * it carries at least one more dependence.
3975 static __isl_give isl_schedule_node *sort_statements(
3976 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3977 int initialized)
3979 isl_ctx *ctx;
3980 isl_union_set_list *filters;
3982 if (!node)
3983 return NULL;
3985 ctx = isl_schedule_node_get_ctx(node);
3986 if (graph->n < 1)
3987 isl_die(ctx, isl_error_internal,
3988 "graph should have at least one node",
3989 return isl_schedule_node_free(node));
3991 if (graph->n == 1)
3992 return node;
3994 if (update_edges(ctx, graph) < 0)
3995 return isl_schedule_node_free(node);
3997 if (graph->n_edge == 0)
3998 return node;
4000 if (detect_sccs(ctx, graph) < 0)
4001 return isl_schedule_node_free(node);
4003 next_band(graph);
4004 if (graph->scc < graph->n) {
4005 if (!initialized && compute_maxvar(graph) < 0)
4006 return isl_schedule_node_free(node);
4007 return carry_dependences(node, graph);
4010 filters = extract_sccs(ctx, graph);
4011 node = isl_schedule_node_insert_sequence(node, filters);
4013 return node;
4016 /* Are there any (non-empty) (conditional) validity edges in the graph?
4018 static int has_validity_edges(struct isl_sched_graph *graph)
4020 int i;
4022 for (i = 0; i < graph->n_edge; ++i) {
4023 int empty;
4025 empty = isl_map_plain_is_empty(graph->edge[i].map);
4026 if (empty < 0)
4027 return -1;
4028 if (empty)
4029 continue;
4030 if (is_validity(&graph->edge[i]) ||
4031 is_conditional_validity(&graph->edge[i]))
4032 return 1;
4035 return 0;
4038 /* Should we apply a Feautrier step?
4039 * That is, did the user request the Feautrier algorithm and are
4040 * there any validity dependences (left)?
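 * The Feautrier algorithm is requested through the schedule_algorithm
 * option, e.g., by calling
 * isl_options_set_schedule_algorithm(ctx, ISL_SCHEDULE_ALGORITHM_FEAUTRIER).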
4042 static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
4044 if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
4045 return 0;
4047 return has_validity_edges(graph);
4050 /* Compute a schedule for a connected dependence graph using Feautrier's
4051 * multi-dimensional scheduling algorithm and return the updated schedule node.
4053 * The original algorithm is described in [1].
4054 * The main idea is to minimize the number of scheduling dimensions, by
4055 * trying to satisfy as many dependences as possible per scheduling dimension.
4057 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
4058 * Problem, Part II: Multi-Dimensional Time.
4059 * In Intl. Journal of Parallel Programming, 1992.
4061 static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
4062 isl_schedule_node *node, struct isl_sched_graph *graph)
4064 return carry_dependences(node, graph);
4067 /* Turn off the "local" bit on all (condition) edges.
4069 static void clear_local_edges(struct isl_sched_graph *graph)
4071 int i;
4073 for (i = 0; i < graph->n_edge; ++i)
4074 if (is_condition(&graph->edge[i]))
4075 clear_local(&graph->edge[i]);
4078 /* Does "graph" have both condition and conditional validity edges?
4080 static int need_condition_check(struct isl_sched_graph *graph)
4082 int i;
4083 int any_condition = 0;
4084 int any_conditional_validity = 0;
4086 for (i = 0; i < graph->n_edge; ++i) {
4087 if (is_condition(&graph->edge[i]))
4088 any_condition = 1;
4089 if (is_conditional_validity(&graph->edge[i]))
4090 any_conditional_validity = 1;
4093 return any_condition && any_conditional_validity;
4096 /* Does "graph" contain any coincidence edge?
4098 static int has_any_coincidence(struct isl_sched_graph *graph)
4100 int i;
4102 for (i = 0; i < graph->n_edge; ++i)
4103 if (is_coincidence(&graph->edge[i]))
4104 return 1;
4106 return 0;
4109 /* Extract the final schedule row as a map with the iteration domain
4110 * of "node" as domain.
4112 static __isl_give isl_map *final_row(struct isl_sched_node *node)
4114 isl_local_space *ls;
4115 isl_aff *aff;
4116 int row;
4118 row = isl_mat_rows(node->sched) - 1;
4119 ls = isl_local_space_from_space(isl_space_copy(node->space));
4120 aff = extract_schedule_row(ls, node, row);
4121 return isl_map_from_aff(aff);
4124 /* Is the conditional validity dependence in the edge with index "edge_index"
4125 * violated by the latest (i.e., final) row of the schedule?
4126 * That is, is i scheduled after j
4127 * for any conditional validity dependence i -> j?
4129 static int is_violated(struct isl_sched_graph *graph, int edge_index)
4131 isl_map *src_sched, *dst_sched, *map;
4132 struct isl_sched_edge *edge = &graph->edge[edge_index];
4133 int empty;
4135 src_sched = final_row(edge->src);
4136 dst_sched = final_row(edge->dst);
4137 map = isl_map_copy(edge->map);
4138 map = isl_map_apply_domain(map, src_sched);
4139 map = isl_map_apply_range(map, dst_sched);
4140 map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
4141 empty = isl_map_is_empty(map);
4142 isl_map_free(map);
4144 if (empty < 0)
4145 return -1;
4147 return !empty;
4150 /* Does "graph" have any satisfied condition edges that
4151 * are adjacent to the conditional validity constraint with
4152 * domain "conditional_source" and range "conditional_sink"?
4154 * A satisfied condition is one that is not local.
4155 * If a condition was forced to be local already (i.e., marked as local)
4156 * then there is no need to check if it is in fact local.
4158 * Additionally, mark all adjacent condition edges found as local.
4160 static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
4161 __isl_keep isl_union_set *conditional_source,
4162 __isl_keep isl_union_set *conditional_sink)
4164 int i;
4165 int any = 0;
4167 for (i = 0; i < graph->n_edge; ++i) {
4168 int adjacent, local;
4169 isl_union_map *condition;
4171 if (!is_condition(&graph->edge[i]))
4172 continue;
4173 if (is_local(&graph->edge[i]))
4174 continue;
4176 condition = graph->edge[i].tagged_condition;
4177 adjacent = domain_intersects(condition, conditional_sink);
4178 if (adjacent >= 0 && !adjacent)
4179 adjacent = range_intersects(condition,
4180 conditional_source);
4181 if (adjacent < 0)
4182 return -1;
4183 if (!adjacent)
4184 continue;
4186 set_local(&graph->edge[i]);
4188 local = is_condition_false(&graph->edge[i]);
4189 if (local < 0)
4190 return -1;
4191 if (!local)
4192 any = 1;
4195 return any;
4198 /* Are there any violated conditional validity dependences with
4199 * adjacent condition dependences that are not local with respect
4200 * to the current schedule?
4201 * That is, is the conditional validity constraint violated?
4203 * Additionally, mark all those adjacent condition dependences as local.
4204 * We also mark those adjacent condition dependences that were not marked
4205 * as local before, but just happened to be local already. This ensures
4206 * that they remain local if the schedule is recomputed.
4208 * We first collect domain and range of all violated conditional validity
4209 * dependences and then check if there are any adjacent non-local
4210 * condition dependences.
4212 static int has_violated_conditional_constraint(isl_ctx *ctx,
4213 struct isl_sched_graph *graph)
4215 int i;
4216 int any = 0;
4217 isl_union_set *source, *sink;
4219 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4220 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4221 for (i = 0; i < graph->n_edge; ++i) {
4222 isl_union_set *uset;
4223 isl_union_map *umap;
4224 int violated;
4226 if (!is_conditional_validity(&graph->edge[i]))
4227 continue;
4229 violated = is_violated(graph, i);
4230 if (violated < 0)
4231 goto error;
4232 if (!violated)
4233 continue;
4235 any = 1;
4237 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
4238 uset = isl_union_map_domain(umap);
4239 source = isl_union_set_union(source, uset);
4240 source = isl_union_set_coalesce(source);
4242 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
4243 uset = isl_union_map_range(umap);
4244 sink = isl_union_set_union(sink, uset);
4245 sink = isl_union_set_coalesce(sink);
4248 if (any)
4249 any = has_adjacent_true_conditions(graph, source, sink);
4251 isl_union_set_free(source);
4252 isl_union_set_free(sink);
4253 return any;
4254 error:
4255 isl_union_set_free(source);
4256 isl_union_set_free(sink);
4257 return -1;
4260 /* Examine the current band (the rows between graph->band_start and
4261 * graph->n_total_row), deciding whether to drop it or add it to "node"
4262 * and then continue with the computation of the next band, if any.
4263 * If "initialized" is set, then it may be assumed that compute_maxvar
4264 * has been called on the current band. Otherwise, call
4265 * compute_maxvar before carry_dependences, in case carry_dependences gets called.
4267 * The caller keeps looking for a new row as long as
4268 * graph->n_row < graph->maxvar. If the latest attempt to find
4269 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
4270 * then we either
4271 * - split between SCCs and start over (assuming we found an interesting
4272 * pair of SCCs between which to split)
4273 * - continue with the next band (assuming the current band has at least
4274 * one row)
4275 * - try to carry as many dependences as possible and continue with the next
4276 * band
4277 * In each case, we first insert a band node in the schedule tree
4278 * if any rows have been computed.
4280 * If the caller managed to complete the schedule, we insert a band node
4281 * (if any schedule rows were computed) and we finish off by topologically
4282 * sorting the statements based on the remaining dependences.
4284 static __isl_give isl_schedule_node *compute_schedule_finish_band(
4285 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4286 int initialized)
4288 int insert;
4290 if (!node)
4291 return NULL;
4293 if (graph->n_row < graph->maxvar) {
4294 isl_ctx *ctx;
4295 int empty = graph->n_total_row == graph->band_start;
4297 ctx = isl_schedule_node_get_ctx(node);
4298 if (!ctx->opt->schedule_maximize_band_depth && !empty)
4299 return compute_next_band(node, graph, 1);
4300 if (graph->src_scc >= 0)
4301 return compute_split_schedule(node, graph);
4302 if (!empty)
4303 return compute_next_band(node, graph, 1);
4304 if (!initialized && compute_maxvar(graph) < 0)
4305 return isl_schedule_node_free(node);
4306 return carry_dependences(node, graph);
4309 insert = graph->n_total_row > graph->band_start;
4310 if (insert) {
4311 node = insert_current_band(node, graph, 1);
4312 node = isl_schedule_node_child(node, 0);
4314 node = sort_statements(node, graph, initialized);
4315 if (insert)
4316 node = isl_schedule_node_parent(node);
4318 return node;
4321 /* Construct a band of schedule rows for a connected dependence graph.
4322 * The caller is responsible for determining the strongly connected
4323 * components and calling compute_maxvar first.
4325 * We try to find a sequence of as many schedule rows as possible that result
4326 * in non-negative dependence distances (independent of the previous rows
4327 * in the sequence, i.e., such that the sequence is tilable), with as
4328 * many of the initial rows as possible satisfying the coincidence constraints.
4329 * The computation stops if we can't find any more rows or if we have found
4330 * all the rows we wanted to find.
4332 * If ctx->opt->schedule_outer_coincidence is set, then we force the
4333 * outermost dimension to satisfy the coincidence constraints. If this
4334 * turns out to be impossible, we fall back on the general scheme above
4335 * and try to carry as many dependences as possible.
4337 * If "graph" contains both condition and conditional validity dependences,
4338 * then we need to check that the conditional schedule constraint
4339 * is satisfied, i.e., there are no violated conditional validity dependences
4340 * that are adjacent to any non-local condition dependences.
4341 * If there are, then we mark all those adjacent condition dependences
4342 * as local and recompute the current band. Those dependences that
4343 * are marked local will then be forced to be local.
4344 * The initial computation is performed with no dependences marked as local.
4345 * If we are lucky, then there will be no violated conditional validity
4346 * dependences adjacent to any non-local condition dependences.
4347 * Otherwise, we mark some additional condition dependences as local and
4348 * recompute. We continue this process until there are no violations left or
4349 * until we are no longer able to compute a schedule.
4350 * Since there are only a finite number of dependences,
4351 * there will only be a finite number of iterations.
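 * Note that the schedule_outer_coincidence option referred to above
 * can be set by the user, e.g., through
 * isl_options_set_schedule_outer_coincidence(ctx, 1).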
4353 static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
4354 struct isl_sched_graph *graph)
4356 int has_coincidence;
4357 int use_coincidence;
4358 int force_coincidence = 0;
4359 int check_conditional;
4361 if (sort_sccs(graph) < 0)
4362 return isl_stat_error;
4364 clear_local_edges(graph);
4365 check_conditional = need_condition_check(graph);
4366 has_coincidence = has_any_coincidence(graph);
4368 if (ctx->opt->schedule_outer_coincidence)
4369 force_coincidence = 1;
4371 use_coincidence = has_coincidence;
4372 while (graph->n_row < graph->maxvar) {
4373 isl_vec *sol;
4374 int violated;
4375 int coincident;
4377 graph->src_scc = -1;
4378 graph->dst_scc = -1;
4380 if (setup_lp(ctx, graph, use_coincidence) < 0)
4381 return isl_stat_error;
4382 sol = solve_lp(graph);
4383 if (!sol)
4384 return isl_stat_error;
4385 if (sol->size == 0) {
4386 int empty = graph->n_total_row == graph->band_start;
4388 isl_vec_free(sol);
4389 if (use_coincidence && (!force_coincidence || !empty)) {
4390 use_coincidence = 0;
4391 continue;
4393 return isl_stat_ok;
4395 coincident = !has_coincidence || use_coincidence;
4396 if (update_schedule(graph, sol, 1, coincident) < 0)
4397 return isl_stat_error;
4399 if (!check_conditional)
4400 continue;
4401 violated = has_violated_conditional_constraint(ctx, graph);
4402 if (violated < 0)
4403 return isl_stat_error;
4404 if (!violated)
4405 continue;
4406 if (reset_band(graph) < 0)
4407 return isl_stat_error;
4408 use_coincidence = has_coincidence;
4411 return isl_stat_ok;
4414 /* Compute a schedule for a connected dependence graph by considering
4415 * the graph as a whole and return the updated schedule node.
4417 * The actual schedule rows of the current band are computed by
4418 * compute_schedule_wcc_band. compute_schedule_finish_band takes
4419 * care of integrating the band into "node" and continuing
4420 * the computation.
4422 static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
4423 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
4425 isl_ctx *ctx;
4427 if (!node)
4428 return NULL;
4430 ctx = isl_schedule_node_get_ctx(node);
4431 if (compute_schedule_wcc_band(ctx, graph) < 0)
4432 return isl_schedule_node_free(node);
4434 return compute_schedule_finish_band(node, graph, 1);
4437 /* Clustering information used by compute_schedule_wcc_clustering.
4439 * "n" is the number of SCCs in the original dependence graph
4440 * "scc" is an array of "n" elements, each representing an SCC
4441 * of the original dependence graph. All entries in the same cluster
4442 * have the same number of schedule rows.
4443 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
4444 * where each cluster is represented by the index of the first SCC
4445 * in the cluster. Initially, each SCC belongs to a cluster containing
4446 * only that SCC.
4448 * "scc_in_merge" is used by merge_clusters_along_edge to keep
4449 * track of which SCCs need to be merged.
4451 * "cluster" contains the merged clusters of SCCs after the clustering
4452 * has completed.
4454 * "scc_node" is a temporary data structure used inside copy_partial.
4455 * For each SCC, it keeps track of the number of nodes in the SCC
4456 * that have already been copied.
4458 struct isl_clustering {
4459 int n;
4460 struct isl_sched_graph *scc;
4461 struct isl_sched_graph *cluster;
4462 int *scc_cluster;
4463 int *scc_node;
4464 int *scc_in_merge;
4467 /* Initialize the clustering data structure "c" from "graph".
4469 * In particular, allocate memory, extract the SCCs from "graph"
4470 * into c->scc, initialize scc_cluster and construct
4471 * a band of schedule rows for each SCC.
4472 * Within each SCC, there is only one SCC by definition.
4473 * Each SCC initially belongs to a cluster containing only that SCC.
4475 static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
4476 struct isl_sched_graph *graph)
4478 int i;
4480 c->n = graph->scc;
4481 c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
4482 c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
4483 c->scc_cluster = isl_calloc_array(ctx, int, c->n);
4484 c->scc_node = isl_calloc_array(ctx, int, c->n);
4485 c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
4486 if (!c->scc || !c->cluster ||
4487 !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
4488 return isl_stat_error;
4490 for (i = 0; i < c->n; ++i) {
4491 if (extract_sub_graph(ctx, graph, &node_scc_exactly,
4492 &edge_scc_exactly, i, &c->scc[i]) < 0)
4493 return isl_stat_error;
4494 c->scc[i].scc = 1;
4495 if (compute_maxvar(&c->scc[i]) < 0)
4496 return isl_stat_error;
4497 if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
4498 return isl_stat_error;
4499 c->scc_cluster[i] = i;
4502 return isl_stat_ok;
4505 /* Free all memory allocated for "c".
4507 static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
4509 int i;
4511 if (c->scc)
4512 for (i = 0; i < c->n; ++i)
4513 graph_free(ctx, &c->scc[i]);
4514 free(c->scc);
4515 if (c->cluster)
4516 for (i = 0; i < c->n; ++i)
4517 graph_free(ctx, &c->cluster[i]);
4518 free(c->cluster);
4519 free(c->scc_cluster);
4520 free(c->scc_node);
4521 free(c->scc_in_merge);
4524 /* Should we refrain from merging the cluster in "graph" with
4525 * any other cluster?
4526 * In particular, is its current schedule band empty and incomplete?
4528 static int bad_cluster(struct isl_sched_graph *graph)
4530 return graph->n_row < graph->maxvar &&
4531 graph->n_total_row == graph->band_start;
4534 /* Return the index of an edge in "graph" that can be used to merge
4535 * two clusters in "c".
4536 * Return graph->n_edge if no such edge can be found.
4537 * Return -1 on error.
4539 * In particular, return a proximity edge between two clusters
4540 * that is not marked "no_merge" and such that neither of the
4541 * two clusters has an incomplete, empty band.
4543 * If there are multiple such edges, then try and find the most
4544 * appropriate edge to use for merging. In particular, pick the edge
4545 * with the greatest weight. If there are multiple of those,
4546 * then pick one with the shortest distance between
4547 * the two cluster representatives.
4549 static int find_proximity(struct isl_sched_graph *graph,
4550 struct isl_clustering *c)
4552 int i, best = graph->n_edge, best_dist, best_weight;
4554 for (i = 0; i < graph->n_edge; ++i) {
4555 struct isl_sched_edge *edge = &graph->edge[i];
4556 int dist, weight;
4558 if (!is_proximity(edge))
4559 continue;
4560 if (edge->no_merge)
4561 continue;
4562 if (bad_cluster(&c->scc[edge->src->scc]) ||
4563 bad_cluster(&c->scc[edge->dst->scc]))
4564 continue;
4565 dist = c->scc_cluster[edge->dst->scc] -
4566 c->scc_cluster[edge->src->scc];
4567 if (dist == 0)
4568 continue;
4569 weight = edge->weight;
4570 if (best < graph->n_edge) {
4571 if (best_weight > weight)
4572 continue;
4573 if (best_weight == weight && best_dist <= dist)
4574 continue;
4576 best = i;
4577 best_dist = dist;
4578 best_weight = weight;
4581 return best;
4584 /* Internal data structure used in mark_merge_sccs.
4586 * "graph" is the dependence graph in which a strongly connected
4587 * component is constructed.
4588 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
4589 * "src" and "dst" are the indices of the nodes that are being merged.
4591 struct isl_mark_merge_sccs_data {
4592 struct isl_sched_graph *graph;
4593 int *scc_cluster;
4594 int src;
4595 int dst;
4598 /* Check whether the cluster containing node "i" depends on the cluster
4599 * containing node "j". If "i" and "j" belong to the same cluster,
4600 * then they are taken to depend on each other to ensure that
4601 * the resulting strongly connected component consists of complete
4602 * clusters. Furthermore, if "i" and "j" are the two nodes that
4603 * are being merged, then they are taken to depend on each other as well.
4604 * Otherwise, check if there is a (conditional) validity dependence
4605 * from node[j] to node[i], forcing node[i] to follow node[j].
4607 static isl_bool cluster_follows(int i, int j, void *user)
4609 struct isl_mark_merge_sccs_data *data = user;
4610 struct isl_sched_graph *graph = data->graph;
4611 int *scc_cluster = data->scc_cluster;
4613 if (data->src == i && data->dst == j)
4614 return isl_bool_true;
4615 if (data->src == j && data->dst == i)
4616 return isl_bool_true;
4617 if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
4618 return isl_bool_true;
4620 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
4623 /* Mark all SCCs that belong to either of the two clusters in "c"
4624 * connected by the edge in "graph" with index "edge", or to any
4625 * of the intermediate clusters.
4626 * The marking is recorded in c->scc_in_merge.
4628 * The given edge has been selected for merging two clusters,
4629 * meaning that there is at least a proximity edge between the two nodes.
4630 * However, there may also be (indirect) validity dependences
4631 * between the two nodes. When merging the two clusters, all clusters
4632 * containing one or more of the intermediate nodes along the
4633 * indirect validity dependences need to be merged in as well.
4635 * First collect all such nodes by computing the strongly connected
4636 * component (SCC) containing the two nodes connected by the edge, where
4637 * the two nodes are considered to depend on each other to make
4638 * sure they end up in the same SCC. Similarly, each node is considered
4639 * to depend on every other node in the same cluster to ensure
4640 * that the SCC consists of complete clusters.
4642 * Then the original SCCs that contain any of these nodes are marked
4643 * in c->scc_in_merge.
4645 static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
4646 int edge, struct isl_clustering *c)
4648 struct isl_mark_merge_sccs_data data;
4649 struct isl_tarjan_graph *g;
4650 int i;
4652 for (i = 0; i < c->n; ++i)
4653 c->scc_in_merge[i] = 0;
4655 data.graph = graph;
4656 data.scc_cluster = c->scc_cluster;
4657 data.src = graph->edge[edge].src - graph->node;
4658 data.dst = graph->edge[edge].dst - graph->node;
4660 g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
4661 &cluster_follows, &data);
4662 if (!g)
4663 goto error;
4665 i = g->op;
4666 if (i < 3)
4667 isl_die(ctx, isl_error_internal,
4668 "expecting at least two nodes in component",
4669 goto error);
4670 if (g->order[--i] != -1)
4671 isl_die(ctx, isl_error_internal,
4672 "expecting end of component marker", goto error);
4674 for (--i; i >= 0 && g->order[i] != -1; --i) {
4675 int scc = graph->node[g->order[i]].scc;
4676 c->scc_in_merge[scc] = 1;
4679 isl_tarjan_graph_free(g);
4680 return isl_stat_ok;
4681 error:
4682 isl_tarjan_graph_free(g);
4683 return isl_stat_error;
4686 /* Construct the identifier "cluster_i".
4688 static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
4690 char name[40];
4692 snprintf(name, sizeof(name), "cluster_%d", i);
4693 return isl_id_alloc(ctx, name, NULL);
4696 /* Construct the space of the cluster with index "i" containing
4697 * the strongly connected component "scc".
4699 * In particular, construct a space called cluster_i with dimension equal
4700 * to the number of schedule rows in the current band of "scc".
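 * For example (purely illustrative), the cluster with index 3 whose
 * current band consists of two schedule rows is given the space
 * { cluster_3[i0, i1] }.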
4702 static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
4704 int nvar;
4705 isl_space *space;
4706 isl_id *id;
4708 nvar = scc->n_total_row - scc->band_start;
4709 space = isl_space_copy(scc->node[0].space);
4710 space = isl_space_params(space);
4711 space = isl_space_set_from_params(space);
4712 space = isl_space_add_dims(space, isl_dim_set, nvar);
4713 id = cluster_id(isl_space_get_ctx(space), i);
4714 space = isl_space_set_tuple_id(space, isl_dim_set, id);
4716 return space;
4719 /* Collect the domain of the graph for merging clusters.
4721 * In particular, for each cluster with first SCC "i", construct
4722 * a set in the space called cluster_i with dimension equal
4723 * to the number of schedule rows in the current band of the cluster.
4725 static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
4726 struct isl_sched_graph *graph, struct isl_clustering *c)
4728 int i;
4729 isl_space *space;
4730 isl_union_set *domain;
4732 space = isl_space_params_alloc(ctx, 0);
4733 domain = isl_union_set_empty(space);
4735 for (i = 0; i < graph->scc; ++i) {
4736 isl_space *space;
4738 if (!c->scc_in_merge[i])
4739 continue;
4740 if (c->scc_cluster[i] != i)
4741 continue;
4742 space = cluster_space(&c->scc[i], i);
4743 domain = isl_union_set_add_set(domain, isl_set_universe(space));
4746 return domain;
4749 /* Construct a map from the original instances to the corresponding
4750 * cluster instance in the current bands of the clusters in "c".
4752 static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
4753 struct isl_sched_graph *graph, struct isl_clustering *c)
4755 int i, j;
4756 isl_space *space;
4757 isl_union_map *cluster_map;
4759 space = isl_space_params_alloc(ctx, 0);
4760 cluster_map = isl_union_map_empty(space);
4761 for (i = 0; i < graph->scc; ++i) {
4762 int start, n;
4763 isl_id *id;
4765 if (!c->scc_in_merge[i])
4766 continue;
4768 id = cluster_id(ctx, c->scc_cluster[i]);
4769 start = c->scc[i].band_start;
4770 n = c->scc[i].n_total_row - start;
4771 for (j = 0; j < c->scc[i].n; ++j) {
4772 isl_multi_aff *ma;
4773 isl_map *map;
4774 struct isl_sched_node *node = &c->scc[i].node[j];
4776 ma = node_extract_partial_schedule_multi_aff(node,
4777 start, n);
4778 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
4779 isl_id_copy(id));
4780 map = isl_map_from_multi_aff(ma);
4781 cluster_map = isl_union_map_add_map(cluster_map, map);
4783 isl_id_free(id);
4786 return cluster_map;
4789 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
4790 * that are not isl_edge_condition or isl_edge_conditional_validity.
4792 static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
4793 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
4794 __isl_take isl_schedule_constraints *sc)
4796 enum isl_edge_type t;
4798 if (!sc)
4799 return NULL;
4801 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
4802 if (t == isl_edge_condition ||
4803 t == isl_edge_conditional_validity)
4804 continue;
4805 if (!is_type(edge, t))
4806 continue;
4807 sc->constraint[t] = isl_union_map_union(sc->constraint[t],
4808 isl_union_map_copy(umap));
4809 if (!sc->constraint[t])
4810 return isl_schedule_constraints_free(sc);
4813 return sc;
4816 /* Add schedule constraints of types isl_edge_condition and
4817 * isl_edge_conditional_validity to "sc" by applying "umap" to
4818 * the domains of the wrapped relations in domain and range
4819 * of the corresponding tagged constraints of "edge".
4821 static __isl_give isl_schedule_constraints *add_conditional_constraints(
4822 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
4823 __isl_take isl_schedule_constraints *sc)
4825 enum isl_edge_type t;
4826 isl_union_map *tagged;
4828 for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
4829 if (!is_type(edge, t))
4830 continue;
4831 if (t == isl_edge_condition)
4832 tagged = isl_union_map_copy(edge->tagged_condition);
4833 else
4834 tagged = isl_union_map_copy(edge->tagged_validity);
4835 tagged = isl_union_map_zip(tagged);
4836 tagged = isl_union_map_apply_domain(tagged,
4837 isl_union_map_copy(umap));
4838 tagged = isl_union_map_zip(tagged);
4839 sc->constraint[t] = isl_union_map_union(sc->constraint[t],
4840 tagged);
4841 if (!sc->constraint[t])
4842 return isl_schedule_constraints_free(sc);
4845 return sc;
4848 /* Given a mapping "cluster_map" from the original instances to
4849 * the cluster instances, add schedule constraints on the clusters
4850 * to "sc" corresponding to the original constraints represented by "edge".
4852 * For non-tagged dependence constraints, the cluster constraints
4853 * are obtained by applying "cluster_map" to the edge->map.
4855 * For tagged dependence constraints, "cluster_map" needs to be applied
4856 * to the domains of the wrapped relations in domain and range
4857 * of the tagged dependence constraints. Pick out the mappings
4858 * from these domains from "cluster_map" and construct their product.
4859 * This mapping can then be applied to the pair of domains.
4861 static __isl_give isl_schedule_constraints *collect_edge_constraints(
4862 struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
4863 __isl_take isl_schedule_constraints *sc)
4865 isl_union_map *umap;
4866 isl_space *space;
4867 isl_union_set *uset;
4868 isl_union_map *umap1, *umap2;
4870 if (!sc)
4871 return NULL;
4873 umap = isl_union_map_from_map(isl_map_copy(edge->map));
4874 umap = isl_union_map_apply_domain(umap,
4875 isl_union_map_copy(cluster_map));
4876 umap = isl_union_map_apply_range(umap,
4877 isl_union_map_copy(cluster_map));
4878 sc = add_non_conditional_constraints(edge, umap, sc);
4879 isl_union_map_free(umap);
4881 if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
4882 return sc;
4884 space = isl_space_domain(isl_map_get_space(edge->map));
4885 uset = isl_union_set_from_set(isl_set_universe(space));
4886 umap1 = isl_union_map_copy(cluster_map);
4887 umap1 = isl_union_map_intersect_domain(umap1, uset);
4888 space = isl_space_range(isl_map_get_space(edge->map));
4889 uset = isl_union_set_from_set(isl_set_universe(space));
4890 umap2 = isl_union_map_copy(cluster_map);
4891 umap2 = isl_union_map_intersect_domain(umap2, uset);
4892 umap = isl_union_map_product(umap1, umap2);
4894 sc = add_conditional_constraints(edge, umap, sc);
4896 isl_union_map_free(umap);
4897 return sc;
4900 /* Given a mapping "cluster_map" from the original instances to
4901 * the cluster instances, add schedule constraints on the clusters
4902 * to "sc" corresponding to all edges in "graph" between nodes that
4903 * belong to SCCs that are marked for merging in "scc_in_merge".
4905 static __isl_give isl_schedule_constraints *collect_constraints(
4906 struct isl_sched_graph *graph, int *scc_in_merge,
4907 __isl_keep isl_union_map *cluster_map,
4908 __isl_take isl_schedule_constraints *sc)
4910 int i;
4912 for (i = 0; i < graph->n_edge; ++i) {
4913 struct isl_sched_edge *edge = &graph->edge[i];
4915 if (!scc_in_merge[edge->src->scc])
4916 continue;
4917 if (!scc_in_merge[edge->dst->scc])
4918 continue;
4919 sc = collect_edge_constraints(edge, cluster_map, sc);
4922 return sc;
4925 /* Construct a dependence graph for scheduling clusters with respect
4926 * to each other and store the result in "merge_graph".
4927 * In particular, the nodes of the graph correspond to the schedule
4928 * dimensions of the current bands of those clusters that have been
4929 * marked for merging in "c".
4931 * First construct an isl_schedule_constraints object for this domain
4932 * by transforming the edges in "graph" to the domain.
4933 * Then initialize a dependence graph for scheduling from these
4934 * constraints.
4936 static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
4937 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
4939 isl_union_set *domain;
4940 isl_union_map *cluster_map;
4941 isl_schedule_constraints *sc;
4942 isl_stat r;
4944 domain = collect_domain(ctx, graph, c);
4945 sc = isl_schedule_constraints_on_domain(domain);
4946 if (!sc)
4947 return isl_stat_error;
4948 cluster_map = collect_cluster_map(ctx, graph, c);
4949 sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
4950 isl_union_map_free(cluster_map);
4952 r = graph_init(merge_graph, sc);
4954 isl_schedule_constraints_free(sc);
4956 return r;
4959 /* Compute the maximal number of remaining schedule rows that still need
4960 * to be computed for the nodes that belong to clusters with the maximal
4961 * dimension for the current band (i.e., the band that is to be merged).
4962 * Only clusters that are about to be merged are considered.
4963 * "maxvar" is the maximal dimension for the current band.
4964 * "c" contains information about the clusters.
4966 * Return the maximal number of remaining schedule rows or -1 on error.
4968 static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
4970 int i, j;
4971 int max_slack;
4973 max_slack = 0;
4974 for (i = 0; i < c->n; ++i) {
4975 int nvar;
4976 struct isl_sched_graph *scc;
4978 if (!c->scc_in_merge[i])
4979 continue;
4980 scc = &c->scc[i];
4981 nvar = scc->n_total_row - scc->band_start;
4982 if (nvar != maxvar)
4983 continue;
4984 for (j = 0; j < scc->n; ++j) {
4985 struct isl_sched_node *node = &scc->node[j];
4986 int slack;
4988 if (node_update_cmap(node) < 0)
4989 return -1;
4990 slack = node->nvar - node->rank;
4991 if (slack > max_slack)
4992 max_slack = slack;
4996 return max_slack;
4999 /* If there are any clusters where the dimension of the current band
5000 * (i.e., the band that is to be merged) is smaller than "maxvar" and
5001 * if there are any nodes in such a cluster where the number
5002 * of remaining schedule rows that still need to be computed
5003 * is greater than "max_slack", then return the smallest current band
5004 * dimension of all these clusters. Otherwise return the original value
5005 * of "maxvar". Return -1 in case of any error.
5006 * Only clusters that are about to be merged are considered.
5007 * "c" contains information about the clusters.
5009 static int limit_maxvar_to_slack(int maxvar, int max_slack,
5010 struct isl_clustering *c)
5012 int i, j;
5014 for (i = 0; i < c->n; ++i) {
5015 int nvar;
5016 struct isl_sched_graph *scc;
5018 if (!c->scc_in_merge[i])
5019 continue;
5020 scc = &c->scc[i];
5021 nvar = scc->n_total_row - scc->band_start;
5022 if (nvar >= maxvar)
5023 continue;
5024 for (j = 0; j < scc->n; ++j) {
5025 struct isl_sched_node *node = &scc->node[j];
5026 int slack;
5028 if (node_update_cmap(node) < 0)
5029 return -1;
5030 slack = node->nvar - node->rank;
5031 if (slack > max_slack) {
5032 maxvar = nvar;
5033 break;
5038 return maxvar;
5041 /* Adjust merge_graph->maxvar based on the number of remaining schedule rows
5042 * that still need to be computed. In particular, if there is a node
5043 * in a cluster where the dimension of the current band is smaller
5044 * than merge_graph->maxvar, but the number of remaining schedule rows
5045 * is greater than that of any node in a cluster with the maximal
5046 * dimension for the current band (i.e., merge_graph->maxvar),
5047 * then adjust merge_graph->maxvar to the (smallest) current band dimension
5048 * of those clusters. Without this adjustment, the total number of
5049 * schedule dimensions would be increased, resulting in a skewed view
5050 * of the number of coincident dimensions.
5051 * "c" contains information about the clusters.
5053 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
5054 * then there is no point in attempting any merge since it will be rejected
5055 * anyway. Set merge_graph->maxvar to zero in such cases.
5057 static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
5058 struct isl_sched_graph *merge_graph, struct isl_clustering *c)
5060 int max_slack, maxvar;
5062 max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
5063 if (max_slack < 0)
5064 return isl_stat_error;
5065 maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
5066 if (maxvar < 0)
5067 return isl_stat_error;
5069 if (maxvar < merge_graph->maxvar) {
5070 if (isl_options_get_schedule_maximize_band_depth(ctx))
5071 merge_graph->maxvar = 0;
5072 else
5073 merge_graph->maxvar = maxvar;
5076 return isl_stat_ok;
5079 /* Return the number of coincident dimensions in the current band of "graph",
5080 * where the nodes of "graph" are assumed to be scheduled by a single band.
5082 static int get_n_coincident(struct isl_sched_graph *graph)
5084 int i;
5086 for (i = graph->band_start; i < graph->n_total_row; ++i)
5087 if (!graph->node[0].coincident[i])
5088 break;
5090 return i - graph->band_start;
5093 /* Should the clusters be merged based on the cluster schedule
5094 * in the current (and only) band of "merge_graph", given that
5095 * coincidence should be maximized?
5097 * If the number of coincident schedule dimensions in the merged band
5098 * would be less than the maximal number of coincident schedule dimensions
5099 * in any of the merged clusters, then the clusters should not be merged.
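 * A made-up example: if one of the merged clusters has two coincident
 * dimensions in its current band while the merged band would only have
 * one, then the merge is rejected.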
5101 static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
5102 struct isl_sched_graph *merge_graph)
5104 int i;
5105 int n_coincident;
5106 int max_coincident;
5108 max_coincident = 0;
5109 for (i = 0; i < c->n; ++i) {
5110 if (!c->scc_in_merge[i])
5111 continue;
5112 n_coincident = get_n_coincident(&c->scc[i]);
5113 if (n_coincident > max_coincident)
5114 max_coincident = n_coincident;
5117 n_coincident = get_n_coincident(merge_graph);
5119 return n_coincident >= max_coincident;
5122 /* Return the transformation on "node" expressed by the current (and only)
5123 * band of "merge_graph" applied to the clusters in "c".
5125 * First find the representation of "node" in its SCC in "c" and
5126 * extract the transformation expressed by the current band.
5127 * Then extract the transformation applied by "merge_graph"
5128 * to the cluster to which this SCC belongs.
5129 * Combine the two to obtain the complete transformation on the node.
5131 * Note that the range of the first transformation is an anonymous space,
5132 * while the domain of the second is named "cluster_X". The range
5133 * of the former therefore needs to be adjusted before the two
5134 * can be combined.
5136 static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
5137 struct isl_sched_node *node, struct isl_clustering *c,
5138 struct isl_sched_graph *merge_graph)
5140 struct isl_sched_node *scc_node, *cluster_node;
5141 int start, n;
5142 isl_id *id;
5143 isl_space *space;
5144 isl_multi_aff *ma, *ma2;
5146 scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
5147 start = c->scc[node->scc].band_start;
5148 n = c->scc[node->scc].n_total_row - start;
5149 ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
5150 space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
5151 cluster_node = graph_find_node(ctx, merge_graph, space);
5152 if (space && !cluster_node)
5153 isl_die(ctx, isl_error_internal, "unable to find cluster",
5154 space = isl_space_free(space));
5155 id = isl_space_get_tuple_id(space, isl_dim_set);
5156 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
5157 isl_space_free(space);
5158 n = merge_graph->n_total_row;
5159 ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
5160 ma = isl_multi_aff_pullback_multi_aff(ma2, ma);
5162 return isl_map_from_multi_aff(ma);
5165 /* Given a set of distances "set", are they bounded by a small constant
5166 * in direction "pos"?
5167 * In practice, check if they are bounded by 2 by checking that there
5168 * are no elements with a value greater than or equal to 3 or
5169 * smaller than or equal to -3.
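 * For instance (an illustrative example), the distance set
 * { [d] : -2 <= d <= 2 } is considered bounded in direction 0,
 * while { [d] : d >= 0 } is not.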
5171 static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
5173 isl_bool bounded;
5174 isl_set *test;
5176 if (!set)
5177 return isl_bool_error;
5179 test = isl_set_copy(set);
5180 test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
5181 bounded = isl_set_is_empty(test);
5182 isl_set_free(test);
5184 if (bounded < 0 || !bounded)
5185 return bounded;
5187 test = isl_set_copy(set);
5188 test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
5189 bounded = isl_set_is_empty(test);
5190 isl_set_free(test);
5192 return bounded;
5195 /* Does the set "set" have a fixed (but possibly parametric) value
5196 * at dimension "pos"?
5198 static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
5200 int n;
5201 isl_bool single;
5203 if (!set)
5204 return isl_bool_error;
5205 set = isl_set_copy(set);
5206 n = isl_set_dim(set, isl_dim_set);
5207 set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
5208 set = isl_set_project_out(set, isl_dim_set, 0, pos);
5209 single = isl_set_is_singleton(set);
5210 isl_set_free(set);
5212 return single;
5215 /* Does "map" have a fixed (but possible parametric) value
5216 * at dimension "pos" of either its domain or its range?
5218 static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
5220 isl_set *set;
5221 isl_bool single;
5223 set = isl_map_domain(isl_map_copy(map));
5224 single = has_single_value(set, pos);
5225 isl_set_free(set);
5227 if (single < 0 || single)
5228 return single;
5230 set = isl_map_range(isl_map_copy(map));
5231 single = has_single_value(set, pos);
5232 isl_set_free(set);
5234 return single;
5237 /* Does the edge "edge" from "graph" have bounded dependence distances
5238 * in the merged graph "merge_graph" of a selection of clusters in "c"?
5240 * Extract the complete transformations of the source and destination
5241 * nodes of the edge, apply them to the edge constraints and
5242 * compute the differences. Finally, check if these differences are bounded
5243 * in each direction.
5245 * If the dimension of the band is greater than the number of
5246 * dimensions that can be expected to be optimized by the edge
5247 * (based on its weight), then also allow the differences to be unbounded
5248 * in the remaining dimensions, but only if either the source or
5249 * the destination has a fixed value in that direction.
5250 * This allows a statement that produces values that are used by
5251 * several instances of another statement to be merged with that
5252 * other statement.
5253 * However, merging such clusters will introduce an inherently
5254 * large proximity distance inside the merged cluster, meaning
5255 * that proximity distances will no longer be optimized in
5256 * subsequent merges. These merges are therefore only allowed
5257 * after all other possible merges have been tried.
5258 * The first time such a merge is encountered, the weight of the edge
5259 * is replaced by a negative weight. The second time (i.e., after
5260 * all merges over edges with a non-negative weight have been tried),
5261 * the merge is allowed.
5263 static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
5264 struct isl_sched_graph *graph, struct isl_clustering *c,
5265 struct isl_sched_graph *merge_graph)
5267 int i, n, n_slack;
5268 isl_bool bounded;
5269 isl_map *map, *t;
5270 isl_set *dist;
5272 map = isl_map_copy(edge->map);
5273 t = extract_node_transformation(ctx, edge->src, c, merge_graph);
5274 map = isl_map_apply_domain(map, t);
5275 t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
5276 map = isl_map_apply_range(map, t);
5277 dist = isl_map_deltas(isl_map_copy(map));
5279 bounded = isl_bool_true;
5280 n = isl_set_dim(dist, isl_dim_set);
5281 n_slack = n - edge->weight;
5282 if (edge->weight < 0)
5283 n_slack -= graph->max_weight + 1;
5284 for (i = 0; i < n; ++i) {
5285 isl_bool bounded_i, singular_i;
5287 bounded_i = distance_is_bounded(dist, i);
5288 if (bounded_i < 0)
5289 goto error;
5290 if (bounded_i)
5291 continue;
5292 if (edge->weight >= 0)
5293 bounded = isl_bool_false;
5294 n_slack--;
5295 if (n_slack < 0)
5296 break;
5297 singular_i = has_singular_src_or_dst(map, i);
5298 if (singular_i < 0)
5299 goto error;
5300 if (singular_i)
5301 continue;
5302 bounded = isl_bool_false;
5303 break;
5305 if (!bounded && i >= n && edge->weight >= 0)
5306 edge->weight -= graph->max_weight + 1;
5307 isl_map_free(map);
5308 isl_set_free(dist);
5310 return bounded;
5311 error:
5312 isl_map_free(map);
5313 isl_set_free(dist);
5314 return isl_bool_error;
5317 /* Should the clusters be merged based on the cluster schedule
5318 * in the current (and only) band of "merge_graph"?
5319 * "graph" is the original dependence graph, while "c" records
5320 * which SCCs are involved in the latest merge.
5322 * In particular, is there at least one proximity constraint
5323 * that is optimized by the merge?
5325 * A proximity constraint is considered to be optimized
5326 * if the dependence distances are small.
5328 static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
5329 struct isl_sched_graph *graph, struct isl_clustering *c,
5330 struct isl_sched_graph *merge_graph)
5332 int i;
5334 for (i = 0; i < graph->n_edge; ++i) {
5335 struct isl_sched_edge *edge = &graph->edge[i];
5336 isl_bool bounded;
5338 if (!is_proximity(edge))
5339 continue;
5340 if (!c->scc_in_merge[edge->src->scc])
5341 continue;
5342 if (!c->scc_in_merge[edge->dst->scc])
5343 continue;
5344 if (c->scc_cluster[edge->dst->scc] ==
5345 c->scc_cluster[edge->src->scc])
5346 continue;
5347 bounded = has_bounded_distances(ctx, edge, graph, c,
5348 merge_graph);
5349 if (bounded < 0 || bounded)
5350 return bounded;
5353 return isl_bool_false;
5356 /* Should the clusters be merged based on the cluster schedule
5357 * in the current (and only) band of "merge_graph"?
5358 * "graph" is the original dependence graph, while "c" records
5359 * which SCCs are involved in the latest merge.
5361 * If the current band is empty, then the clusters should not be merged.
5363 * If the band depth should be maximized and the merge schedule
5364 * is incomplete (meaning that the dimension of some of the schedule
5365 * bands in the original schedule will be reduced), then the clusters
5366 * should not be merged.
5368 * If the schedule_maximize_coincidence option is set, then check that
5369 * the number of coincident schedule dimensions is not reduced.
5371 * Finally, only allow the merge if at least one proximity
5372 * constraint is optimized.
5374 static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
5375 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
5377 if (merge_graph->n_total_row == merge_graph->band_start)
5378 return isl_bool_false;
5380 if (isl_options_get_schedule_maximize_band_depth(ctx) &&
5381 merge_graph->n_total_row < merge_graph->maxvar)
5382 return isl_bool_false;
5384 if (isl_options_get_schedule_maximize_coincidence(ctx)) {
5385 isl_bool ok;
5387 ok = ok_to_merge_coincident(c, merge_graph);
5388 if (ok < 0 || !ok)
5389 return ok;
5392 return ok_to_merge_proximity(ctx, graph, c, merge_graph);
5395 /* Apply the schedule in "t_node" to the "n" rows starting at "first"
5396 * of the schedule in "node" and return the result.
5398 * That is, essentially compute
5400 * T * N(first:first+n-1)
5402 * taking into account the constant term and the parameter coefficients
5403 * in "t_node".
5404 */
5405 static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
5406 struct isl_sched_node *t_node, struct isl_sched_node *node,
5407 int first, int n)
5408 {
5409 int i, j;
5410 isl_mat *t;
5411 int n_row, n_col, n_param, n_var;
5413 n_param = node->nparam;
5414 n_var = node->nvar;
5415 n_row = isl_mat_rows(t_node->sched);
5416 n_col = isl_mat_cols(node->sched);
5417 t = isl_mat_alloc(ctx, n_row, n_col);
5418 if (!t)
5419 return NULL;
5420 for (i = 0; i < n_row; ++i) {
5421 isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
5422 isl_seq_clr(t->row[i] + 1 + n_param, n_var);
5423 for (j = 0; j < n; ++j)
5424 isl_seq_addmul(t->row[i],
5425 t_node->sched->row[i][1 + n_param + j],
5426 node->sched->row[first + j],
5427 1 + n_param + n_var);
5428 }
5429 return t;
5430 }
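/* A small worked example of the computation in node_transformation,
 * using made-up coefficients rather than values from any real input:
 * with n_param = 1 and n_var = 2, take the two selected rows of "node"
 *
 *	N = [ 0 0 1 0 ]		(f0 = i)
 *	    [ 2 1 0 1 ]		(f1 = 2 + p + j)
 *
 * and a single cluster schedule row in "t_node"
 *
 *	T = [ 1 0 1 1 ]		(g = 1 + f0 + f1)
 *
 * The constant and parameter part [ 1 0 ] of T is copied, after which
 * 1 * N[0] + 1 * N[1] is added, giving
 *
 *	[ 3 1 1 1 ]		(g = 3 + p + i + j)
 */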
5432 /* Apply the cluster schedule in "t_node" to the current band
5433 * schedule of the nodes in "graph".
5435 * In particular, replace the rows starting at band_start
5436 * by the result of applying the cluster schedule in "t_node"
5437 * to the original rows.
5439 * The coincidence of the schedule is determined by the coincidence
5440 * of the cluster schedule.
5441 */
5442 static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
5443 struct isl_sched_node *t_node)
5444 {
5445 int i, j;
5446 int n_new;
5447 int start, n;
5449 start = graph->band_start;
5450 n = graph->n_total_row - start;
5452 n_new = isl_mat_rows(t_node->sched);
5453 for (i = 0; i < graph->n; ++i) {
5454 struct isl_sched_node *node = &graph->node[i];
5455 isl_mat *t;
5457 t = node_transformation(ctx, t_node, node, start, n);
5458 node->sched = isl_mat_drop_rows(node->sched, start, n);
5459 node->sched = isl_mat_concat(node->sched, t);
5460 node->sched_map = isl_map_free(node->sched_map);
5461 if (!node->sched)
5462 return isl_stat_error;
5463 for (j = 0; j < n_new; ++j)
5464 node->coincident[start + j] = t_node->coincident[j];
5465 }
5466 graph->n_total_row -= n;
5467 graph->n_row -= n;
5468 graph->n_total_row += n_new;
5469 graph->n_row += n_new;
5472 return isl_stat_ok;
5473 }
5475 /* Merge the clusters marked for merging in "c" into a single
5476 * cluster using the cluster schedule in the current band of "merge_graph".
5477 * The representative SCC for the new cluster is the SCC with
5478 * the smallest index.
5480 * The current band schedule of each SCC in the new cluster is obtained
5481 * by applying the schedule of the corresponding original cluster
5482 * to the original band schedule.
5483 * All SCCs in the new cluster have the same number of schedule rows.
5484 */
5485 static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
5486 struct isl_sched_graph *merge_graph)
5487 {
5488 int i;
5489 int cluster = -1;
5490 isl_space *space;
5492 for (i = 0; i < c->n; ++i) {
5493 struct isl_sched_node *node;
5495 if (!c->scc_in_merge[i])
5496 continue;
5497 if (cluster < 0)
5498 cluster = i;
5499 space = cluster_space(&c->scc[i], c->scc_cluster[i]);
5500 if (!space)
5501 return isl_stat_error;
5502 node = graph_find_node(ctx, merge_graph, space);
5503 isl_space_free(space);
5504 if (!node)
5505 isl_die(ctx, isl_error_internal,
5506 "unable to find cluster",
5507 return isl_stat_error);
5508 if (transform(ctx, &c->scc[i], node) < 0)
5509 return isl_stat_error;
5510 c->scc_cluster[i] = cluster;
5511 }
5513 return isl_stat_ok;
5514 }
5516 /* Try and merge the clusters of SCCs marked in c->scc_in_merge
5517 * by scheduling the current cluster bands with respect to each other.
5519 * Construct a dependence graph with a space for each cluster and
5520 * with the coordinates of each space corresponding to the schedule
5521 * dimensions of the current band of that cluster.
5522 * Construct a cluster schedule in this cluster dependence graph and
5523 * apply it to the current cluster bands if it is applicable
5524 * according to ok_to_merge.
5526 * If the number of remaining schedule dimensions in a cluster
5527 * with a non-maximal current schedule dimension is greater than
5528 * the number of remaining schedule dimensions in clusters
5529 * with a maximal current schedule dimension, then restrict
5530 * the number of rows to be computed in the cluster schedule
5531 * to the minimal such non-maximal current schedule dimension.
5532 * Do this by adjusting merge_graph.maxvar.
5534 * Return isl_bool_true if the clusters have effectively been merged
5535 * into a single cluster.
5537 * Note that since the standard scheduling algorithm minimizes the maximal
5538 * distance over proximity constraints, the proximity constraints between
5539 * the merged clusters may not be optimized any further than what is
5540 * sufficient to bring the distances within the limits of the internal
5541 * proximity constraints inside the individual clusters.
5542 * It may therefore make sense to perform an additional translation step
5543 * to bring the clusters closer to each other, while maintaining
5544 * the linear part of the merging schedule found using the standard
5545 * scheduling algorithm.
5546 */
5547 static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
5548 struct isl_clustering *c)
5549 {
5550 struct isl_sched_graph merge_graph = { 0 };
5551 isl_bool merged;
5553 if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
5554 goto error;
5556 if (compute_maxvar(&merge_graph) < 0)
5557 goto error;
5558 if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
5559 goto error;
5560 if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
5561 goto error;
5562 merged = ok_to_merge(ctx, graph, c, &merge_graph);
5563 if (merged && merge(ctx, c, &merge_graph) < 0)
5564 goto error;
5566 graph_free(ctx, &merge_graph);
5567 return merged;
5568 error:
5569 graph_free(ctx, &merge_graph);
5570 return isl_bool_error;
5571 }
5573 /* Is there any edge marked "no_merge" between two SCCs that are
5574 * about to be merged (i.e., that are set in "scc_in_merge")?
5575 * "merge_edge" is the proximity edge along which the clusters of SCCs
5576 * are going to be merged.
5578 * If there is any edge between two SCCs with a negative weight,
5579 * while the weight of "merge_edge" is non-negative, then this
5580 * means that the edge was postponed. "merge_edge" should then
5581 * also be postponed since merging along the edge with negative weight should
5582 * be postponed until all edges with non-negative weight have been tried.
5583 * Replace the weight of "merge_edge" by a negative weight as well and
5584 * tell the caller not to attempt a merge.
5585 */
5586 static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
5587 struct isl_sched_edge *merge_edge)
5588 {
5589 int i;
5591 for (i = 0; i < graph->n_edge; ++i) {
5592 struct isl_sched_edge *edge = &graph->edge[i];
5594 if (!scc_in_merge[edge->src->scc])
5595 continue;
5596 if (!scc_in_merge[edge->dst->scc])
5597 continue;
5598 if (edge->no_merge)
5599 return 1;
5600 if (merge_edge->weight >= 0 && edge->weight < 0) {
5601 merge_edge->weight -= graph->max_weight + 1;
5602 return 1;
5603 }
5604 }
5606 return 0;
5607 }
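/* For illustration of the weight adjustment above: postponing an edge
 * turns its weight W into W - (graph->max_weight + 1), which is always
 * negative since 0 <= W <= max_weight. With max_weight == 3, for example,
 * a postponed edge of weight 2 is stored as -2, and the original weight
 * can be recovered by adding max_weight + 1 again, as is done when
 * computing the slack in has_bounded_distances.
 */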
5609 /* Merge the two clusters in "c" connected by the edge in "graph"
5610 * with index "edge" into a single cluster.
5611 * If it turns out to be impossible to merge these two clusters,
5612 * then mark the edge as "no_merge" such that it will not be
5613 * considered again.
5615 * First mark all SCCs that need to be merged. This includes the SCCs
5616 * in the two clusters, but it may also include the SCCs
5617 * of intermediate clusters.
5618 * If there is already a no_merge edge between any pair of such SCCs,
5619 * then simply mark the current edge as no_merge as well.
5620 * Likewise, if any of those edges was postponed by has_bounded_distances,
5621 * then postpone the current edge as well.
5622 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
5623 * if the clusters did not end up getting merged, unless the non-merge
5624 * is due to the fact that the edge was postponed. This postponement
5625 * can be recognized by a change in weight (from non-negative to negative).
5626 */
5627 static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
5628 struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
5629 {
5630 isl_bool merged;
5631 int edge_weight = graph->edge[edge].weight;
5633 if (mark_merge_sccs(ctx, graph, edge, c) < 0)
5634 return isl_stat_error;
5636 if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
5637 merged = isl_bool_false;
5638 else
5639 merged = try_merge(ctx, graph, c);
5640 if (merged < 0)
5641 return isl_stat_error;
5642 if (!merged && edge_weight == graph->edge[edge].weight)
5643 graph->edge[edge].no_merge = 1;
5645 return isl_stat_ok;
5646 }
5648 /* Does "node" belong to the cluster identified by "cluster"?
5649 */
5650 static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
5651 {
5652 return node->cluster == cluster;
5653 }
5655 /* Does "edge" connect two nodes belonging to the cluster
5656 * identified by "cluster"?
5657 */
5658 static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
5659 {
5660 return edge->src->cluster == cluster && edge->dst->cluster == cluster;
5661 }
5663 /* Swap the schedule of "node1" and "node2".
5664 * Both nodes have been derived from the same node in a common parent graph.
5665 * Since the "coincident" field is shared with that node
5666 * in the parent graph, there is no need to also swap this field.
5667 */
5668 static void swap_sched(struct isl_sched_node *node1,
5669 struct isl_sched_node *node2)
5670 {
5671 isl_mat *sched;
5672 isl_map *sched_map;
5674 sched = node1->sched;
5675 node1->sched = node2->sched;
5676 node2->sched = sched;
5678 sched_map = node1->sched_map;
5679 node1->sched_map = node2->sched_map;
5680 node2->sched_map = sched_map;
5681 }
5683 /* Copy the current band schedule from the SCCs that form the cluster
5684 * with index "pos" to the actual cluster at position "pos".
5685 * By construction, the index of the first SCC that belongs to the cluster
5686 * is also "pos".
5688 * The order of the nodes inside both the SCCs and the cluster
5689 * is assumed to be the same as the order in the original "graph".
5691 * Since the SCC graphs will no longer be used after this function,
5692 * the schedules are actually swapped rather than copied.
5693 */
5694 static isl_stat copy_partial(struct isl_sched_graph *graph,
5695 struct isl_clustering *c, int pos)
5696 {
5697 int i, j;
5699 c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
5700 c->cluster[pos].n_row = c->scc[pos].n_row;
5701 c->cluster[pos].maxvar = c->scc[pos].maxvar;
5702 j = 0;
5703 for (i = 0; i < graph->n; ++i) {
5704 int k;
5705 int s;
5707 if (graph->node[i].cluster != pos)
5708 continue;
5709 s = graph->node[i].scc;
5710 k = c->scc_node[s]++;
5711 swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
5712 if (c->scc[s].maxvar > c->cluster[pos].maxvar)
5713 c->cluster[pos].maxvar = c->scc[s].maxvar;
5714 ++j;
5715 }
5717 return isl_stat_ok;
5718 }
5720 /* Is there a (conditional) validity dependence from node[j] to node[i],
5721 * forcing node[i] to follow node[j], or do the nodes belong to the same
5722 * cluster?
5723 */
5724 static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
5725 {
5726 struct isl_sched_graph *graph = user;
5728 if (graph->node[i].cluster == graph->node[j].cluster)
5729 return isl_bool_true;
5730 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
5731 }
5733 /* Extract the merged clusters of SCCs in "graph", sort them, and
5734 * store them in c->clusters. Update c->scc_cluster accordingly.
5736 * First keep track of the cluster containing the SCC to which a node
5737 * belongs in the node itself.
5738 * Then extract the clusters into c->clusters, copying the current
5739 * band schedule from the SCCs that belong to the cluster.
5740 * Do this only once per cluster.
5742 * Finally, topologically sort the clusters and update c->scc_cluster
5743 * to match the new scc numbering. While the SCCs were originally
5744 * sorted already, some SCCs that depend on some other SCCs may
5745 * have been merged with SCCs that appear before these other SCCs.
5746 * A reordering may therefore be required.
5747 */
5748 static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
5749 struct isl_clustering *c)
5750 {
5751 int i;
5753 for (i = 0; i < graph->n; ++i)
5754 graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];
5756 for (i = 0; i < graph->scc; ++i) {
5757 if (c->scc_cluster[i] != i)
5758 continue;
5759 if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
5760 &edge_cluster_exactly, i, &c->cluster[i]) < 0)
5761 return isl_stat_error;
5762 c->cluster[i].src_scc = -1;
5763 c->cluster[i].dst_scc = -1;
5764 if (copy_partial(graph, c, i) < 0)
5765 return isl_stat_error;
5766 }
5768 if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
5769 return isl_stat_error;
5770 for (i = 0; i < graph->n; ++i)
5771 c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;
5773 return isl_stat_ok;
5774 }
5776 /* Compute weights on the proximity edges of "graph" that can
5777 * be used by find_proximity to find the most appropriate
5778 * proximity edge to use to merge two clusters in "c".
5779 * The weights are also used by has_bounded_distances to determine
5780 * whether the merge should be allowed.
5781 * Store the maximum of the computed weights in graph->max_weight.
5783 * The computed weight is a measure for the number of remaining schedule
5784 * dimensions that can still be completely aligned.
5785 * In particular, compute the number of equalities between
5786 * input dimensions and output dimensions in the proximity constraints.
5787 * The directions that are already handled by outer schedule bands
5788 * are projected out prior to determining this number.
5790 * Edges that will never be considered by find_proximity are ignored.
5791 */
5792 static isl_stat compute_weights(struct isl_sched_graph *graph,
5793 struct isl_clustering *c)
5794 {
5795 int i;
5797 graph->max_weight = 0;
5799 for (i = 0; i < graph->n_edge; ++i) {
5800 struct isl_sched_edge *edge = &graph->edge[i];
5801 struct isl_sched_node *src = edge->src;
5802 struct isl_sched_node *dst = edge->dst;
5803 isl_basic_map *hull;
5804 int n_in, n_out;
5806 if (!is_proximity(edge))
5807 continue;
5808 if (bad_cluster(&c->scc[edge->src->scc]) ||
5809 bad_cluster(&c->scc[edge->dst->scc]))
5810 continue;
5811 if (c->scc_cluster[edge->dst->scc] ==
5812 c->scc_cluster[edge->src->scc])
5813 continue;
5815 hull = isl_map_affine_hull(isl_map_copy(edge->map));
5816 hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
5817 isl_mat_copy(src->ctrans));
5818 hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
5819 isl_mat_copy(dst->ctrans));
5820 hull = isl_basic_map_project_out(hull,
5821 isl_dim_in, 0, src->rank);
5822 hull = isl_basic_map_project_out(hull,
5823 isl_dim_out, 0, dst->rank);
5824 hull = isl_basic_map_remove_divs(hull);
5825 n_in = isl_basic_map_dim(hull, isl_dim_in);
5826 n_out = isl_basic_map_dim(hull, isl_dim_out);
5827 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
5828 isl_dim_in, 0, n_in);
5829 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
5830 isl_dim_out, 0, n_out);
5831 if (!hull)
5832 return isl_stat_error;
5833 edge->weight = hull->n_eq;
5834 isl_basic_map_free(hull);
5836 if (edge->weight > graph->max_weight)
5837 graph->max_weight = edge->weight;
5838 }
5840 return isl_stat_ok;
5841 }
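/* An illustration of the weight computed above, using a made-up
 * proximity relation and ignoring, for simplicity, the initial
 * transformation and projection steps: for an edge whose map is
 * { S[i,j] -> T[i',j'] : i' = i }, the affine hull contains a single
 * equality involving both an input and an output dimension, so the
 * edge would get weight 1, whereas { S[i,j] -> T[i,j] } would yield
 * weight 2.
 */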
5843 /* Call compute_schedule_finish_band on each of the clusters in "c"
5844 * in their topological order. This order is determined by the scc
5845 * fields of the nodes in "graph".
5846 * Combine the results in a sequence expressing the topological order.
5848 * If there is only one cluster left, then there is no need to introduce
5849 * a sequence node. Also, in this case, the cluster necessarily contains
5850 * the SCC at position 0 in the original graph and is therefore also
5851 * stored in the first cluster of "c".
5852 */
5853 static __isl_give isl_schedule_node *finish_bands_clustering(
5854 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5855 struct isl_clustering *c)
5856 {
5857 int i;
5858 isl_ctx *ctx;
5859 isl_union_set_list *filters;
5861 if (graph->scc == 1)
5862 return compute_schedule_finish_band(node, &c->cluster[0], 0);
5864 ctx = isl_schedule_node_get_ctx(node);
5866 filters = extract_sccs(ctx, graph);
5867 node = isl_schedule_node_insert_sequence(node, filters);
5869 for (i = 0; i < graph->scc; ++i) {
5870 int j = c->scc_cluster[i];
5871 node = isl_schedule_node_child(node, i);
5872 node = isl_schedule_node_child(node, 0);
5873 node = compute_schedule_finish_band(node, &c->cluster[j], 0);
5874 node = isl_schedule_node_parent(node);
5875 node = isl_schedule_node_parent(node);
5876 }
5878 return node;
5879 }
5881 /* Compute a schedule for a connected dependence graph by first considering
5882 * each strongly connected component (SCC) in the graph separately and then
5883 * incrementally combining them into clusters.
5884 * Return the updated schedule node.
5886 * Initially, each cluster consists of a single SCC, each with its
5887 * own band schedule. The algorithm then tries to merge pairs
5888 * of clusters along a proximity edge until no more suitable
5889 * proximity edges can be found. During this merging, the schedule
5890 * is maintained in the individual SCCs.
5891 * After the merging is completed, the full resulting clusters
5892 * are extracted and in finish_bands_clustering,
5893 * compute_schedule_finish_band is called on each of them to integrate
5894 * the band into "node" and to continue the computation.
5896 * compute_weights initializes the weights that are used by find_proximity.
5897 */
5898 static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
5899 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5900 {
5901 isl_ctx *ctx;
5902 struct isl_clustering c;
5903 int i;
5905 ctx = isl_schedule_node_get_ctx(node);
5907 if (clustering_init(ctx, &c, graph) < 0)
5908 goto error;
5910 if (compute_weights(graph, &c) < 0)
5911 goto error;
5913 for (;;) {
5914 i = find_proximity(graph, &c);
5915 if (i < 0)
5916 goto error;
5917 if (i >= graph->n_edge)
5918 break;
5919 if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
5920 goto error;
5921 }
5923 if (extract_clusters(ctx, graph, &c) < 0)
5924 goto error;
5926 node = finish_bands_clustering(node, graph, &c);
5928 clustering_free(ctx, &c);
5929 return node;
5930 error:
5931 clustering_free(ctx, &c);
5932 return isl_schedule_node_free(node);
5933 }
5935 /* Compute a schedule for a connected dependence graph and return
5936 * the updated schedule node.
5938 * If Feautrier's algorithm is selected, we first recursively try to satisfy
5939 * as many validity dependences as possible. When all validity dependences
5940 * are satisfied we extend the schedule to a full-dimensional schedule.
5942 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
5943 * depending on whether the user has selected the option to try and
5944 * compute a schedule for the entire (weakly connected) component first.
5945 * If there is only a single strongly connected component (SCC), then
5946 * there is no point in trying to combine SCCs
5947 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
5948 * is called instead.
5949 */
5950 static __isl_give isl_schedule_node *compute_schedule_wcc(
5951 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5952 {
5953 isl_ctx *ctx;
5955 if (!node)
5956 return NULL;
5958 ctx = isl_schedule_node_get_ctx(node);
5959 if (detect_sccs(ctx, graph) < 0)
5960 return isl_schedule_node_free(node);
5962 if (compute_maxvar(graph) < 0)
5963 return isl_schedule_node_free(node);
5965 if (need_feautrier_step(ctx, graph))
5966 return compute_schedule_wcc_feautrier(node, graph);
5968 if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
5969 return compute_schedule_wcc_whole(node, graph);
5970 else
5971 return compute_schedule_wcc_clustering(node, graph);
5972 }
5974 /* Compute a schedule for each group of nodes identified by node->scc
5975 * separately and then combine them in a sequence node (or as set node
5976 * if graph->weak is set) inserted at position "node" of the schedule tree.
5977 * Return the updated schedule node.
5979 * If "wcc" is set then each of the groups belongs to a single
5980 * weakly connected component in the dependence graph so that
5981 * there is no need for compute_sub_schedule to look for weakly
5982 * connected components.
5983 */
5984 static __isl_give isl_schedule_node *compute_component_schedule(
5985 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5986 int wcc)
5987 {
5988 int component;
5989 isl_ctx *ctx;
5990 isl_union_set_list *filters;
5992 if (!node)
5993 return NULL;
5994 ctx = isl_schedule_node_get_ctx(node);
5996 filters = extract_sccs(ctx, graph);
5997 if (graph->weak)
5998 node = isl_schedule_node_insert_set(node, filters);
5999 else
6000 node = isl_schedule_node_insert_sequence(node, filters);
6002 for (component = 0; component < graph->scc; ++component) {
6003 node = isl_schedule_node_child(node, component);
6004 node = isl_schedule_node_child(node, 0);
6005 node = compute_sub_schedule(node, ctx, graph,
6006 &node_scc_exactly,
6007 &edge_scc_exactly, component, wcc);
6008 node = isl_schedule_node_parent(node);
6009 node = isl_schedule_node_parent(node);
6010 }
6012 return node;
6013 }
6015 /* Compute a schedule for the given dependence graph and insert it at "node".
6016 * Return the updated schedule node.
6018 * We first check if the graph is connected (through validity and conditional
6019 * validity dependences) and, if not, compute a schedule
6020 * for each component separately.
6021 * If the schedule_serialize_sccs option is set, then we check for strongly
6022 * connected components instead and compute a separate schedule for
6023 * each such strongly connected component.
6024 */
6025 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
6026 struct isl_sched_graph *graph)
6027 {
6028 isl_ctx *ctx;
6030 if (!node)
6031 return NULL;
6033 ctx = isl_schedule_node_get_ctx(node);
6034 if (isl_options_get_schedule_serialize_sccs(ctx)) {
6035 if (detect_sccs(ctx, graph) < 0)
6036 return isl_schedule_node_free(node);
6037 } else {
6038 if (detect_wccs(ctx, graph) < 0)
6039 return isl_schedule_node_free(node);
6040 }
6042 if (graph->scc > 1)
6043 return compute_component_schedule(node, graph, 1);
6045 return compute_schedule_wcc(node, graph);
6046 }
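/* The choice between the two code paths above can be influenced by the
 * caller through the schedule_serialize_sccs option. A usage sketch
 * (to be called before computing the schedule, outside of this file):
 *
 *	isl_options_set_schedule_serialize_sccs(ctx, 1);
 *
 * With this option set, each strongly connected component is scheduled
 * separately and the components are combined in a sequence following
 * their topological order.
 */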
6048 /* Compute a schedule on sc->domain that respects the given schedule
6049 * constraints.
6051 * In particular, the schedule respects all the validity dependences.
6052 * If the default isl scheduling algorithm is used, it tries to minimize
6053 * the dependence distances over the proximity dependences.
6054 * If Feautrier's scheduling algorithm is used, the proximity dependence
6055 * distances are only minimized during the extension to a full-dimensional
6056 * schedule.
6058 * If there are any condition and conditional validity dependences,
6059 * then the conditional validity dependences may be violated inside
6060 * a tilable band, provided they have no adjacent non-local
6061 * condition dependences.
6062 */
6063 __isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
6064 __isl_take isl_schedule_constraints *sc)
6065 {
6066 isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
6067 struct isl_sched_graph graph = { 0 };
6068 isl_schedule *sched;
6069 isl_schedule_node *node;
6070 isl_union_set *domain;
6072 sc = isl_schedule_constraints_align_params(sc);
6074 domain = isl_schedule_constraints_get_domain(sc);
6075 if (isl_union_set_n_set(domain) == 0) {
6076 isl_schedule_constraints_free(sc);
6077 return isl_schedule_from_domain(domain);
6078 }
6080 if (graph_init(&graph, sc) < 0)
6081 domain = isl_union_set_free(domain);
6083 node = isl_schedule_node_from_domain(domain);
6084 node = isl_schedule_node_child(node, 0);
6085 if (graph.n > 0)
6086 node = compute_schedule(node, &graph);
6087 sched = isl_schedule_node_get_schedule(node);
6088 isl_schedule_node_free(node);
6090 graph_free(ctx, &graph);
6091 isl_schedule_constraints_free(sc);
6093 return sched;
6094 }
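/* A minimal usage sketch for this entry point; the domain and the
 * validity relation below are placeholders rather than the output of
 * any real dependence analysis:
 *
 *	isl_schedule_constraints *sc;
 *	isl_schedule *schedule;
 *
 *	sc = isl_schedule_constraints_on_domain(
 *		isl_union_set_read_from_str(ctx, "{ S[i] : 0 <= i < 100 }"));
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_read_from_str(ctx,
 *			"{ S[i] -> S[i + 1] : 0 <= i < 99 }"));
 *	schedule = isl_schedule_constraints_compute_schedule(sc);
 *	...
 *	isl_schedule_free(schedule);
 */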
6096 /* Compute a schedule for the given union of domains that respects
6097 * all the validity dependences and minimizes
6098 * the dependence distances over the proximity dependences.
6100 * This function is kept for backward compatibility.
6101 */
6102 __isl_give isl_schedule *isl_union_set_compute_schedule(
6103 __isl_take isl_union_set *domain,
6104 __isl_take isl_union_map *validity,
6105 __isl_take isl_union_map *proximity)
6106 {
6107 isl_schedule_constraints *sc;
6109 sc = isl_schedule_constraints_on_domain(domain);
6110 sc = isl_schedule_constraints_set_validity(sc, validity);
6111 sc = isl_schedule_constraints_set_proximity(sc, proximity);
6113 return isl_schedule_constraints_compute_schedule(sc);
6114 }
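/* The wrapper above can still be called directly, e.g.
 *
 *	schedule = isl_union_set_compute_schedule(domain, validity, proximity);
 *
 * but code that also needs coincidence, condition or conditional
 * validity constraints has to construct an isl_schedule_constraints
 * object, as in the sketch after isl_schedule_constraints_compute_schedule.
 */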