isl_scheduler.c: extract out shared extract_var_coef
1 /*
2 * Copyright 2011 INRIA Saclay
3 * Copyright 2012-2014 Ecole Normale Superieure
4 * Copyright 2015-2016 Sven Verdoolaege
6 * Use of this software is governed by the MIT license
8 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
9 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
10 * 91893 Orsay, France
11 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
14 #include <isl_ctx_private.h>
15 #include <isl_map_private.h>
16 #include <isl_space_private.h>
17 #include <isl_aff_private.h>
18 #include <isl/hash.h>
19 #include <isl/constraint.h>
20 #include <isl/schedule.h>
21 #include <isl/schedule_node.h>
22 #include <isl_mat_private.h>
23 #include <isl_vec_private.h>
24 #include <isl/set.h>
25 #include <isl/union_set.h>
26 #include <isl_seq.h>
27 #include <isl_tab.h>
28 #include <isl_dim_map.h>
29 #include <isl/map_to_basic_set.h>
30 #include <isl_sort.h>
31 #include <isl_options_private.h>
32 #include <isl_tarjan.h>
33 #include <isl_morph.h>
36 * The scheduling algorithm implemented in this file was inspired by
37 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
38 * Parallelization and Locality Optimization in the Polyhedral Model".
41 enum isl_edge_type {
42 isl_edge_validity = 0,
43 isl_edge_first = isl_edge_validity,
44 isl_edge_coincidence,
45 isl_edge_condition,
46 isl_edge_conditional_validity,
47 isl_edge_proximity,
48 isl_edge_last = isl_edge_proximity,
49 isl_edge_local
52 /* The constraints that need to be satisfied by a schedule on "domain".
54 * "context" specifies extra constraints on the parameters.
56 * "validity" constraints map domain elements i to domain elements
57 * that should be scheduled after i. (Hard constraint)
58 * "proximity" constraints map domain elements i to domains elements
59 * that should be scheduled as early as possible after i (or before i).
60 * (Soft constraint)
62 * "condition" and "conditional_validity" constraints map possibly "tagged"
63 * domain elements i -> s to "tagged" domain elements j -> t.
64 * The elements of the "conditional_validity" constraints, but without the
65 * tags (i.e., the elements i -> j) are treated as validity constraints,
66 * except that during the construction of a tilable band,
67 * the elements of the "conditional_validity" constraints may be violated
68 * provided that all adjacent elements of the "condition" constraints
69 * are local within the band.
70 * A dependence is local within a band if domain and range are mapped
71 * to the same schedule point by the band.
73 struct isl_schedule_constraints {
74 isl_union_set *domain;
75 isl_set *context;
77 isl_union_map *constraint[isl_edge_last + 1];
80 __isl_give isl_schedule_constraints *isl_schedule_constraints_copy(
81 __isl_keep isl_schedule_constraints *sc)
83 isl_ctx *ctx;
84 isl_schedule_constraints *sc_copy;
85 enum isl_edge_type i;
87 ctx = isl_union_set_get_ctx(sc->domain);
88 sc_copy = isl_calloc_type(ctx, struct isl_schedule_constraints);
89 if (!sc_copy)
90 return NULL;
92 sc_copy->domain = isl_union_set_copy(sc->domain);
93 sc_copy->context = isl_set_copy(sc->context);
94 if (!sc_copy->domain || !sc_copy->context)
95 return isl_schedule_constraints_free(sc_copy);
97 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
98 sc_copy->constraint[i] = isl_union_map_copy(sc->constraint[i]);
99 if (!sc_copy->constraint[i])
100 return isl_schedule_constraints_free(sc_copy);
103 return sc_copy;
107 /* Construct an isl_schedule_constraints object for computing a schedule
108 * on "domain". The initial object does not impose any constraints.
110 __isl_give isl_schedule_constraints *isl_schedule_constraints_on_domain(
111 __isl_take isl_union_set *domain)
113 isl_ctx *ctx;
114 isl_space *space;
115 isl_schedule_constraints *sc;
116 isl_union_map *empty;
117 enum isl_edge_type i;
119 if (!domain)
120 return NULL;
122 ctx = isl_union_set_get_ctx(domain);
123 sc = isl_calloc_type(ctx, struct isl_schedule_constraints);
124 if (!sc)
125 goto error;
127 space = isl_union_set_get_space(domain);
128 sc->domain = domain;
129 sc->context = isl_set_universe(isl_space_copy(space));
130 empty = isl_union_map_empty(space);
131 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
132 sc->constraint[i] = isl_union_map_copy(empty);
133 if (!sc->constraint[i])
134 sc->domain = isl_union_set_free(sc->domain);
136 isl_union_map_free(empty);
138 if (!sc->domain || !sc->context)
139 return isl_schedule_constraints_free(sc);
141 return sc;
142 error:
143 isl_union_set_free(domain);
144 return NULL;
147 /* Replace the context of "sc" by "context".
149 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_context(
150 __isl_take isl_schedule_constraints *sc, __isl_take isl_set *context)
152 if (!sc || !context)
153 goto error;
155 isl_set_free(sc->context);
156 sc->context = context;
158 return sc;
159 error:
160 isl_schedule_constraints_free(sc);
161 isl_set_free(context);
162 return NULL;
165 /* Replace the validity constraints of "sc" by "validity".
167 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_validity(
168 __isl_take isl_schedule_constraints *sc,
169 __isl_take isl_union_map *validity)
171 if (!sc || !validity)
172 goto error;
174 isl_union_map_free(sc->constraint[isl_edge_validity]);
175 sc->constraint[isl_edge_validity] = validity;
177 return sc;
178 error:
179 isl_schedule_constraints_free(sc);
180 isl_union_map_free(validity);
181 return NULL;
184 /* Replace the coincidence constraints of "sc" by "coincidence".
186 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_coincidence(
187 __isl_take isl_schedule_constraints *sc,
188 __isl_take isl_union_map *coincidence)
190 if (!sc || !coincidence)
191 goto error;
193 isl_union_map_free(sc->constraint[isl_edge_coincidence]);
194 sc->constraint[isl_edge_coincidence] = coincidence;
196 return sc;
197 error:
198 isl_schedule_constraints_free(sc);
199 isl_union_map_free(coincidence);
200 return NULL;
203 /* Replace the proximity constraints of "sc" by "proximity".
205 __isl_give isl_schedule_constraints *isl_schedule_constraints_set_proximity(
206 __isl_take isl_schedule_constraints *sc,
207 __isl_take isl_union_map *proximity)
209 if (!sc || !proximity)
210 goto error;
212 isl_union_map_free(sc->constraint[isl_edge_proximity]);
213 sc->constraint[isl_edge_proximity] = proximity;
215 return sc;
216 error:
217 isl_schedule_constraints_free(sc);
218 isl_union_map_free(proximity);
219 return NULL;
222 /* Replace the conditional validity constraints of "sc" by "condition"
223 * and "validity".
225 __isl_give isl_schedule_constraints *
226 isl_schedule_constraints_set_conditional_validity(
227 __isl_take isl_schedule_constraints *sc,
228 __isl_take isl_union_map *condition,
229 __isl_take isl_union_map *validity)
231 if (!sc || !condition || !validity)
232 goto error;
234 isl_union_map_free(sc->constraint[isl_edge_condition]);
235 sc->constraint[isl_edge_condition] = condition;
236 isl_union_map_free(sc->constraint[isl_edge_conditional_validity]);
237 sc->constraint[isl_edge_conditional_validity] = validity;
239 return sc;
240 error:
241 isl_schedule_constraints_free(sc);
242 isl_union_map_free(condition);
243 isl_union_map_free(validity);
244 return NULL;
247 __isl_null isl_schedule_constraints *isl_schedule_constraints_free(
248 __isl_take isl_schedule_constraints *sc)
250 enum isl_edge_type i;
252 if (!sc)
253 return NULL;
255 isl_union_set_free(sc->domain);
256 isl_set_free(sc->context);
257 for (i = isl_edge_first; i <= isl_edge_last; ++i)
258 isl_union_map_free(sc->constraint[i]);
260 free(sc);
262 return NULL;
265 isl_ctx *isl_schedule_constraints_get_ctx(
266 __isl_keep isl_schedule_constraints *sc)
268 return sc ? isl_union_set_get_ctx(sc->domain) : NULL;
271 /* Return the domain of "sc".
273 __isl_give isl_union_set *isl_schedule_constraints_get_domain(
274 __isl_keep isl_schedule_constraints *sc)
276 if (!sc)
277 return NULL;
279 return isl_union_set_copy(sc->domain);
282 /* Return the validity constraints of "sc".
284 __isl_give isl_union_map *isl_schedule_constraints_get_validity(
285 __isl_keep isl_schedule_constraints *sc)
287 if (!sc)
288 return NULL;
290 return isl_union_map_copy(sc->constraint[isl_edge_validity]);
293 /* Return the coincidence constraints of "sc".
295 __isl_give isl_union_map *isl_schedule_constraints_get_coincidence(
296 __isl_keep isl_schedule_constraints *sc)
298 if (!sc)
299 return NULL;
301 return isl_union_map_copy(sc->constraint[isl_edge_coincidence]);
304 /* Return the proximity constraints of "sc".
306 __isl_give isl_union_map *isl_schedule_constraints_get_proximity(
307 __isl_keep isl_schedule_constraints *sc)
309 if (!sc)
310 return NULL;
312 return isl_union_map_copy(sc->constraint[isl_edge_proximity]);
315 /* Return the conditional validity constraints of "sc".
317 __isl_give isl_union_map *isl_schedule_constraints_get_conditional_validity(
318 __isl_keep isl_schedule_constraints *sc)
320 if (!sc)
321 return NULL;
323 return
324 isl_union_map_copy(sc->constraint[isl_edge_conditional_validity]);
327 /* Return the conditions for the conditional validity constraints of "sc".
329 __isl_give isl_union_map *
330 isl_schedule_constraints_get_conditional_validity_condition(
331 __isl_keep isl_schedule_constraints *sc)
333 if (!sc)
334 return NULL;
336 return isl_union_map_copy(sc->constraint[isl_edge_condition]);
339 /* Can a schedule constraint of type "type" be tagged?
341 static int may_be_tagged(enum isl_edge_type type)
343 if (type == isl_edge_condition || type == isl_edge_conditional_validity)
344 return 1;
345 return 0;
348 /* Apply "umap" to the domains of the wrapped relations
349 * inside the domain and range of "c".
351 * That is, for each map of the form
353 * [D -> S] -> [E -> T]
355 * in "c", apply "umap" to D and E.
357 * D is exposed by currying the relation to
359 * D -> [S -> [E -> T]]
361 * E is exposed by doing the same to the inverse of "c".
363 static __isl_give isl_union_map *apply_factor_domain(
364 __isl_take isl_union_map *c, __isl_keep isl_union_map *umap)
366 c = isl_union_map_curry(c);
367 c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
368 c = isl_union_map_uncurry(c);
370 c = isl_union_map_reverse(c);
371 c = isl_union_map_curry(c);
372 c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
373 c = isl_union_map_uncurry(c);
374 c = isl_union_map_reverse(c);
376 return c;
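/* Worked example, not part of isl_scheduler.c: the currying trick used by
 * apply_factor_domain above, spelled out on a made-up tagged relation.
 * With c = { [A[i] -> R[]] -> [B[j] -> W[]] : j = i } and
 * umap = { A[i] -> A2[i] }, the first round proceeds as
 *
 *	curry:   { A[i] -> [R[] -> [B[j] -> W[]]] : j = i }
 *	apply:   { A2[i] -> [R[] -> [B[j] -> W[]]] : j = i }
 *	uncurry: { [A2[i] -> R[]] -> [B[j] -> W[]] : j = i }
 *
 * and the same three steps on the reverse of the result then rewrite B[j].
 * The helper name "factor_domain_example" is made up for illustration.
 */
static void factor_domain_example(isl_ctx *ctx)
{
	isl_union_map *c, *umap;

	c = isl_union_map_read_from_str(ctx,
		"{ [A[i] -> R[]] -> [B[j] -> W[]] : j = i }");
	umap = isl_union_map_read_from_str(ctx, "{ A[i] -> A2[i] }");

	c = isl_union_map_curry(c);
	c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
	c = isl_union_map_uncurry(c);
	/* the range side is handled the same way on the reverse of "c" */

	isl_union_map_dump(c);
	isl_union_map_free(c);
	isl_union_map_free(umap);
}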
379 /* Apply "umap" to domain and range of "c".
380 * If "tag" is set, then "c" may contain tags and then "umap"
381 * needs to be applied to the domains of the wrapped relations
382 * inside the domain and range of "c".
384 static __isl_give isl_union_map *apply(__isl_take isl_union_map *c,
385 __isl_keep isl_union_map *umap, int tag)
387 isl_union_map *t;
389 if (tag)
390 t = isl_union_map_copy(c);
391 c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
392 c = isl_union_map_apply_range(c, isl_union_map_copy(umap));
393 if (!tag)
394 return c;
395 t = apply_factor_domain(t, umap);
396 c = isl_union_map_union(c, t);
397 return c;
400 /* Apply "umap" to the domain of the schedule constraints "sc".
402 * The two sides of the various schedule constraints are adjusted
403 * accordingly.
405 __isl_give isl_schedule_constraints *isl_schedule_constraints_apply(
406 __isl_take isl_schedule_constraints *sc,
407 __isl_take isl_union_map *umap)
409 enum isl_edge_type i;
411 if (!sc || !umap)
412 goto error;
414 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
415 int tag = may_be_tagged(i);
417 sc->constraint[i] = apply(sc->constraint[i], umap, tag);
418 if (!sc->constraint[i])
419 goto error;
421 sc->domain = isl_union_set_apply(sc->domain, umap);
422 if (!sc->domain)
423 return isl_schedule_constraints_free(sc);
425 return sc;
426 error:
427 isl_schedule_constraints_free(sc);
428 isl_union_map_free(umap);
429 return NULL;
432 void isl_schedule_constraints_dump(__isl_keep isl_schedule_constraints *sc)
434 if (!sc)
435 return;
437 fprintf(stderr, "domain: ");
438 isl_union_set_dump(sc->domain);
439 fprintf(stderr, "context: ");
440 isl_set_dump(sc->context);
441 fprintf(stderr, "validity: ");
442 isl_union_map_dump(sc->constraint[isl_edge_validity]);
443 fprintf(stderr, "proximity: ");
444 isl_union_map_dump(sc->constraint[isl_edge_proximity]);
445 fprintf(stderr, "coincidence: ");
446 isl_union_map_dump(sc->constraint[isl_edge_coincidence]);
447 fprintf(stderr, "condition: ");
448 isl_union_map_dump(sc->constraint[isl_edge_condition]);
449 fprintf(stderr, "conditional_validity: ");
450 isl_union_map_dump(sc->constraint[isl_edge_conditional_validity]);
453 /* Align the parameters of the fields of "sc".
455 static __isl_give isl_schedule_constraints *
456 isl_schedule_constraints_align_params(__isl_take isl_schedule_constraints *sc)
458 isl_space *space;
459 enum isl_edge_type i;
461 if (!sc)
462 return NULL;
464 space = isl_union_set_get_space(sc->domain);
465 space = isl_space_align_params(space, isl_set_get_space(sc->context));
466 for (i = isl_edge_first; i <= isl_edge_last; ++i)
467 space = isl_space_align_params(space,
468 isl_union_map_get_space(sc->constraint[i]));
470 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
471 sc->constraint[i] = isl_union_map_align_params(
472 sc->constraint[i], isl_space_copy(space));
473 if (!sc->constraint[i])
474 space = isl_space_free(space);
476 sc->context = isl_set_align_params(sc->context, isl_space_copy(space));
477 sc->domain = isl_union_set_align_params(sc->domain, space);
478 if (!sc->context || !sc->domain)
479 return isl_schedule_constraints_free(sc);
481 return sc;
484 /* Return the total number of isl_maps in the constraints of "sc".
486 static int isl_schedule_constraints_n_map(
487 __isl_keep isl_schedule_constraints *sc)
489 enum isl_edge_type i;
490 int n = 0;
492 for (i = isl_edge_first; i <= isl_edge_last; ++i)
493 n += isl_union_map_n_map(sc->constraint[i]);
495 return n;
498 /* Internal information about a node that is used during the construction
499 * of a schedule.
500 * space represents the space in which the domain lives
501 * sched is a matrix representation of the schedule being constructed
502 * for this node; if compressed is set, then this schedule is
503 * defined over the compressed domain space
504 * sched_map is an isl_map representation of the same (partial) schedule
505 * sched_map may be NULL; if compressed is set, then this map
506 * is defined over the uncompressed domain space
507 * rank is the number of linearly independent rows in the linear part
508 * of sched
509 * the columns of cmap represent a change of basis for the schedule
510 * coefficients; the first rank columns span the linear part of
511 * the schedule rows
512 * cinv is the inverse of cmap.
513 * ctrans is the transpose of cmap.
514 * start is the first variable in the LP problem in the sequence that
515 * represents the schedule coefficients of this node
516 * nvar is the dimension of the domain
517 * nparam is the number of parameters or 0 if we are not constructing
518 * a parametric schedule
520 * If compressed is set, then hull represents the constraints
521 * that were used to derive the compression, while compress and
522 * decompress map the original space to the compressed space and
523 * vice versa.
525 * scc is the index of SCC (or WCC) this node belongs to
527 * "cluster" is only used inside extract_clusters and identifies
528 * the cluster of SCCs that the node belongs to.
530 * coincident contains a boolean for each of the rows of the schedule,
531 * indicating whether the corresponding scheduling dimension satisfies
532 * the coincidence constraints in the sense that the corresponding
533 * dependence distances are zero.
535 struct isl_sched_node {
536 isl_space *space;
537 int compressed;
538 isl_set *hull;
539 isl_multi_aff *compress;
540 isl_multi_aff *decompress;
541 isl_mat *sched;
542 isl_map *sched_map;
543 int rank;
544 isl_mat *cmap;
545 isl_mat *cinv;
546 isl_mat *ctrans;
547 int start;
548 int nvar;
549 int nparam;
551 int scc;
552 int cluster;
554 int *coincident;
557 static int node_has_space(const void *entry, const void *val)
559 struct isl_sched_node *node = (struct isl_sched_node *)entry;
560 isl_space *dim = (isl_space *)val;
562 return isl_space_is_equal(node->space, dim);
565 static int node_scc_exactly(struct isl_sched_node *node, int scc)
567 return node->scc == scc;
570 static int node_scc_at_most(struct isl_sched_node *node, int scc)
572 return node->scc <= scc;
575 static int node_scc_at_least(struct isl_sched_node *node, int scc)
577 return node->scc >= scc;
580 /* An edge in the dependence graph. An edge may be used to
581 * ensure validity of the generated schedule, to minimize the dependence
582 * distance, or both.
584 * map is the dependence relation, with i -> j in the map if j depends on i
585 * tagged_condition and tagged_validity contain the union of all tagged
586 * condition or conditional validity dependence relations that
587 * specialize the dependence relation "map"; that is,
588 * if (i -> a) -> (j -> b) is an element of "tagged_condition"
589 * or "tagged_validity", then i -> j is an element of "map".
590 * If these fields are NULL, then they represent the empty relation.
591 * src is the source node
592 * dst is the sink node
594 * types is a bit vector containing the types of this edge.
595 * validity is set if the edge is used to ensure correctness
596 * coincidence is used to enforce zero dependence distances
597 * proximity is set if the edge is used to minimize dependence distances
598 * condition is set if the edge represents a condition
599 * for a conditional validity schedule constraint
600 * local can only be set for condition edges and indicates that
601 * the dependence distance over the edge should be zero
602 * conditional_validity is set if the edge is used to conditionally
603 * ensure correctness
605 * For validity edges, start and end mark the sequence of inequality
606 * constraints in the LP problem that encode the validity constraint
607 * corresponding to this edge.
609 * During clustering, an edge may be marked "no_merge" if it should
610 * not be used to merge clusters.
611 * The weight is also only used during clustering and it is
612 * an indication of how many schedule dimensions on either side
613 * of the schedule constraints can be aligned.
614 * If the weight is negative, then this means that this edge was postponed
615 * by has_bounded_distances or any_no_merge. The original weight can
616 * be retrieved by adding 1 + graph->max_weight, with "graph"
617 * the graph containing this edge.
619 struct isl_sched_edge {
620 isl_map *map;
621 isl_union_map *tagged_condition;
622 isl_union_map *tagged_validity;
624 struct isl_sched_node *src;
625 struct isl_sched_node *dst;
627 unsigned types;
629 int start;
630 int end;
632 int no_merge;
633 int weight;
636 /* Is "edge" marked as being of type "type"?
638 static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
640 return ISL_FL_ISSET(edge->types, 1 << type);
643 /* Mark "edge" as being of type "type".
645 static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
647 ISL_FL_SET(edge->types, 1 << type);
650 /* No longer mark "edge" as being of type "type".
652 static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
654 ISL_FL_CLR(edge->types, 1 << type);
657 /* Is "edge" marked as a validity edge?
659 static int is_validity(struct isl_sched_edge *edge)
661 return is_type(edge, isl_edge_validity);
664 /* Mark "edge" as a validity edge.
666 static void set_validity(struct isl_sched_edge *edge)
668 set_type(edge, isl_edge_validity);
671 /* Is "edge" marked as a proximity edge?
673 static int is_proximity(struct isl_sched_edge *edge)
675 return is_type(edge, isl_edge_proximity);
678 /* Is "edge" marked as a local edge?
680 static int is_local(struct isl_sched_edge *edge)
682 return is_type(edge, isl_edge_local);
685 /* Mark "edge" as a local edge.
687 static void set_local(struct isl_sched_edge *edge)
689 set_type(edge, isl_edge_local);
692 /* No longer mark "edge" as a local edge.
694 static void clear_local(struct isl_sched_edge *edge)
696 clear_type(edge, isl_edge_local);
699 /* Is "edge" marked as a coincidence edge?
701 static int is_coincidence(struct isl_sched_edge *edge)
703 return is_type(edge, isl_edge_coincidence);
706 /* Is "edge" marked as a condition edge?
708 static int is_condition(struct isl_sched_edge *edge)
710 return is_type(edge, isl_edge_condition);
713 /* Is "edge" marked as a conditional validity edge?
715 static int is_conditional_validity(struct isl_sched_edge *edge)
717 return is_type(edge, isl_edge_conditional_validity);
720 /* Internal information about the dependence graph used during
721 * the construction of the schedule.
723 * intra_hmap is a cache, mapping dependence relations to their dual,
724 * for dependences from a node to itself
725 * inter_hmap is a cache, mapping dependence relations to their dual,
726 * for dependences between distinct nodes
727 * if compression is involved then the key for these maps
728 * is the original, uncompressed dependence relation, while
729 * the value is the dual of the compressed dependence relation.
731 * n is the number of nodes
732 * node is the list of nodes
733 * maxvar is the maximal number of variables over all nodes
734 * max_row is the allocated number of rows in the schedule
735 * n_row is the current (maximal) number of linearly independent
736 * rows in the node schedules
737 * n_total_row is the current number of rows in the node schedules
738 * band_start is the starting row in the node schedules of the current band
739 * root is set if this graph is the original dependence graph,
740 * without any splitting
742 * sorted contains a list of node indices sorted according to the
743 * SCC to which a node belongs
745 * n_edge is the number of edges
746 * edge is the list of edges
747 * max_edge contains the maximal number of edges of each type;
748 * in particular, it contains the number of edges in the initial graph.
749 * edge_table contains pointers into the edge array, hashed on the source
750 * and sink spaces; there is one such table for each type;
751 * a given edge may be referenced from more than one table
752 * if the corresponding relation appears in more than one of the
753 * sets of dependences; however, for each type there is only
754 * a single edge between a given pair of source and sink space
755 * in the entire graph
757 * node_table contains pointers into the node array, hashed on the space
759 * region contains a list of variable sequences that should be non-trivial
761 * lp contains the (I)LP problem used to obtain new schedule rows
763 * src_scc and dst_scc are the source and sink SCCs of an edge with
764 * conflicting constraints
766 * scc represents the number of components
767 * weak is set if the components are weakly connected
769 * max_weight is used during clustering and represents the maximal
770 * weight of the relevant proximity edges.
772 struct isl_sched_graph {
773 isl_map_to_basic_set *intra_hmap;
774 isl_map_to_basic_set *inter_hmap;
776 struct isl_sched_node *node;
777 int n;
778 int maxvar;
779 int max_row;
780 int n_row;
782 int *sorted;
784 int n_total_row;
785 int band_start;
787 int root;
789 struct isl_sched_edge *edge;
790 int n_edge;
791 int max_edge[isl_edge_last + 1];
792 struct isl_hash_table *edge_table[isl_edge_last + 1];
794 struct isl_hash_table *node_table;
795 struct isl_region *region;
797 isl_basic_set *lp;
799 int src_scc;
800 int dst_scc;
802 int scc;
803 int weak;
805 int max_weight;
808 /* Initialize node_table based on the list of nodes.
810 static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
812 int i;
814 graph->node_table = isl_hash_table_alloc(ctx, graph->n);
815 if (!graph->node_table)
816 return -1;
818 for (i = 0; i < graph->n; ++i) {
819 struct isl_hash_table_entry *entry;
820 uint32_t hash;
822 hash = isl_space_get_hash(graph->node[i].space);
823 entry = isl_hash_table_find(ctx, graph->node_table, hash,
824 &node_has_space,
825 graph->node[i].space, 1);
826 if (!entry)
827 return -1;
828 entry->data = &graph->node[i];
831 return 0;
834 /* Return a pointer to the node that lives within the given space,
835 * or NULL if there is no such node.
837 static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
838 struct isl_sched_graph *graph, __isl_keep isl_space *dim)
840 struct isl_hash_table_entry *entry;
841 uint32_t hash;
843 hash = isl_space_get_hash(dim);
844 entry = isl_hash_table_find(ctx, graph->node_table, hash,
845 &node_has_space, dim, 0);
847 return entry ? entry->data : NULL;
850 static int edge_has_src_and_dst(const void *entry, const void *val)
852 const struct isl_sched_edge *edge = entry;
853 const struct isl_sched_edge *temp = val;
855 return edge->src == temp->src && edge->dst == temp->dst;
858 /* Add the given edge to graph->edge_table[type].
860 static isl_stat graph_edge_table_add(isl_ctx *ctx,
861 struct isl_sched_graph *graph, enum isl_edge_type type,
862 struct isl_sched_edge *edge)
864 struct isl_hash_table_entry *entry;
865 uint32_t hash;
867 hash = isl_hash_init();
868 hash = isl_hash_builtin(hash, edge->src);
869 hash = isl_hash_builtin(hash, edge->dst);
870 entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
871 &edge_has_src_and_dst, edge, 1);
872 if (!entry)
873 return isl_stat_error;
874 entry->data = edge;
876 return isl_stat_ok;
879 /* Allocate the edge_tables based on the maximal number of edges of
880 * each type.
882 static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
884 int i;
886 for (i = 0; i <= isl_edge_last; ++i) {
887 graph->edge_table[i] = isl_hash_table_alloc(ctx,
888 graph->max_edge[i]);
889 if (!graph->edge_table[i])
890 return -1;
893 return 0;
896 /* If graph->edge_table[type] contains an edge from the given source
897 * to the given destination, then return the hash table entry of this edge.
898 * Otherwise, return NULL.
900 static struct isl_hash_table_entry *graph_find_edge_entry(
901 struct isl_sched_graph *graph,
902 enum isl_edge_type type,
903 struct isl_sched_node *src, struct isl_sched_node *dst)
905 isl_ctx *ctx = isl_space_get_ctx(src->space);
906 uint32_t hash;
907 struct isl_sched_edge temp = { .src = src, .dst = dst };
909 hash = isl_hash_init();
910 hash = isl_hash_builtin(hash, temp.src);
911 hash = isl_hash_builtin(hash, temp.dst);
912 return isl_hash_table_find(ctx, graph->edge_table[type], hash,
913 &edge_has_src_and_dst, &temp, 0);
917 /* If graph->edge_table[type] contains an edge from the given source
918 * to the given destination, then return this edge.
919 * Otherwise, return NULL.
921 static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
922 enum isl_edge_type type,
923 struct isl_sched_node *src, struct isl_sched_node *dst)
925 struct isl_hash_table_entry *entry;
927 entry = graph_find_edge_entry(graph, type, src, dst);
928 if (!entry)
929 return NULL;
931 return entry->data;
934 /* Check whether the dependence graph has an edge of the given type
935 * between the given two nodes.
937 static isl_bool graph_has_edge(struct isl_sched_graph *graph,
938 enum isl_edge_type type,
939 struct isl_sched_node *src, struct isl_sched_node *dst)
941 struct isl_sched_edge *edge;
942 isl_bool empty;
944 edge = graph_find_edge(graph, type, src, dst);
945 if (!edge)
946 return 0;
948 empty = isl_map_plain_is_empty(edge->map);
949 if (empty < 0)
950 return isl_bool_error;
952 return !empty;
955 /* Look for any edge with the same src, dst and map fields as "model".
957 * Return the matching edge if one can be found.
958 * Return "model" if no matching edge is found.
959 * Return NULL on error.
961 static struct isl_sched_edge *graph_find_matching_edge(
962 struct isl_sched_graph *graph, struct isl_sched_edge *model)
964 enum isl_edge_type i;
965 struct isl_sched_edge *edge;
967 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
968 int is_equal;
970 edge = graph_find_edge(graph, i, model->src, model->dst);
971 if (!edge)
972 continue;
973 is_equal = isl_map_plain_is_equal(model->map, edge->map);
974 if (is_equal < 0)
975 return NULL;
976 if (is_equal)
977 return edge;
980 return model;
983 /* Remove the given edge from all the edge_tables that refer to it.
985 static void graph_remove_edge(struct isl_sched_graph *graph,
986 struct isl_sched_edge *edge)
988 isl_ctx *ctx = isl_map_get_ctx(edge->map);
989 enum isl_edge_type i;
991 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
992 struct isl_hash_table_entry *entry;
994 entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
995 if (!entry)
996 continue;
997 if (entry->data != edge)
998 continue;
999 isl_hash_table_remove(ctx, graph->edge_table[i], entry);
1003 /* Check whether the dependence graph has any edge
1004 * between the given two nodes.
1006 static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
1007 struct isl_sched_node *src, struct isl_sched_node *dst)
1009 enum isl_edge_type i;
1010 isl_bool r;
1012 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1013 r = graph_has_edge(graph, i, src, dst);
1014 if (r < 0 || r)
1015 return r;
1018 return r;
1021 /* Check whether the dependence graph has a validity edge
1022 * between the given two nodes.
1024 * Conditional validity edges are essentially validity edges that
1025 * can be ignored if the corresponding condition edges are iteration private.
1026 * Here, we are only checking for the presence of validity
1027 * edges, so we need to consider the conditional validity edges too.
1028 * In particular, this function is used during the detection
1029 * of strongly connected components and we cannot ignore
1030 * conditional validity edges during this detection.
1032 static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
1033 struct isl_sched_node *src, struct isl_sched_node *dst)
1035 isl_bool r;
1037 r = graph_has_edge(graph, isl_edge_validity, src, dst);
1038 if (r < 0 || r)
1039 return r;
1041 return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
1044 static int graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
1045 int n_node, int n_edge)
1047 int i;
1049 graph->n = n_node;
1050 graph->n_edge = n_edge;
1051 graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
1052 graph->sorted = isl_calloc_array(ctx, int, graph->n);
1053 graph->region = isl_alloc_array(ctx, struct isl_region, graph->n);
1054 graph->edge = isl_calloc_array(ctx,
1055 struct isl_sched_edge, graph->n_edge);
1057 graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
1058 graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
1060 if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
1061 !graph->sorted)
1062 return -1;
1064 for(i = 0; i < graph->n; ++i)
1065 graph->sorted[i] = i;
1067 return 0;
1070 static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
1072 int i;
1074 isl_map_to_basic_set_free(graph->intra_hmap);
1075 isl_map_to_basic_set_free(graph->inter_hmap);
1077 if (graph->node)
1078 for (i = 0; i < graph->n; ++i) {
1079 isl_space_free(graph->node[i].space);
1080 isl_set_free(graph->node[i].hull);
1081 isl_multi_aff_free(graph->node[i].compress);
1082 isl_multi_aff_free(graph->node[i].decompress);
1083 isl_mat_free(graph->node[i].sched);
1084 isl_map_free(graph->node[i].sched_map);
1085 isl_mat_free(graph->node[i].cmap);
1086 isl_mat_free(graph->node[i].cinv);
1087 isl_mat_free(graph->node[i].ctrans);
1088 if (graph->root)
1089 free(graph->node[i].coincident);
1091 free(graph->node);
1092 free(graph->sorted);
1093 if (graph->edge)
1094 for (i = 0; i < graph->n_edge; ++i) {
1095 isl_map_free(graph->edge[i].map);
1096 isl_union_map_free(graph->edge[i].tagged_condition);
1097 isl_union_map_free(graph->edge[i].tagged_validity);
1099 free(graph->edge);
1100 free(graph->region);
1101 for (i = 0; i <= isl_edge_last; ++i)
1102 isl_hash_table_free(ctx, graph->edge_table[i]);
1103 isl_hash_table_free(ctx, graph->node_table);
1104 isl_basic_set_free(graph->lp);
1107 /* For each "set" on which this function is called, increment
1108 * graph->n by one and update graph->maxvar.
1110 static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
1112 struct isl_sched_graph *graph = user;
1113 int nvar = isl_set_dim(set, isl_dim_set);
1115 graph->n++;
1116 if (nvar > graph->maxvar)
1117 graph->maxvar = nvar;
1119 isl_set_free(set);
1121 return isl_stat_ok;
1124 /* Add the number of basic maps in "map" to *n.
1126 static isl_stat add_n_basic_map(__isl_take isl_map *map, void *user)
1128 int *n = user;
1130 *n += isl_map_n_basic_map(map);
1131 isl_map_free(map);
1133 return isl_stat_ok;
1136 /* Compute the number of rows that should be allocated for the schedule.
1137 * In particular, we need one row for each variable or one row
1138 * for each basic map in the dependences.
1139 * Note that it is practically impossible to exhaust both
1140 * the number of dependences and the number of variables.
1142 static int compute_max_row(struct isl_sched_graph *graph,
1143 __isl_keep isl_schedule_constraints *sc)
1145 enum isl_edge_type i;
1146 int n_edge;
1148 graph->n = 0;
1149 graph->maxvar = 0;
1150 if (isl_union_set_foreach_set(sc->domain, &init_n_maxvar, graph) < 0)
1151 return -1;
1152 n_edge = 0;
1153 for (i = isl_edge_first; i <= isl_edge_last; ++i)
1154 if (isl_union_map_foreach_map(sc->constraint[i],
1155 &add_n_basic_map, &n_edge) < 0)
1156 return -1;
1157 graph->max_row = n_edge + graph->maxvar;
1159 return 0;
1162 /* Does "bset" have any defining equalities for its set variables?
1164 static int has_any_defining_equality(__isl_keep isl_basic_set *bset)
1166 int i, n;
1168 if (!bset)
1169 return -1;
1171 n = isl_basic_set_dim(bset, isl_dim_set);
1172 for (i = 0; i < n; ++i) {
1173 int has;
1175 has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
1176 NULL);
1177 if (has < 0 || has)
1178 return has;
1181 return 0;
1184 /* Add a new node to the graph representing the given space.
1185 * "nvar" is the (possibly compressed) number of variables and
1186 * may be smaller than the number of set variables in "space"
1187 * if "compressed" is set.
1188 * If "compressed" is set, then "hull" represents the constraints
1189 * that were used to derive the compression, while "compress" and
1190 * "decompress" map the original space to the compressed space and
1191 * vice versa.
1192 * If "compressed" is not set, then "hull", "compress" and "decompress"
1193 * should be NULL.
1195 static isl_stat add_node(struct isl_sched_graph *graph,
1196 __isl_take isl_space *space, int nvar, int compressed,
1197 __isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
1198 __isl_take isl_multi_aff *decompress)
1200 int nparam;
1201 isl_ctx *ctx;
1202 isl_mat *sched;
1203 int *coincident;
1205 if (!space)
1206 return isl_stat_error;
1208 ctx = isl_space_get_ctx(space);
1209 nparam = isl_space_dim(space, isl_dim_param);
1210 if (!ctx->opt->schedule_parametric)
1211 nparam = 0;
1212 sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
1213 graph->node[graph->n].space = space;
1214 graph->node[graph->n].nvar = nvar;
1215 graph->node[graph->n].nparam = nparam;
1216 graph->node[graph->n].sched = sched;
1217 graph->node[graph->n].sched_map = NULL;
1218 coincident = isl_calloc_array(ctx, int, graph->max_row);
1219 graph->node[graph->n].coincident = coincident;
1220 graph->node[graph->n].compressed = compressed;
1221 graph->node[graph->n].hull = hull;
1222 graph->node[graph->n].compress = compress;
1223 graph->node[graph->n].decompress = decompress;
1224 graph->n++;
1226 if (!space || !sched || (graph->max_row && !coincident))
1227 return isl_stat_error;
1228 if (compressed && (!hull || !compress || !decompress))
1229 return isl_stat_error;
1231 return isl_stat_ok;
1234 /* Add a new node to the graph representing the given set.
1236 * If any of the set variables is defined by an equality, then
1237 * we perform variable compression such that we can perform
1238 * the scheduling on the compressed domain.
1240 static isl_stat extract_node(__isl_take isl_set *set, void *user)
1242 int nvar;
1243 int has_equality;
1244 isl_space *space;
1245 isl_basic_set *hull;
1246 isl_set *hull_set;
1247 isl_morph *morph;
1248 isl_multi_aff *compress, *decompress;
1249 struct isl_sched_graph *graph = user;
1251 space = isl_set_get_space(set);
1252 hull = isl_set_affine_hull(set);
1253 hull = isl_basic_set_remove_divs(hull);
1254 nvar = isl_space_dim(space, isl_dim_set);
1255 has_equality = has_any_defining_equality(hull);
1257 if (has_equality < 0)
1258 goto error;
1259 if (!has_equality) {
1260 isl_basic_set_free(hull);
1261 return add_node(graph, space, nvar, 0, NULL, NULL, NULL);
1264 morph = isl_basic_set_variable_compression(hull, isl_dim_set);
1265 nvar = isl_morph_ran_dim(morph, isl_dim_set);
1266 compress = isl_morph_get_var_multi_aff(morph);
1267 morph = isl_morph_inverse(morph);
1268 decompress = isl_morph_get_var_multi_aff(morph);
1269 isl_morph_free(morph);
1271 hull_set = isl_set_from_basic_set(hull);
1272 return add_node(graph, space, nvar, 1, hull_set, compress, decompress);
1273 error:
1274 isl_basic_set_free(hull);
1275 isl_space_free(space);
1276 return isl_stat_error;
1279 struct isl_extract_edge_data {
1280 enum isl_edge_type type;
1281 struct isl_sched_graph *graph;
1284 /* Merge edge2 into edge1, freeing the contents of edge2.
1285 * Return 0 on success and -1 on failure.
1287 * edge1 and edge2 are assumed to have the same value for the map field.
1289 static int merge_edge(struct isl_sched_edge *edge1,
1290 struct isl_sched_edge *edge2)
1292 edge1->types |= edge2->types;
1293 isl_map_free(edge2->map);
1295 if (is_condition(edge2)) {
1296 if (!edge1->tagged_condition)
1297 edge1->tagged_condition = edge2->tagged_condition;
1298 else
1299 edge1->tagged_condition =
1300 isl_union_map_union(edge1->tagged_condition,
1301 edge2->tagged_condition);
1304 if (is_conditional_validity(edge2)) {
1305 if (!edge1->tagged_validity)
1306 edge1->tagged_validity = edge2->tagged_validity;
1307 else
1308 edge1->tagged_validity =
1309 isl_union_map_union(edge1->tagged_validity,
1310 edge2->tagged_validity);
1313 if (is_condition(edge2) && !edge1->tagged_condition)
1314 return -1;
1315 if (is_conditional_validity(edge2) && !edge1->tagged_validity)
1316 return -1;
1318 return 0;
1321 /* Insert dummy tags in domain and range of "map".
1323 * In particular, if "map" is of the form
1325 * A -> B
1327 * then return
1329 * [A -> dummy_tag] -> [B -> dummy_tag]
1331 * where the dummy_tags are identical and equal to any dummy tags
1332 * introduced by any other call to this function.
1334 static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
1336 static char dummy;
1337 isl_ctx *ctx;
1338 isl_id *id;
1339 isl_space *space;
1340 isl_set *domain, *range;
1342 ctx = isl_map_get_ctx(map);
1344 id = isl_id_alloc(ctx, NULL, &dummy);
1345 space = isl_space_params(isl_map_get_space(map));
1346 space = isl_space_set_from_params(space);
1347 space = isl_space_set_tuple_id(space, isl_dim_set, id);
1348 space = isl_space_map_from_set(space);
1350 domain = isl_map_wrap(map);
1351 range = isl_map_wrap(isl_map_universe(space));
1352 map = isl_map_from_domain_and_range(domain, range);
1353 map = isl_map_zip(map);
1355 return map;
1358 /* Given that at least one of "src" or "dst" is compressed, return
1359 * a map between the spaces of these nodes restricted to the affine
1360 * hull that was used in the compression.
1362 static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
1363 struct isl_sched_node *dst)
1365 isl_set *dom, *ran;
1367 if (src->compressed)
1368 dom = isl_set_copy(src->hull);
1369 else
1370 dom = isl_set_universe(isl_space_copy(src->space));
1371 if (dst->compressed)
1372 ran = isl_set_copy(dst->hull);
1373 else
1374 ran = isl_set_universe(isl_space_copy(dst->space));
1376 return isl_map_from_domain_and_range(dom, ran);
1379 /* Intersect the domains of the nested relations in domain and range
1380 * of "tagged" with "map".
1382 static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
1383 __isl_keep isl_map *map)
1385 isl_set *set;
1387 tagged = isl_map_zip(tagged);
1388 set = isl_map_wrap(isl_map_copy(map));
1389 tagged = isl_map_intersect_domain(tagged, set);
1390 tagged = isl_map_zip(tagged);
1391 return tagged;
1394 /* Return a pointer to the node that lives in the domain space of "map"
1395 * or NULL if there is no such node.
1397 static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
1398 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1400 struct isl_sched_node *node;
1401 isl_space *space;
1403 space = isl_space_domain(isl_map_get_space(map));
1404 node = graph_find_node(ctx, graph, space);
1405 isl_space_free(space);
1407 return node;
1410 /* Return a pointer to the node that lives in the range space of "map"
1411 * or NULL if there is no such node.
1413 static struct isl_sched_node *find_range_node(isl_ctx *ctx,
1414 struct isl_sched_graph *graph, __isl_keep isl_map *map)
1416 struct isl_sched_node *node;
1417 isl_space *space;
1419 space = isl_space_range(isl_map_get_space(map));
1420 node = graph_find_node(ctx, graph, space);
1421 isl_space_free(space);
1423 return node;
1426 /* Add a new edge to the graph based on the given map
1427 * and add it to data->graph->edge_table[data->type].
1428 * If a dependence relation of a given type happens to be identical
1429 * to one of the dependence relations of a type that was added before,
1430 * then we don't create a new edge, but instead mark the original edge
1431 * as also representing a dependence of the current type.
1433 * Edges of type isl_edge_condition or isl_edge_conditional_validity
1434 * may be specified as "tagged" dependence relations. That is, "map"
1435 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1436 * the dependence on iterations and a and b are tags.
1437 * edge->map is set to the relation containing the elements i -> j,
1438 * while edge->tagged_condition and edge->tagged_validity contain
1439 * the union of all the "map" relations
1440 * for which extract_edge is called that result in the same edge->map.
1442 * If the source or the destination node is compressed, then
1443 * intersect both "map" and "tagged" with the constraints that
1444 * were used to construct the compression.
1445 * This ensures that there are no schedule constraints defined
1446 * outside of these domains, while the scheduler no longer has
1447 * any control over those outside parts.
1449 static isl_stat extract_edge(__isl_take isl_map *map, void *user)
1451 isl_ctx *ctx = isl_map_get_ctx(map);
1452 struct isl_extract_edge_data *data = user;
1453 struct isl_sched_graph *graph = data->graph;
1454 struct isl_sched_node *src, *dst;
1455 struct isl_sched_edge *edge;
1456 isl_map *tagged = NULL;
1458 if (data->type == isl_edge_condition ||
1459 data->type == isl_edge_conditional_validity) {
1460 if (isl_map_can_zip(map)) {
1461 tagged = isl_map_copy(map);
1462 map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
1463 } else {
1464 tagged = insert_dummy_tags(isl_map_copy(map));
1468 src = find_domain_node(ctx, graph, map);
1469 dst = find_range_node(ctx, graph, map);
1471 if (!src || !dst) {
1472 isl_map_free(map);
1473 isl_map_free(tagged);
1474 return isl_stat_ok;
1477 if (src->compressed || dst->compressed) {
1478 isl_map *hull;
1479 hull = extract_hull(src, dst);
1480 if (tagged)
1481 tagged = map_intersect_domains(tagged, hull);
1482 map = isl_map_intersect(map, hull);
1485 graph->edge[graph->n_edge].src = src;
1486 graph->edge[graph->n_edge].dst = dst;
1487 graph->edge[graph->n_edge].map = map;
1488 graph->edge[graph->n_edge].types = 0;
1489 graph->edge[graph->n_edge].tagged_condition = NULL;
1490 graph->edge[graph->n_edge].tagged_validity = NULL;
1491 set_type(&graph->edge[graph->n_edge], data->type);
1492 if (data->type == isl_edge_condition)
1493 graph->edge[graph->n_edge].tagged_condition =
1494 isl_union_map_from_map(tagged);
1495 if (data->type == isl_edge_conditional_validity)
1496 graph->edge[graph->n_edge].tagged_validity =
1497 isl_union_map_from_map(tagged);
1499 edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
1500 if (!edge) {
1501 graph->n_edge++;
1502 return isl_stat_error;
1504 if (edge == &graph->edge[graph->n_edge])
1505 return graph_edge_table_add(ctx, graph, data->type,
1506 &graph->edge[graph->n_edge++]);
1508 if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
1509 return -1;
1511 return graph_edge_table_add(ctx, graph, data->type, edge);
1514 /* Initialize the schedule graph "graph" from the schedule constraints "sc".
1516 * The context is included in the domain before the nodes of
1517 * the graphs are extracted in order to be able to exploit
1518 * any possible additional equalities.
1519 * Note that this intersection is only performed locally here.
1521 static isl_stat graph_init(struct isl_sched_graph *graph,
1522 __isl_keep isl_schedule_constraints *sc)
1524 isl_ctx *ctx;
1525 isl_union_set *domain;
1526 struct isl_extract_edge_data data;
1527 enum isl_edge_type i;
1528 isl_stat r;
1530 if (!sc)
1531 return isl_stat_error;
1533 ctx = isl_schedule_constraints_get_ctx(sc);
1535 domain = isl_schedule_constraints_get_domain(sc);
1536 graph->n = isl_union_set_n_set(domain);
1537 isl_union_set_free(domain);
1539 if (graph_alloc(ctx, graph, graph->n,
1540 isl_schedule_constraints_n_map(sc)) < 0)
1541 return isl_stat_error;
1543 if (compute_max_row(graph, sc) < 0)
1544 return isl_stat_error;
1545 graph->root = 1;
1546 graph->n = 0;
1547 domain = isl_schedule_constraints_get_domain(sc);
1548 domain = isl_union_set_intersect_params(domain,
1549 isl_set_copy(sc->context));
1550 r = isl_union_set_foreach_set(domain, &extract_node, graph);
1551 isl_union_set_free(domain);
1552 if (r < 0)
1553 return isl_stat_error;
1554 if (graph_init_table(ctx, graph) < 0)
1555 return isl_stat_error;
1556 for (i = isl_edge_first; i <= isl_edge_last; ++i)
1557 graph->max_edge[i] = isl_union_map_n_map(sc->constraint[i]);
1558 if (graph_init_edge_tables(ctx, graph) < 0)
1559 return isl_stat_error;
1560 graph->n_edge = 0;
1561 data.graph = graph;
1562 for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1563 data.type = i;
1564 if (isl_union_map_foreach_map(sc->constraint[i],
1565 &extract_edge, &data) < 0)
1566 return isl_stat_error;
1569 return isl_stat_ok;
1572 /* Check whether there is any dependence from node[j] to node[i]
1573 * or from node[i] to node[j].
1575 static isl_bool node_follows_weak(int i, int j, void *user)
1577 isl_bool f;
1578 struct isl_sched_graph *graph = user;
1580 f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
1581 if (f < 0 || f)
1582 return f;
1583 return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
1586 /* Check whether there is a (conditional) validity dependence from node[j]
1587 * to node[i], forcing node[i] to follow node[j].
1589 static isl_bool node_follows_strong(int i, int j, void *user)
1591 struct isl_sched_graph *graph = user;
1593 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
1596 /* Use Tarjan's algorithm for computing the strongly connected components
1597 * in the dependence graph only considering those edges defined by "follows".
1599 static int detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
1600 isl_bool (*follows)(int i, int j, void *user))
1602 int i, n;
1603 struct isl_tarjan_graph *g = NULL;
1605 g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
1606 if (!g)
1607 return -1;
1609 graph->scc = 0;
1610 i = 0;
1611 n = graph->n;
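	/* g->order lists the nodes of each component consecutively,
	 * with a -1 entry separating consecutive components
	 * (this is how isl_tarjan_graph_init reports its result);
	 * assign the same scc index to every node of a component.
	 */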
1612 while (n) {
1613 while (g->order[i] != -1) {
1614 graph->node[g->order[i]].scc = graph->scc;
1615 --n;
1616 ++i;
1618 ++i;
1619 graph->scc++;
1622 isl_tarjan_graph_free(g);
1624 return 0;
1627 /* Apply Tarjan's algorithm to detect the strongly connected components
1628 * in the dependence graph.
1629 * Only consider the (conditional) validity dependences and clear "weak".
1631 static int detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1633 graph->weak = 0;
1634 return detect_ccs(ctx, graph, &node_follows_strong);
1637 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1638 * in the dependence graph.
1639 * Consider all dependences and set "weak".
1641 static int detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1643 graph->weak = 1;
1644 return detect_ccs(ctx, graph, &node_follows_weak);
1647 static int cmp_scc(const void *a, const void *b, void *data)
1649 struct isl_sched_graph *graph = data;
1650 const int *i1 = a;
1651 const int *i2 = b;
1653 return graph->node[*i1].scc - graph->node[*i2].scc;
1656 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1658 static int sort_sccs(struct isl_sched_graph *graph)
1660 return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
1663 /* Given a dependence relation R from "node" to itself,
1664 * construct the set of coefficients of valid constraints for elements
1665 * in that dependence relation.
1666 * In particular, the result contains tuples of coefficients
1667 * c_0, c_n, c_x such that
1669 * c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1671 * or, equivalently,
1673 * c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1675 * We choose here to compute the dual of delta R.
1676 * Alternatively, we could have computed the dual of R, resulting
1677 * in a set of tuples c_0, c_n, c_x, c_y, and then
1678 * plugged in (c_0, c_n, c_x, -c_x).
1680 * If "node" has been compressed, then the dependence relation
1681 * is also compressed before the set of coefficients is computed.
1683 static __isl_give isl_basic_set *intra_coefficients(
1684 struct isl_sched_graph *graph, struct isl_sched_node *node,
1685 __isl_take isl_map *map)
1687 isl_set *delta;
1688 isl_map *key;
1689 isl_basic_set *coef;
1690 isl_maybe_isl_basic_set m;
1692 m = isl_map_to_basic_set_try_get(graph->intra_hmap, map);
1693 if (m.valid < 0 || m.valid) {
1694 isl_map_free(map);
1695 return m.value;
1698 key = isl_map_copy(map);
1699 if (node->compressed) {
1700 map = isl_map_preimage_domain_multi_aff(map,
1701 isl_multi_aff_copy(node->decompress));
1702 map = isl_map_preimage_range_multi_aff(map,
1703 isl_multi_aff_copy(node->decompress));
1705 delta = isl_set_remove_divs(isl_map_deltas(map));
1706 coef = isl_set_coefficients(delta);
1707 graph->intra_hmap = isl_map_to_basic_set_set(graph->intra_hmap, key,
1708 isl_basic_set_copy(coef));
1710 return coef;
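/* Small illustration, not part of isl_scheduler.c, of the duality computed
 * by intra_coefficients above.  For the made-up distance set
 * delta R = { [d] : d >= 1 }, a constraint c_0 + c_d d >= 0 holds for all
 * its elements exactly when c_d >= 0 and c_0 + c_d >= 0, and that cone is
 * what isl_set_coefficients returns.  The helper name
 * "coefficients_example" is made up for illustration.
 */
static void coefficients_example(isl_ctx *ctx)
{
	isl_set *delta;
	isl_basic_set *coef;

	delta = isl_set_read_from_str(ctx, "{ [d] : d >= 1 }");
	coef = isl_set_coefficients(delta);
	/* coef lives in a wrapped space of the form
	 * coefficients[[cst, params] -> S]; see coef_var_offset below.
	 */
	isl_basic_set_dump(coef);
	isl_basic_set_free(coef);
}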
1713 /* Given a dependence relation R, construct the set of coefficients
1714 * of valid constraints for elements in that dependence relation.
1715 * In particular, the result contains tuples of coefficients
1716 * c_0, c_n, c_x, c_y such that
1718 * c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1720 * If the source or destination nodes of "edge" have been compressed,
1721 * then the dependence relation is also compressed before
1722 * the set of coefficients is computed.
1724 static __isl_give isl_basic_set *inter_coefficients(
1725 struct isl_sched_graph *graph, struct isl_sched_edge *edge,
1726 __isl_take isl_map *map)
1728 isl_set *set;
1729 isl_map *key;
1730 isl_basic_set *coef;
1731 isl_maybe_isl_basic_set m;
1733 m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
1734 if (m.valid < 0 || m.valid) {
1735 isl_map_free(map);
1736 return m.value;
1739 key = isl_map_copy(map);
1740 if (edge->src->compressed)
1741 map = isl_map_preimage_domain_multi_aff(map,
1742 isl_multi_aff_copy(edge->src->decompress));
1743 if (edge->dst->compressed)
1744 map = isl_map_preimage_range_multi_aff(map,
1745 isl_multi_aff_copy(edge->dst->decompress));
1746 set = isl_map_wrap(isl_map_remove_divs(map));
1747 coef = isl_set_coefficients(set);
1748 graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
1749 isl_basic_set_copy(coef));
1751 return coef;
1754 /* Return the position of the coefficients of the variables in
1755 * the coefficients constraints "coef".
1757 * The space of "coef" is of the form
1759 * { coefficients[[cst, params] -> S] }
1761 * Return the position of S.
1763 static int coef_var_offset(__isl_keep isl_basic_set *coef)
1765 int offset;
1766 isl_space *space;
1768 space = isl_space_unwrap(isl_basic_set_get_space(coef));
1769 offset = isl_space_dim(space, isl_dim_in);
1770 isl_space_free(space);
1772 return offset;
1775 /* Construct an isl_dim_map for mapping constraints on coefficients
1776 * for "node" to the corresponding positions in graph->lp.
1777 * "offset" is the offset of the coefficients for the variables
1778 * in the input constraints.
1779 * "s" is the sign of the mapping.
1781 * The input constraints are given in terms of the coefficients (c_0, c_n, c_x).
1782 * The mapping produced by this function essentially plugs in
1783 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
1784 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
1785 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1787 * The caller can extend the mapping to also map the other coefficients
1788 * (and therefore not plug in 0).
1790 static __isl_give isl_dim_map *intra_dim_map(isl_ctx *ctx,
1791 struct isl_sched_graph *graph, struct isl_sched_node *node,
1792 int offset, int s)
1794 int pos;
1795 unsigned total;
1796 isl_dim_map *dim_map;
1798 total = isl_basic_set_total_dim(graph->lp);
1799 pos = node->start + 1 + 2 * node->nparam;
1800 dim_map = isl_dim_map_alloc(ctx, total);
1801 isl_dim_map_range(dim_map, pos, 2, offset, 1, node->nvar, -s);
1802 isl_dim_map_range(dim_map, pos + 1, 2, offset, 1, node->nvar, s);
1804 return dim_map;
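/* Note added for exposition, not part of isl_scheduler.c: each variable
 * coefficient occupies two adjacent LP columns because a coefficient of
 * arbitrary sign is written as a difference of two non-negative variables,
 * c_i_x = c_i_x^+ - c_i_x^-.  For example, c_i_x = -3 is encoded as the
 * pair (c_i_x^-, c_i_x^+) = (3, 0) and c_i_x = 2 as (0, 2), which is why
 * intra_dim_map maps the same input column once with sign -s (to the
 * c_i_x^- position) and once with sign s (to the c_i_x^+ position).
 */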
1807 /* Construct an isl_dim_map for mapping constraints on coefficients
1808 * for "src" (node i) and "dst" (node j) to the corresponding positions
1809 * in graph->lp.
1810 * "offset" is the offset of the coefficients for the variables of "src"
1811 * in the input constraints.
1812 * "s" is the sign of the mapping.
1814 * The input constraints are given in terms of the coefficients
1815 * (c_0, c_n, c_x, c_y).
1816 * The mapping produced by this function essentially plugs in
1817 * (c_j_0 - c_i_0, c_j_n^+ - c_j_n^- - (c_i_n^+ - c_i_n^-),
1818 * c_j_x^+ - c_j_x^-, -(c_i_x^+ - c_i_x^-)) if s = 1 and
1819 * (-c_j_0 + c_i_0, - (c_j_n^+ - c_j_n^-) + (c_i_n^+ - c_i_n^-),
1820 * - (c_j_x^+ - c_j_x^-), c_i_x^+ - c_i_x^-) if s = -1.
1821 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1823 * The caller can further extend the mapping.
1825 static __isl_give isl_dim_map *inter_dim_map(isl_ctx *ctx,
1826 struct isl_sched_graph *graph, struct isl_sched_node *src,
1827 struct isl_sched_node *dst, int offset, int s)
1829 int pos;
1830 unsigned total;
1831 isl_dim_map *dim_map;
1833 total = isl_basic_set_total_dim(graph->lp);
1834 dim_map = isl_dim_map_alloc(ctx, total);
1836 isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, s);
1837 isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -s);
1838 isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, s);
1839 pos = dst->start + 1 + 2 * dst->nparam;
1840 isl_dim_map_range(dim_map, pos, 2, offset + src->nvar, 1,
1841 dst->nvar, -s);
1842 isl_dim_map_range(dim_map, pos + 1, 2, offset + src->nvar, 1,
1843 dst->nvar, s);
1845 isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -s);
1846 isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, s);
1847 isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -s);
1848 pos = src->start + 1 + 2 * src->nparam;
1849 isl_dim_map_range(dim_map, pos, 2, offset, 1, src->nvar, s);
1850 isl_dim_map_range(dim_map, pos + 1, 2, offset, 1, src->nvar, -s);
1852 return dim_map;
1855 /* Add constraints to graph->lp that force validity for the given
1856 * dependence from a node i to itself.
1857 * That is, add constraints that enforce
1859 * (c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
1860 * = c_i_x (y - x) >= 0
1862 * for each (x,y) in R.
1863 * We obtain general constraints on coefficients (c_0, c_n, c_x)
1864 * of valid constraints for (y - x) and then plug in (0, 0, c_i_x^+ - c_i_x^-),
1865 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
1866 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1868 * Actually, we do not construct constraints for the c_i_x themselves,
1869 * but for the coefficients of c_i_x written as a linear combination
1870 * of the columns in node->cmap.
1872 static isl_stat add_intra_validity_constraints(struct isl_sched_graph *graph,
1873 struct isl_sched_edge *edge)
1875 int offset;
1876 isl_map *map = isl_map_copy(edge->map);
1877 isl_ctx *ctx = isl_map_get_ctx(map);
1878 isl_dim_map *dim_map;
1879 isl_basic_set *coef;
1880 struct isl_sched_node *node = edge->src;
1882 coef = intra_coefficients(graph, node, map);
1884 offset = coef_var_offset(coef);
1886 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1887 offset, isl_mat_copy(node->cmap));
1888 if (!coef)
1889 return isl_stat_error;
1891 dim_map = intra_dim_map(ctx, graph, node, offset, 1);
1892 graph->lp = isl_basic_set_extend_constraints(graph->lp,
1893 coef->n_eq, coef->n_ineq);
1894 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
1895 coef, dim_map);
1897 return isl_stat_ok;
1900 /* Add constraints to graph->lp that force validity for the given
1901 * dependence from node i to node j.
1902 * That is, add constraints that enforce
1904 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
1906 * for each (x,y) in R.
1907 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
1908 * of valid constraints for R and then plug in
1909 * (c_j_0 - c_i_0, c_j_n^+ - c_j_n^- - (c_i_n^+ - c_i_n^-),
1910 * c_j_x^+ - c_j_x^- - (c_i_x^+ - c_i_x^-)),
1911 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
1912 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
1914 * Actually, we do not construct constraints for the c_*_x themselves,
1915 * but for the coefficients of c_*_x written as a linear combination
1916 * of the columns in node->cmap.
1918 static isl_stat add_inter_validity_constraints(struct isl_sched_graph *graph,
1919 struct isl_sched_edge *edge)
1921 int offset;
1922 isl_map *map = isl_map_copy(edge->map);
1923 isl_ctx *ctx = isl_map_get_ctx(map);
1924 isl_dim_map *dim_map;
1925 isl_basic_set *coef;
1926 struct isl_sched_node *src = edge->src;
1927 struct isl_sched_node *dst = edge->dst;
1929 coef = inter_coefficients(graph, edge, map);
1931 offset = coef_var_offset(coef);
1933 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1934 offset, isl_mat_copy(src->cmap));
1935 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
1936 offset + src->nvar, isl_mat_copy(dst->cmap));
1937 if (!coef)
1938 return isl_stat_error;
1940 dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
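	/* Record the range of inequality positions added for this edge,
	 * so that check_conflict() can trace a conflicting constraint
	 * back to the edge.
	 */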
1942 edge->start = graph->lp->n_ineq;
1943 graph->lp = isl_basic_set_extend_constraints(graph->lp,
1944 coef->n_eq, coef->n_ineq);
1945 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
1946 coef, dim_map);
1947 if (!graph->lp)
1948 return isl_stat_error;
1949 edge->end = graph->lp->n_ineq;
1951 return isl_stat_ok;
1954 /* Add constraints to graph->lp that bound the dependence distance for the given
1955 * dependence from a node i to itself.
1956 * If s = 1, we add the constraint
1958 * c_i_x (y - x) <= m_0 + m_n n
1960 * or
1962 * -c_i_x (y - x) + m_0 + m_n n >= 0
1964 * for each (x,y) in R.
1965 * If s = -1, we add the constraint
1967 * -c_i_x (y - x) <= m_0 + m_n n
1969 * or
1971 * c_i_x (y - x) + m_0 + m_n n >= 0
1973 * for each (x,y) in R.
1974 * We obtain general constraints on coefficients (c_0, c_n, c_x)
1975 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
1976 * with each coefficient (except m_0) represented as a pair of non-negative
1977 * coefficients.
1979 * Actually, we do not construct constraints for the c_i_x themselves,
1980 * but for the coefficients of c_i_x written as a linear combination
1981 * of the columns in node->cmap.
1984 * If "local" is set, then we add constraints
1986 * c_i_x (y - x) <= 0
1988 * or
1990 * -c_i_x (y - x) <= 0
1992 * instead, forcing the dependence distance to be (less than or) equal to 0.
1993 * That is, we plug in (0, 0, -s * c_i_x).
1994 * Note that dependences marked local are treated as validity constraints
1995 * by add_all_validity_constraints and therefore also have
1996 * their distances bounded by 0 from below.
1998 static isl_stat add_intra_proximity_constraints(struct isl_sched_graph *graph,
1999 struct isl_sched_edge *edge, int s, int local)
2001 int offset;
2002 unsigned nparam;
2003 isl_map *map = isl_map_copy(edge->map);
2004 isl_ctx *ctx = isl_map_get_ctx(map);
2005 isl_dim_map *dim_map;
2006 isl_basic_set *coef;
2007 struct isl_sched_node *node = edge->src;
2009 coef = intra_coefficients(graph, node, map);
2011 offset = coef_var_offset(coef);
2013 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
2014 offset, isl_mat_copy(node->cmap));
2015 if (!coef)
2016 return isl_stat_error;
2018 nparam = isl_space_dim(node->space, isl_dim_param);
2019 dim_map = intra_dim_map(ctx, graph, node, offset, -s);
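	/* Unless the distance is forced to be zero, the bound m_0 + m_n n
	 * also enters the constraint: the constant term is mapped onto m_0
	 * (column 1) and the parameter coefficients onto the negative and
	 * positive parts of m_n (columns 4, 6, ... and 5, 7, ...).
	 */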
2021 if (!local) {
2022 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2023 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2024 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2026 graph->lp = isl_basic_set_extend_constraints(graph->lp,
2027 coef->n_eq, coef->n_ineq);
2028 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
2029 coef, dim_map);
2031 return isl_stat_ok;
2034 /* Add constraints to graph->lp that bound the dependence distance for the given
2035 * dependence from node i to node j.
2036 * If s = 1, we add the constraint
2038 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
2039 * <= m_0 + m_n n
2041 * or
2043 * -(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
2044 * m_0 + m_n n >= 0
2046 * for each (x,y) in R.
2047 * If s = -1, we add the constraint
2049 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
2050 * <= m_0 + m_n n
2052 * or
2054 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
2055 * m_0 + m_n n >= 0
2057 * for each (x,y) in R.
2058 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2059 * of valid constraints for R and then plug in
2060 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
2061 * -s*c_j_x+s*c_i_x)
2062 * with each coefficient (except m_0, c_j_0 and c_i_0)
2063 * represented as a pair of non-negative coefficients.
2065 * Actually, we do not construct constraints for the c_*_x themselves,
2066 * but for the coefficients of c_*_x written as a linear combination
2067 * of the columns in node->cmap.
2070 * If "local" is set, then we add constraints
2072 * (c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
2074 * or
2076 * -((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) <= 0
2078 * instead, forcing the dependence distance to be (less than or) equal to 0.
2079 * That is, we plug in
2080 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, -s*c_j_x+s*c_i_x).
2081 * Note that dependences marked local are treated as validity constraints
2082 * by add_all_validity_constraints and therefore also have
2083 * their distances bounded by 0 from below.
2085 static isl_stat add_inter_proximity_constraints(struct isl_sched_graph *graph,
2086 struct isl_sched_edge *edge, int s, int local)
2088 int offset;
2089 unsigned nparam;
2090 isl_map *map = isl_map_copy(edge->map);
2091 isl_ctx *ctx = isl_map_get_ctx(map);
2092 isl_dim_map *dim_map;
2093 isl_basic_set *coef;
2094 struct isl_sched_node *src = edge->src;
2095 struct isl_sched_node *dst = edge->dst;
2097 coef = inter_coefficients(graph, edge, map);
2099 offset = coef_var_offset(coef);
2101 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
2102 offset, isl_mat_copy(src->cmap));
2103 coef = isl_basic_set_transform_dims(coef, isl_dim_set,
2104 offset + src->nvar, isl_mat_copy(dst->cmap));
2105 if (!coef)
2106 return isl_stat_error;
2108 nparam = isl_space_dim(src->space, isl_dim_param);
2109 dim_map = inter_dim_map(ctx, graph, src, dst, offset, -s);
2111 if (!local) {
2112 isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2113 isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2114 isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2117 graph->lp = isl_basic_set_extend_constraints(graph->lp,
2118 coef->n_eq, coef->n_ineq);
2119 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
2120 coef, dim_map);
2122 return isl_stat_ok;
2125 /* Add all validity constraints to graph->lp.
2127 * An edge that is forced to be local needs to have its dependence
2128 * distances equal to zero. We take care of bounding them by 0 from below
2129 * here. add_all_proximity_constraints takes care of bounding them by 0
2130 * from above.
2132 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2133 * Otherwise, we ignore them.
2135 static int add_all_validity_constraints(struct isl_sched_graph *graph,
2136 int use_coincidence)
2138 int i;
2140 for (i = 0; i < graph->n_edge; ++i) {
2141 struct isl_sched_edge *edge = &graph->edge[i];
2142 int local;
2144 local = is_local(edge) ||
2145 (is_coincidence(edge) && use_coincidence);
2146 if (!is_validity(edge) && !local)
2147 continue;
2148 if (edge->src != edge->dst)
2149 continue;
2150 if (add_intra_validity_constraints(graph, edge) < 0)
2151 return -1;
2154 for (i = 0; i < graph->n_edge; ++i) {
2155 struct isl_sched_edge *edge = &graph->edge[i];
2156 int local;
2158 local = is_local(edge) ||
2159 (is_coincidence(edge) && use_coincidence);
2160 if (!is_validity(edge) && !local)
2161 continue;
2162 if (edge->src == edge->dst)
2163 continue;
2164 if (add_inter_validity_constraints(graph, edge) < 0)
2165 return -1;
2168 return 0;
2171 /* Add constraints to graph->lp that bound the dependence distance
2172 * for all dependence relations.
2173 * If a given proximity dependence is identical to a validity
2174 * dependence, then the dependence distance is already bounded
2175 * from below (by zero), so we only need to bound the distance
2176 * from above. (This includes the case of "local" dependences
2177 * which are treated as validity dependences by add_all_validity_constraints.)
2178 * Otherwise, we need to bound the distance both from above and from below.
2180 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2181 * Otherwise, we ignore them.
2183 static int add_all_proximity_constraints(struct isl_sched_graph *graph,
2184 int use_coincidence)
2186 int i;
2188 for (i = 0; i < graph->n_edge; ++i) {
2189 struct isl_sched_edge *edge = &graph->edge[i];
2190 int local;
2192 local = is_local(edge) ||
2193 (is_coincidence(edge) && use_coincidence);
2194 if (!is_proximity(edge) && !local)
2195 continue;
2196 if (edge->src == edge->dst &&
2197 add_intra_proximity_constraints(graph, edge, 1, local) < 0)
2198 return -1;
2199 if (edge->src != edge->dst &&
2200 add_inter_proximity_constraints(graph, edge, 1, local) < 0)
2201 return -1;
2202 if (is_validity(edge) || local)
2203 continue;
2204 if (edge->src == edge->dst &&
2205 add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
2206 return -1;
2207 if (edge->src != edge->dst &&
2208 add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
2209 return -1;
2212 return 0;
2215 /* Compute a basis for the rows in the linear part of the schedule
2216 * and extend this basis to a full basis. The remaining rows
2217 * can then be used to force linear independence from the rows
2218 * in the schedule.
2220 * In particular, given the schedule rows S, we compute
2222 * S = H Q
2223 * S U = H
2225 * with H the Hermite normal form of S. That is, all but the
2226 * first rank columns of H are zero and so each row in S is
2227 * a linear combination of the first rank rows of Q.
2228 * The matrix Q is then transposed because we will write the
2229 * coefficients of the next schedule row as a column vector s
2230 * and express this s as a linear combination s = Q c of the
2231 * computed basis.
2232 * Similarly, the matrix U is transposed such that we can
2233 * compute the coefficients c = U s from a schedule row s.
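 * For example, for a single schedule row S = [1 2], one possible
 * decomposition is H = [1 0], Q = [[1 2], [0 1]] and U = [[1 -2], [0 1]],
 * so that S = H Q, S U = H and the rank is 1. A new schedule row s,
 * written as a column vector, then satisfies s = cmap c and c = cinv s,
 * with cmap and cinv the transposes of Q and U.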
2235 static int node_update_cmap(struct isl_sched_node *node)
2237 isl_mat *H, *U, *Q;
2238 int n_row = isl_mat_rows(node->sched);
2240 H = isl_mat_sub_alloc(node->sched, 0, n_row,
2241 1 + node->nparam, node->nvar);
2243 H = isl_mat_left_hermite(H, 0, &U, &Q);
2244 isl_mat_free(node->cmap);
2245 isl_mat_free(node->cinv);
2246 isl_mat_free(node->ctrans);
2247 node->ctrans = isl_mat_copy(Q);
2248 node->cmap = isl_mat_transpose(Q);
2249 node->cinv = isl_mat_transpose(U);
2250 node->rank = isl_mat_initial_non_zero_cols(H);
2251 isl_mat_free(H);
2253 if (!node->cmap || !node->cinv || !node->ctrans || node->rank < 0)
2254 return -1;
2255 return 0;
2258 /* Is "edge" marked as a validity or a conditional validity edge?
2260 static int is_any_validity(struct isl_sched_edge *edge)
2262 return is_validity(edge) || is_conditional_validity(edge);
2265 /* How many times should we count the constraints in "edge"?
2267 * If carry is set, then we are counting the number of
2268 * (validity or conditional validity) constraints that will be added
2269 * in setup_carry_lp and we count each edge exactly once.
2271 * Otherwise, we count as follows
2272 * validity -> 1 (>= 0)
2273 * validity+proximity -> 2 (>= 0 and upper bound)
2274 * proximity -> 2 (lower and upper bound)
2275 * local(+any) -> 2 (>= 0 and <= 0)
2277 * If an edge is only marked conditional_validity then it counts
2278 * as zero since it is only checked afterwards.
2280 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2281 * Otherwise, we ignore them.
2283 static int edge_multiplicity(struct isl_sched_edge *edge, int carry,
2284 int use_coincidence)
2286 if (carry)
2287 return 1;
2288 if (is_proximity(edge) || is_local(edge))
2289 return 2;
2290 if (use_coincidence && is_coincidence(edge))
2291 return 2;
2292 if (is_validity(edge))
2293 return 1;
2294 return 0;
2297 /* Count the number of equality and inequality constraints
2298 * that will be added for the given map.
2300 * "use_coincidence" is set if we should take into account coincidence edges.
2302 static int count_map_constraints(struct isl_sched_graph *graph,
2303 struct isl_sched_edge *edge, __isl_take isl_map *map,
2304 int *n_eq, int *n_ineq, int carry, int use_coincidence)
2306 isl_basic_set *coef;
2307 int f = edge_multiplicity(edge, carry, use_coincidence);
2309 if (f == 0) {
2310 isl_map_free(map);
2311 return 0;
2314 if (edge->src == edge->dst)
2315 coef = intra_coefficients(graph, edge->src, map);
2316 else
2317 coef = inter_coefficients(graph, edge, map);
2318 if (!coef)
2319 return -1;
2320 *n_eq += f * coef->n_eq;
2321 *n_ineq += f * coef->n_ineq;
2322 isl_basic_set_free(coef);
2324 return 0;
2327 /* Count the number of equality and inequality constraints
2328 * that will be added to the main lp problem.
2329 * We count as follows
2330 * validity -> 1 (>= 0)
2331 * validity+proximity -> 2 (>= 0 and upper bound)
2332 * proximity -> 2 (lower and upper bound)
2333 * local(+any) -> 2 (>= 0 and <= 0)
2335 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2336 * Otherwise, we ignore them.
2338 static int count_constraints(struct isl_sched_graph *graph,
2339 int *n_eq, int *n_ineq, int use_coincidence)
2341 int i;
2343 *n_eq = *n_ineq = 0;
2344 for (i = 0; i < graph->n_edge; ++i) {
2345 struct isl_sched_edge *edge = &graph->edge[i];
2346 isl_map *map = isl_map_copy(edge->map);
2348 if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
2349 0, use_coincidence) < 0)
2350 return -1;
2353 return 0;
2356 /* Count the number of constraints that will be added by
2357 * add_bound_constant_constraints to bound the values of the constant terms
2358 * and increment *n_eq and *n_ineq accordingly.
2360 * In practice, add_bound_constant_constraints only adds inequalities.
2362 static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
2363 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2365 if (isl_options_get_schedule_max_constant_term(ctx) == -1)
2366 return isl_stat_ok;
2368 *n_ineq += graph->n;
2370 return isl_stat_ok;
2373 /* Add constraints to bound the values of the constant terms in the schedule,
2374 * if requested by the user.
2376 * The maximal value of the constant terms is defined by the option
2377 * "schedule_max_constant_term".
2379 * Within each node, the coefficients have the following order:
2380 * - c_i_0
2381 * - positive and negative parts of c_i_n (if parametric)
2382 * - positive and negative parts of c_i_x
2384 static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
2385 struct isl_sched_graph *graph)
2387 int i, k;
2388 int max;
2389 int total;
2391 max = isl_options_get_schedule_max_constant_term(ctx);
2392 if (max == -1)
2393 return isl_stat_ok;
2395 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2397 for (i = 0; i < graph->n; ++i) {
2398 struct isl_sched_node *node = &graph->node[i];
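		/* Add the inequality max - c_i_0 >= 0,
		 * i.e., bound the constant term of this node by "max".
		 */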
2399 k = isl_basic_set_alloc_inequality(graph->lp);
2400 if (k < 0)
2401 return isl_stat_error;
2402 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2403 isl_int_set_si(graph->lp->ineq[k][1 + node->start], -1);
2404 isl_int_set_si(graph->lp->ineq[k][0], max);
2407 return isl_stat_ok;
2410 /* Count the number of constraints that will be added by
2411 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
2412 * accordingly.
2414 * In practice, add_bound_coefficient_constraints only adds inequalities.
2416 static int count_bound_coefficient_constraints(isl_ctx *ctx,
2417 struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2419 int i;
2421 if (ctx->opt->schedule_max_coefficient == -1)
2422 return 0;
2424 for (i = 0; i < graph->n; ++i)
2425 *n_ineq += 2 * graph->node[i].nparam + 2 * graph->node[i].nvar;
2427 return 0;
2430 /* Add constraints that bound the values of the variable and parameter
2431 * coefficients of the schedule.
2433 * The maximal value of the coefficients is defined by the option
2434 * "schedule_max_coefficient".
2436 static int add_bound_coefficient_constraints(isl_ctx *ctx,
2437 struct isl_sched_graph *graph)
2439 int i, j, k;
2440 int max_coefficient;
2441 int total;
2443 max_coefficient = ctx->opt->schedule_max_coefficient;
2445 if (max_coefficient == -1)
2446 return 0;
2448 total = isl_basic_set_total_dim(graph->lp);
2450 for (i = 0; i < graph->n; ++i) {
2451 struct isl_sched_node *node = &graph->node[i];
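		/* For each positive and negative part of a parameter or
		 * variable coefficient, add the inequality
		 * max_coefficient - part >= 0.
		 */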
2452 for (j = 0; j < 2 * node->nparam + 2 * node->nvar; ++j) {
2453 int dim;
2454 k = isl_basic_set_alloc_inequality(graph->lp);
2455 if (k < 0)
2456 return -1;
2457 dim = 1 + node->start + 1 + j;
2458 isl_seq_clr(graph->lp->ineq[k], 1 + total);
2459 isl_int_set_si(graph->lp->ineq[k][dim], -1);
2460 isl_int_set_si(graph->lp->ineq[k][0], max_coefficient);
2464 return 0;
2467 /* Add a constraint to graph->lp that equates the value at position
2468 * "sum_pos" to the sum of the "n" values starting at "first".
2470 static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
2471 int sum_pos, int first, int n)
2473 int i, k;
2474 int total;
2476 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2478 k = isl_basic_set_alloc_equality(graph->lp);
2479 if (k < 0)
2480 return isl_stat_error;
2481 isl_seq_clr(graph->lp->eq[k], 1 + total);
2482 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2483 for (i = 0; i < n; ++i)
2484 isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);
2486 return isl_stat_ok;
2489 /* Add a constraint to graph->lp that equates the value at position
2490 * "sum_pos" to the sum of the parameter coefficients of all nodes.
2492 * Within each node, the coefficients have the following order:
2493 * - c_i_0
2494 * - positive and negative parts of c_i_n (if parametric)
2495 * - positive and negative parts of c_i_x
2497 static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
2498 int sum_pos)
2500 int i, j, k;
2501 int total;
2503 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2505 k = isl_basic_set_alloc_equality(graph->lp);
2506 if (k < 0)
2507 return isl_stat_error;
2508 isl_seq_clr(graph->lp->eq[k], 1 + total);
2509 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2510 for (i = 0; i < graph->n; ++i) {
2511 int pos = 1 + graph->node[i].start + 1;
2513 for (j = 0; j < 2 * graph->node[i].nparam; ++j)
2514 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2517 return isl_stat_ok;
2520 /* Add a constraint to graph->lp that equates the value at position
2521 * "sum_pos" to the sum of the variable coefficients of all nodes.
2523 * Within each node, the coefficients have the following order:
2524 * - c_i_0
2525 * - positive and negative parts of c_i_n (if parametric)
2526 * - positive and negative parts of c_i_x
2528 static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
2529 int sum_pos)
2531 int i, j, k;
2532 int total;
2534 total = isl_basic_set_dim(graph->lp, isl_dim_set);
2536 k = isl_basic_set_alloc_equality(graph->lp);
2537 if (k < 0)
2538 return isl_stat_error;
2539 isl_seq_clr(graph->lp->eq[k], 1 + total);
2540 isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2541 for (i = 0; i < graph->n; ++i) {
2542 struct isl_sched_node *node = &graph->node[i];
2543 int pos = 1 + node->start + 1 + 2 * node->nparam;
2545 for (j = 0; j < 2 * node->nvar; ++j)
2546 isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2549 return isl_stat_ok;
2552 /* Construct an ILP problem for finding schedule coefficients
2553 * that result in non-negative, but small dependence distances
2554 * over all dependences.
2555 * In particular, the dependence distances over proximity edges
2556 * are bounded by m_0 + m_n n and we compute schedule coefficients
2557 * with small values (preferably zero) of m_n and m_0.
2559 * All variables of the ILP are non-negative. The actual coefficients
2560 * may be negative, so each coefficient is represented as the difference
2561 * of two non-negative variables. The negative part always appears
2562 * immediately before the positive part.
2563 * Other than that, the variables have the following order
2565 * - sum of positive and negative parts of m_n coefficients
2566 * - m_0
2567 * - sum of positive and negative parts of all c_n coefficients
2568 * (unconstrained when computing non-parametric schedules)
2569 * - sum of positive and negative parts of all c_x coefficients
2570 * - positive and negative parts of m_n coefficients
2571 * - for each node
2572 * - c_i_0
2573 * - positive and negative parts of c_i_n (if parametric)
2574 * - positive and negative parts of c_i_x
2576 * The c_i_x are not represented directly, but through the columns of
2577 * node->cmap. That is, the computed values are for variable t_i_x
2578 * such that c_i_x = Q t_i_x with Q equal to node->cmap.
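 * For instance, with a single parameter and a single node with
 * nparam = 1 and nvar = 2, the layout is: column 0 holds the sum of
 * the m_n parts, column 1 holds m_0, columns 2 and 3 hold the sums of
 * the c_n and c_x parts, columns 4 and 5 hold the negative and positive
 * parts of m_n, column 6 holds c_i_0 (node->start = 6), columns 7 and 8
 * hold the parts of c_i_n and columns 9 through 12 hold the parts of
 * the two c_i_x (t_i_x) coefficients, for a total of 13 columns.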
2580 * The constraints are those from the edges plus two or three equalities
2581 * to express the sums.
2583 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2584 * Otherwise, we ignore them.
2586 static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
2587 int use_coincidence)
2589 int i;
2590 unsigned nparam;
2591 unsigned total;
2592 isl_space *space;
2593 int parametric;
2594 int param_pos;
2595 int n_eq, n_ineq;
2597 parametric = ctx->opt->schedule_parametric;
2598 nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
2599 param_pos = 4;
2600 total = param_pos + 2 * nparam;
2601 for (i = 0; i < graph->n; ++i) {
2602 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
2603 if (node_update_cmap(node) < 0)
2604 return isl_stat_error;
2605 node->start = total;
2606 total += 1 + 2 * (node->nparam + node->nvar);
2609 if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
2610 return isl_stat_error;
2611 if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2612 return isl_stat_error;
2613 if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2614 return isl_stat_error;
2616 space = isl_space_set_alloc(ctx, 0, total);
2617 isl_basic_set_free(graph->lp);
2618 n_eq += 2 + parametric;
2620 graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
2622 if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
2623 return isl_stat_error;
2624 if (parametric && add_param_sum_constraint(graph, 2) < 0)
2625 return isl_stat_error;
2626 if (add_var_sum_constraint(graph, 3) < 0)
2627 return isl_stat_error;
2628 if (add_bound_constant_constraints(ctx, graph) < 0)
2629 return isl_stat_error;
2630 if (add_bound_coefficient_constraints(ctx, graph) < 0)
2631 return isl_stat_error;
2632 if (add_all_validity_constraints(graph, use_coincidence) < 0)
2633 return isl_stat_error;
2634 if (add_all_proximity_constraints(graph, use_coincidence) < 0)
2635 return isl_stat_error;
2637 return isl_stat_ok;
2640 /* Analyze the conflicting constraint found by
2641 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
2642 * constraint of one of the edges between distinct nodes, living, moreover,
2643 * in distinct SCCs, then record the source and sink SCC as this may
2644 * be a good place to cut between SCCs.
2646 static int check_conflict(int con, void *user)
2648 int i;
2649 struct isl_sched_graph *graph = user;
2651 if (graph->src_scc >= 0)
2652 return 0;
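	/* The conflicting constraint index counts the equalities first;
	 * turn it into an index into the inequality constraints and
	 * ignore conflicts that do not correspond to the inequalities
	 * of an edge.
	 */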
2654 con -= graph->lp->n_eq;
2656 if (con >= graph->lp->n_ineq)
2657 return 0;
2659 for (i = 0; i < graph->n_edge; ++i) {
2660 if (!is_validity(&graph->edge[i]))
2661 continue;
2662 if (graph->edge[i].src == graph->edge[i].dst)
2663 continue;
2664 if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
2665 continue;
2666 if (graph->edge[i].start > con)
2667 continue;
2668 if (graph->edge[i].end <= con)
2669 continue;
2670 graph->src_scc = graph->edge[i].src->scc;
2671 graph->dst_scc = graph->edge[i].dst->scc;
2674 return 0;
2677 /* Check whether the next schedule row of the given node needs to be
2678 * non-trivial. Lower-dimensional domains may have some trivial rows,
2679 * but as soon as the number of remaining required non-trivial rows
2680 * is as large as the number of remaining rows to be computed,
2681 * all remaining rows need to be non-trivial.
2683 static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
2685 return node->nvar - node->rank >= graph->maxvar - graph->n_row;
2688 /* Solve the ILP problem constructed in setup_lp.
2689 * For each node such that all the remaining rows of its schedule
2690 * need to be non-trivial, we construct a non-triviality region.
2691 * This region imposes that the next row is independent of previous rows.
2692 * In particular the coefficients c_i_x are represented by t_i_x
2693 * variables with c_i_x = Q t_i_x and Q a unimodular matrix such that
2694 * its first columns span the rows of the previously computed part
2695 * of the schedule. The non-triviality region enforces that at least
2696 * one of the remaining components of t_i_x is non-zero, i.e.,
2697 * that the new schedule row depends on at least one of the remaining
2698 * columns of Q.
2700 static __isl_give isl_vec *solve_lp(struct isl_sched_graph *graph)
2702 int i;
2703 isl_vec *sol;
2704 isl_basic_set *lp;
2706 for (i = 0; i < graph->n; ++i) {
2707 struct isl_sched_node *node = &graph->node[i];
2708 int skip = node->rank;
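		/* The non-triviality region starts after c_i_0, the c_i_n
		 * pairs and the first "rank" pairs of t_i_x, covering the
		 * remaining components of t_i_x (if a non-trivial row
		 * is required).
		 */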
2709 graph->region[i].pos = node->start + 1 + 2*(node->nparam+skip);
2710 if (needs_row(graph, node))
2711 graph->region[i].len = 2 * (node->nvar - skip);
2712 else
2713 graph->region[i].len = 0;
2715 lp = isl_basic_set_copy(graph->lp);
2716 sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
2717 graph->region, &check_conflict, graph);
2718 return sol;
2721 /* Extract the coefficients for the variables of "node" from "sol".
2723 * Within each node, the coefficients have the following order:
2724 * - c_i_0
2725 * - positive and negative parts of c_i_n (if parametric)
2726 * - positive and negative parts of c_i_x
2728 * The c_i_x^- appear before their c_i_x^+ counterpart.
2730 * Return c_i_x = c_i_x^+ - c_i_x^-
2732 static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
2733 __isl_keep isl_vec *sol)
2735 int i;
2736 int pos;
2737 isl_vec *csol;
2739 if (!sol)
2740 return NULL;
2741 csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
2742 if (!csol)
2743 return NULL;
2745 pos = 1 + node->start + 1 + 2 * node->nparam;
2746 for (i = 0; i < node->nvar; ++i)
2747 isl_int_sub(csol->el[i],
2748 sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);
2750 return csol;
2753 /* Update the schedules of all nodes based on the given solution
2754 * of the LP problem.
2755 * The new row is added to the current band.
2756 * All possibly negative coefficients are encoded as a difference
2757 * of two non-negative variables, so we need to perform the subtraction
2758 * here. Moreover, if use_cmap is set, then the solution does
2759 * not refer to the actual coefficients c_i_x, but instead to variables
2760 * t_i_x such that c_i_x = Q t_i_x and Q is equal to node->cmap.
2761 * In this case, we then also need to perform this multiplication
2762 * to obtain the values of c_i_x.
2764 * If coincident is set, then the caller guarantees that the new
2765 * row satisfies the coincidence constraints.
2767 static int update_schedule(struct isl_sched_graph *graph,
2768 __isl_take isl_vec *sol, int use_cmap, int coincident)
2770 int i, j;
2771 isl_vec *csol = NULL;
2773 if (!sol)
2774 goto error;
2775 if (sol->size == 0)
2776 isl_die(sol->ctx, isl_error_internal,
2777 "no solution found", goto error);
2778 if (graph->n_total_row >= graph->max_row)
2779 isl_die(sol->ctx, isl_error_internal,
2780 "too many schedule rows", goto error);
2782 for (i = 0; i < graph->n; ++i) {
2783 struct isl_sched_node *node = &graph->node[i];
2784 int pos = node->start;
2785 int row = isl_mat_rows(node->sched);
2787 isl_vec_free(csol);
2788 csol = extract_var_coef(node, sol);
2789 if (!csol)
2790 goto error;
2792 isl_map_free(node->sched_map);
2793 node->sched_map = NULL;
2794 node->sched = isl_mat_add_rows(node->sched, 1);
2795 if (!node->sched)
2796 goto error;
2797 node->sched = isl_mat_set_element(node->sched, row, 0,
2798 sol->el[1 + pos]);
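		/* Compute c_i_n = c_i_n^+ - c_i_n^- in place before copying
		 * the parameter coefficients into the schedule row.
		 */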
2799 for (j = 0; j < node->nparam; ++j)
2800 isl_int_sub(sol->el[1 + pos + 1 + 2 * j + 1],
2801 sol->el[1 + pos + 1 + 2 * j + 1],
2802 sol->el[1 + pos + 1 + 2 * j]);
2803 for (j = 0; j < node->nparam; ++j)
2804 node->sched = isl_mat_set_element(node->sched,
2805 row, 1 + j, sol->el[1+pos+1+2*j+1]);
2806 if (use_cmap)
2807 csol = isl_mat_vec_product(isl_mat_copy(node->cmap),
2808 csol);
2809 if (!csol)
2810 goto error;
2811 for (j = 0; j < node->nvar; ++j)
2812 node->sched = isl_mat_set_element(node->sched,
2813 row, 1 + node->nparam + j, csol->el[j]);
2814 node->coincident[graph->n_total_row] = coincident;
2816 isl_vec_free(sol);
2817 isl_vec_free(csol);
2819 graph->n_row++;
2820 graph->n_total_row++;
2822 return 0;
2823 error:
2824 isl_vec_free(sol);
2825 isl_vec_free(csol);
2826 return -1;
2829 /* Convert row "row" of node->sched into an isl_aff living in "ls"
2830 * and return this isl_aff.
2832 static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
2833 struct isl_sched_node *node, int row)
2835 int j;
2836 isl_int v;
2837 isl_aff *aff;
2839 isl_int_init(v);
2841 aff = isl_aff_zero_on_domain(ls);
2842 isl_mat_get_element(node->sched, row, 0, &v);
2843 aff = isl_aff_set_constant(aff, v);
2844 for (j = 0; j < node->nparam; ++j) {
2845 isl_mat_get_element(node->sched, row, 1 + j, &v);
2846 aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
2848 for (j = 0; j < node->nvar; ++j) {
2849 isl_mat_get_element(node->sched, row, 1 + node->nparam + j, &v);
2850 aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
2853 isl_int_clear(v);
2855 return aff;
2858 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
2859 * and return this multi_aff.
2861 * The result is defined over the uncompressed node domain.
2863 static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
2864 struct isl_sched_node *node, int first, int n)
2866 int i;
2867 isl_space *space;
2868 isl_local_space *ls;
2869 isl_aff *aff;
2870 isl_multi_aff *ma;
2871 int nrow;
2873 if (!node)
2874 return NULL;
2875 nrow = isl_mat_rows(node->sched);
2876 if (node->compressed)
2877 space = isl_multi_aff_get_domain_space(node->decompress);
2878 else
2879 space = isl_space_copy(node->space);
2880 ls = isl_local_space_from_space(isl_space_copy(space));
2881 space = isl_space_from_domain(space);
2882 space = isl_space_add_dims(space, isl_dim_out, n);
2883 ma = isl_multi_aff_zero(space);
2885 for (i = first; i < first + n; ++i) {
2886 aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
2887 ma = isl_multi_aff_set_aff(ma, i - first, aff);
2890 isl_local_space_free(ls);
2892 if (node->compressed)
2893 ma = isl_multi_aff_pullback_multi_aff(ma,
2894 isl_multi_aff_copy(node->compress));
2896 return ma;
2899 /* Convert node->sched into a multi_aff and return this multi_aff.
2901 * The result is defined over the uncompressed node domain.
2903 static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
2904 struct isl_sched_node *node)
2906 int nrow;
2908 nrow = isl_mat_rows(node->sched);
2909 return node_extract_partial_schedule_multi_aff(node, 0, nrow);
2912 /* Convert node->sched into a map and return this map.
2914 * The result is cached in node->sched_map, which needs to be released
2915 * whenever node->sched is updated.
2916 * It is defined over the uncompressed node domain.
2918 static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
2920 if (!node->sched_map) {
2921 isl_multi_aff *ma;
2923 ma = node_extract_schedule_multi_aff(node);
2924 node->sched_map = isl_map_from_multi_aff(ma);
2927 return isl_map_copy(node->sched_map);
2930 /* Construct a map that can be used to update a dependence relation
2931 * based on the current schedule.
2932 * That is, construct a map expressing that source and sink
2933 * are executed within the same iteration of the current schedule.
2934 * This map can then be intersected with the dependence relation.
2935 * This is not the most efficient way, but this shouldn't be a critical
2936 * operation.
2938 static __isl_give isl_map *specializer(struct isl_sched_node *src,
2939 struct isl_sched_node *dst)
2941 isl_map *src_sched, *dst_sched;
2943 src_sched = node_extract_schedule(src);
2944 dst_sched = node_extract_schedule(dst);
2945 return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
2948 /* Intersect the domains of the nested relations in domain and range
2949 * of "umap" with "map".
2951 static __isl_give isl_union_map *intersect_domains(
2952 __isl_take isl_union_map *umap, __isl_keep isl_map *map)
2954 isl_union_set *uset;
2956 umap = isl_union_map_zip(umap);
2957 uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
2958 umap = isl_union_map_intersect_domain(umap, uset);
2959 umap = isl_union_map_zip(umap);
2960 return umap;
2963 /* Update the dependence relation of the given edge based
2964 * on the current schedule.
2965 * If the dependence is carried completely by the current schedule, then
2966 * it is removed from the edge_tables. It is kept in the list of edges
2967 * as otherwise all edge_tables would have to be recomputed.
2969 static int update_edge(struct isl_sched_graph *graph,
2970 struct isl_sched_edge *edge)
2972 int empty;
2973 isl_map *id;
2975 id = specializer(edge->src, edge->dst);
2976 edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
2977 if (!edge->map)
2978 goto error;
2980 if (edge->tagged_condition) {
2981 edge->tagged_condition =
2982 intersect_domains(edge->tagged_condition, id);
2983 if (!edge->tagged_condition)
2984 goto error;
2986 if (edge->tagged_validity) {
2987 edge->tagged_validity =
2988 intersect_domains(edge->tagged_validity, id);
2989 if (!edge->tagged_validity)
2990 goto error;
2993 empty = isl_map_plain_is_empty(edge->map);
2994 if (empty < 0)
2995 goto error;
2996 if (empty)
2997 graph_remove_edge(graph, edge);
2999 isl_map_free(id);
3000 return 0;
3001 error:
3002 isl_map_free(id);
3003 return -1;
3006 /* Does the domain of "umap" intersect "uset"?
3008 static int domain_intersects(__isl_keep isl_union_map *umap,
3009 __isl_keep isl_union_set *uset)
3011 int empty;
3013 umap = isl_union_map_copy(umap);
3014 umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
3015 empty = isl_union_map_is_empty(umap);
3016 isl_union_map_free(umap);
3018 return empty < 0 ? -1 : !empty;
3021 /* Does the range of "umap" intersect "uset"?
3023 static int range_intersects(__isl_keep isl_union_map *umap,
3024 __isl_keep isl_union_set *uset)
3026 int empty;
3028 umap = isl_union_map_copy(umap);
3029 umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
3030 empty = isl_union_map_is_empty(umap);
3031 isl_union_map_free(umap);
3033 return empty < 0 ? -1 : !empty;
3036 /* Are the condition dependences of "edge" local with respect to
3037 * the current schedule?
3039 * That is, are domain and range of the condition dependences mapped
3040 * to the same point?
3042 * In other words, is the condition false?
3044 static int is_condition_false(struct isl_sched_edge *edge)
3046 isl_union_map *umap;
3047 isl_map *map, *sched, *test;
3048 int empty, local;
3050 empty = isl_union_map_is_empty(edge->tagged_condition);
3051 if (empty < 0 || empty)
3052 return empty;
3054 umap = isl_union_map_copy(edge->tagged_condition);
3055 umap = isl_union_map_zip(umap);
3056 umap = isl_union_set_unwrap(isl_union_map_domain(umap));
3057 map = isl_map_from_union_map(umap);
3059 sched = node_extract_schedule(edge->src);
3060 map = isl_map_apply_domain(map, sched);
3061 sched = node_extract_schedule(edge->dst);
3062 map = isl_map_apply_range(map, sched);
3064 test = isl_map_identity(isl_map_get_space(map));
3065 local = isl_map_is_subset(map, test);
3066 isl_map_free(map);
3067 isl_map_free(test);
3069 return local;
3072 /* For each conditional validity constraint that is adjacent
3073 * to a condition with domain in condition_source or range in condition_sink,
3074 * turn it into an unconditional validity constraint.
3076 static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
3077 __isl_take isl_union_set *condition_source,
3078 __isl_take isl_union_set *condition_sink)
3080 int i;
3082 condition_source = isl_union_set_coalesce(condition_source);
3083 condition_sink = isl_union_set_coalesce(condition_sink);
3085 for (i = 0; i < graph->n_edge; ++i) {
3086 int adjacent;
3087 isl_union_map *validity;
3089 if (!is_conditional_validity(&graph->edge[i]))
3090 continue;
3091 if (is_validity(&graph->edge[i]))
3092 continue;
3094 validity = graph->edge[i].tagged_validity;
3095 adjacent = domain_intersects(validity, condition_sink);
3096 if (adjacent >= 0 && !adjacent)
3097 adjacent = range_intersects(validity, condition_source);
3098 if (adjacent < 0)
3099 goto error;
3100 if (!adjacent)
3101 continue;
3103 set_validity(&graph->edge[i]);
3106 isl_union_set_free(condition_source);
3107 isl_union_set_free(condition_sink);
3108 return 0;
3109 error:
3110 isl_union_set_free(condition_source);
3111 isl_union_set_free(condition_sink);
3112 return -1;
3115 /* Update the dependence relations of all edges based on the current schedule
3116 * and enforce conditional validity constraints that are adjacent
3117 * to satisfied condition constraints.
3119 * First check if any of the condition constraints are satisfied
3120 * (i.e., not local to the outer schedule) and keep track of
3121 * their domain and range.
3122 * Then update all dependence relations (which removes the non-local
3123 * constraints).
3124 * Finally, if any condition constraints turned out to be satisfied,
3125 * then turn all adjacent conditional validity constraints into
3126 * unconditional validity constraints.
3128 static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
3130 int i;
3131 int any = 0;
3132 isl_union_set *source, *sink;
3134 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3135 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3136 for (i = 0; i < graph->n_edge; ++i) {
3137 int local;
3138 isl_union_set *uset;
3139 isl_union_map *umap;
3141 if (!is_condition(&graph->edge[i]))
3142 continue;
3143 if (is_local(&graph->edge[i]))
3144 continue;
3145 local = is_condition_false(&graph->edge[i]);
3146 if (local < 0)
3147 goto error;
3148 if (local)
3149 continue;
3151 any = 1;
3153 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3154 uset = isl_union_map_domain(umap);
3155 source = isl_union_set_union(source, uset);
3157 umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3158 uset = isl_union_map_range(umap);
3159 sink = isl_union_set_union(sink, uset);
3162 for (i = graph->n_edge - 1; i >= 0; --i) {
3163 if (update_edge(graph, &graph->edge[i]) < 0)
3164 goto error;
3167 if (any)
3168 return unconditionalize_adjacent_validity(graph, source, sink);
3170 isl_union_set_free(source);
3171 isl_union_set_free(sink);
3172 return 0;
3173 error:
3174 isl_union_set_free(source);
3175 isl_union_set_free(sink);
3176 return -1;
3179 static void next_band(struct isl_sched_graph *graph)
3181 graph->band_start = graph->n_total_row;
3184 /* Return the union of the universe domains of the nodes in "graph"
3185 * that satisfy "pred".
3187 static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
3188 struct isl_sched_graph *graph,
3189 int (*pred)(struct isl_sched_node *node, int data), int data)
3191 int i;
3192 isl_set *set;
3193 isl_union_set *dom;
3195 for (i = 0; i < graph->n; ++i)
3196 if (pred(&graph->node[i], data))
3197 break;
3199 if (i >= graph->n)
3200 isl_die(ctx, isl_error_internal,
3201 "empty component", return NULL);
3203 set = isl_set_universe(isl_space_copy(graph->node[i].space));
3204 dom = isl_union_set_from_set(set);
3206 for (i = i + 1; i < graph->n; ++i) {
3207 if (!pred(&graph->node[i], data))
3208 continue;
3209 set = isl_set_universe(isl_space_copy(graph->node[i].space));
3210 dom = isl_union_set_union(dom, isl_union_set_from_set(set));
3213 return dom;
3216 /* Return a list of unions of universe domains, where each element
3217 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
3219 static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
3220 struct isl_sched_graph *graph)
3222 int i;
3223 isl_union_set_list *filters;
3225 filters = isl_union_set_list_alloc(ctx, graph->scc);
3226 for (i = 0; i < graph->scc; ++i) {
3227 isl_union_set *dom;
3229 dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
3230 filters = isl_union_set_list_add(filters, dom);
3233 return filters;
3236 /* Return a list of two unions of universe domains, one for the SCCs up
3237 * to and including graph->src_scc and another for the other SCCs.
3239 static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
3240 struct isl_sched_graph *graph)
3242 isl_union_set *dom;
3243 isl_union_set_list *filters;
3245 filters = isl_union_set_list_alloc(ctx, 2);
3246 dom = isl_sched_graph_domain(ctx, graph,
3247 &node_scc_at_most, graph->src_scc);
3248 filters = isl_union_set_list_add(filters, dom);
3249 dom = isl_sched_graph_domain(ctx, graph,
3250 &node_scc_at_least, graph->src_scc + 1);
3251 filters = isl_union_set_list_add(filters, dom);
3253 return filters;
3256 /* Copy nodes that satisfy node_pred from the src dependence graph
3257 * to the dst dependence graph.
3259 static int copy_nodes(struct isl_sched_graph *dst, struct isl_sched_graph *src,
3260 int (*node_pred)(struct isl_sched_node *node, int data), int data)
3262 int i;
3264 dst->n = 0;
3265 for (i = 0; i < src->n; ++i) {
3266 int j;
3268 if (!node_pred(&src->node[i], data))
3269 continue;
3271 j = dst->n;
3272 dst->node[j].space = isl_space_copy(src->node[i].space);
3273 dst->node[j].compressed = src->node[i].compressed;
3274 dst->node[j].hull = isl_set_copy(src->node[i].hull);
3275 dst->node[j].compress =
3276 isl_multi_aff_copy(src->node[i].compress);
3277 dst->node[j].decompress =
3278 isl_multi_aff_copy(src->node[i].decompress);
3279 dst->node[j].nvar = src->node[i].nvar;
3280 dst->node[j].nparam = src->node[i].nparam;
3281 dst->node[j].sched = isl_mat_copy(src->node[i].sched);
3282 dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
3283 dst->node[j].coincident = src->node[i].coincident;
3284 dst->n++;
3286 if (!dst->node[j].space || !dst->node[j].sched)
3287 return -1;
3288 if (dst->node[j].compressed &&
3289 (!dst->node[j].hull || !dst->node[j].compress ||
3290 !dst->node[j].decompress))
3291 return -1;
3294 return 0;
3297 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3298 * to the dst dependence graph.
3299 * If the source or destination node of the edge is not in the destination
3300 * graph, then it must be a backward proximity edge and it should simply
3301 * be ignored.
3303 static int copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
3304 struct isl_sched_graph *src,
3305 int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
3307 int i;
3308 enum isl_edge_type t;
3310 dst->n_edge = 0;
3311 for (i = 0; i < src->n_edge; ++i) {
3312 struct isl_sched_edge *edge = &src->edge[i];
3313 isl_map *map;
3314 isl_union_map *tagged_condition;
3315 isl_union_map *tagged_validity;
3316 struct isl_sched_node *dst_src, *dst_dst;
3318 if (!edge_pred(edge, data))
3319 continue;
3321 if (isl_map_plain_is_empty(edge->map))
3322 continue;
3324 dst_src = graph_find_node(ctx, dst, edge->src->space);
3325 dst_dst = graph_find_node(ctx, dst, edge->dst->space);
3326 if (!dst_src || !dst_dst) {
3327 if (is_validity(edge) || is_conditional_validity(edge))
3328 isl_die(ctx, isl_error_internal,
3329 "backward (conditional) validity edge",
3330 return -1);
3331 continue;
3334 map = isl_map_copy(edge->map);
3335 tagged_condition = isl_union_map_copy(edge->tagged_condition);
3336 tagged_validity = isl_union_map_copy(edge->tagged_validity);
3338 dst->edge[dst->n_edge].src = dst_src;
3339 dst->edge[dst->n_edge].dst = dst_dst;
3340 dst->edge[dst->n_edge].map = map;
3341 dst->edge[dst->n_edge].tagged_condition = tagged_condition;
3342 dst->edge[dst->n_edge].tagged_validity = tagged_validity;
3343 dst->edge[dst->n_edge].types = edge->types;
3344 dst->n_edge++;
3346 if (edge->tagged_condition && !tagged_condition)
3347 return -1;
3348 if (edge->tagged_validity && !tagged_validity)
3349 return -1;
3351 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
3352 if (edge !=
3353 graph_find_edge(src, t, edge->src, edge->dst))
3354 continue;
3355 if (graph_edge_table_add(ctx, dst, t,
3356 &dst->edge[dst->n_edge - 1]) < 0)
3357 return -1;
3361 return 0;
3364 /* Compute the maximal number of variables over all nodes.
3365 * This is the maximal number of linearly independent schedule
3366 * rows that we need to compute.
3367 * Just in case we end up in a part of the dependence graph
3368 * with only lower-dimensional domains, we make sure we will
3369 * compute the required amount of extra linearly independent rows.
3371 static int compute_maxvar(struct isl_sched_graph *graph)
3373 int i;
3375 graph->maxvar = 0;
3376 for (i = 0; i < graph->n; ++i) {
3377 struct isl_sched_node *node = &graph->node[i];
3378 int nvar;
3380 if (node_update_cmap(node) < 0)
3381 return -1;
3382 nvar = node->nvar + graph->n_row - node->rank;
3383 if (nvar > graph->maxvar)
3384 graph->maxvar = nvar;
3387 return 0;
3390 /* Extract the subgraph of "graph" that consists of the nodes satisfying
3391 * "node_pred" and the edges satisfying "edge_pred" and store
3392 * the result in "sub".
3394 static int extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
3395 int (*node_pred)(struct isl_sched_node *node, int data),
3396 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3397 int data, struct isl_sched_graph *sub)
3399 int i, n = 0, n_edge = 0;
3400 int t;
3402 for (i = 0; i < graph->n; ++i)
3403 if (node_pred(&graph->node[i], data))
3404 ++n;
3405 for (i = 0; i < graph->n_edge; ++i)
3406 if (edge_pred(&graph->edge[i], data))
3407 ++n_edge;
3408 if (graph_alloc(ctx, sub, n, n_edge) < 0)
3409 return -1;
3410 if (copy_nodes(sub, graph, node_pred, data) < 0)
3411 return -1;
3412 if (graph_init_table(ctx, sub) < 0)
3413 return -1;
3414 for (t = 0; t <= isl_edge_last; ++t)
3415 sub->max_edge[t] = graph->max_edge[t];
3416 if (graph_init_edge_tables(ctx, sub) < 0)
3417 return -1;
3418 if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
3419 return -1;
3420 sub->n_row = graph->n_row;
3421 sub->max_row = graph->max_row;
3422 sub->n_total_row = graph->n_total_row;
3423 sub->band_start = graph->band_start;
3425 return 0;
3428 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
3429 struct isl_sched_graph *graph);
3430 static __isl_give isl_schedule_node *compute_schedule_wcc(
3431 isl_schedule_node *node, struct isl_sched_graph *graph);
3433 /* Compute a schedule for a subgraph of "graph". In particular, for
3434 * the graph composed of nodes that satisfy node_pred and edges that
3435 * satisfy edge_pred.
3436 * If the subgraph is known to consist of a single component, then wcc should
3437 * be set and then we call compute_schedule_wcc on the constructed subgraph.
3438 * Otherwise, we call compute_schedule, which will check whether the subgraph
3439 * is connected.
3441 * The schedule is inserted at "node" and the updated schedule node
3442 * is returned.
3444 static __isl_give isl_schedule_node *compute_sub_schedule(
3445 __isl_take isl_schedule_node *node, isl_ctx *ctx,
3446 struct isl_sched_graph *graph,
3447 int (*node_pred)(struct isl_sched_node *node, int data),
3448 int (*edge_pred)(struct isl_sched_edge *edge, int data),
3449 int data, int wcc)
3451 struct isl_sched_graph split = { 0 };
3453 if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
3454 &split) < 0)
3455 goto error;
3457 if (wcc)
3458 node = compute_schedule_wcc(node, &split);
3459 else
3460 node = compute_schedule(node, &split);
3462 graph_free(ctx, &split);
3463 return node;
3464 error:
3465 graph_free(ctx, &split);
3466 return isl_schedule_node_free(node);
3469 static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
3471 return edge->src->scc == scc && edge->dst->scc == scc;
3474 static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
3476 return edge->dst->scc <= scc;
3479 static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
3481 return edge->src->scc >= scc;
3484 /* Reset the current band by dropping all its schedule rows.
3486 static int reset_band(struct isl_sched_graph *graph)
3488 int i;
3489 int drop;
3491 drop = graph->n_total_row - graph->band_start;
3492 graph->n_total_row -= drop;
3493 graph->n_row -= drop;
3495 for (i = 0; i < graph->n; ++i) {
3496 struct isl_sched_node *node = &graph->node[i];
3498 isl_map_free(node->sched_map);
3499 node->sched_map = NULL;
3501 node->sched = isl_mat_drop_rows(node->sched,
3502 graph->band_start, drop);
3504 if (!node->sched)
3505 return -1;
3508 return 0;
3511 /* Split the current graph into two parts and compute a schedule for each
3512 * part individually. In particular, one part consists of all SCCs up
3513 * to and including graph->src_scc, while the other part contains the other
3514 * SCCs. The split is enforced by a sequence node inserted at position "node"
3515 * in the schedule tree. Return the updated schedule node.
3516 * If either of these two parts consists of a sequence, then it is spliced
3517 * into the sequence containing the two parts.
3519 * The current band is reset. It would be possible to reuse
3520 * the previously computed rows as the first rows in the next
3521 * band, but recomputing them may result in better rows as we are looking
3522 * at a smaller part of the dependence graph.
3524 static __isl_give isl_schedule_node *compute_split_schedule(
3525 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3527 int is_seq;
3528 isl_ctx *ctx;
3529 isl_union_set_list *filters;
3531 if (!node)
3532 return NULL;
3534 if (reset_band(graph) < 0)
3535 return isl_schedule_node_free(node);
3537 next_band(graph);
3539 ctx = isl_schedule_node_get_ctx(node);
3540 filters = extract_split(ctx, graph);
3541 node = isl_schedule_node_insert_sequence(node, filters);
3542 node = isl_schedule_node_child(node, 1);
3543 node = isl_schedule_node_child(node, 0);
3545 node = compute_sub_schedule(node, ctx, graph,
3546 &node_scc_at_least, &edge_src_scc_at_least,
3547 graph->src_scc + 1, 0);
3548 is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3549 node = isl_schedule_node_parent(node);
3550 node = isl_schedule_node_parent(node);
3551 if (is_seq)
3552 node = isl_schedule_node_sequence_splice_child(node, 1);
3553 node = isl_schedule_node_child(node, 0);
3554 node = isl_schedule_node_child(node, 0);
3555 node = compute_sub_schedule(node, ctx, graph,
3556 &node_scc_at_most, &edge_dst_scc_at_most,
3557 graph->src_scc, 0);
3558 is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3559 node = isl_schedule_node_parent(node);
3560 node = isl_schedule_node_parent(node);
3561 if (is_seq)
3562 node = isl_schedule_node_sequence_splice_child(node, 0);
3564 return node;
3567 /* Insert a band node at position "node" in the schedule tree corresponding
3568 * to the current band in "graph". Mark the band node permutable
3569 * if "permutable" is set.
3570 * The partial schedules and the coincidence property are extracted
3571 * from the graph nodes.
3572 * Return the updated schedule node.
3574 static __isl_give isl_schedule_node *insert_current_band(
3575 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3576 int permutable)
3578 int i;
3579 int start, end, n;
3580 isl_multi_aff *ma;
3581 isl_multi_pw_aff *mpa;
3582 isl_multi_union_pw_aff *mupa;
3584 if (!node)
3585 return NULL;
3587 if (graph->n < 1)
3588 isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
3589 "graph should have at least one node",
3590 return isl_schedule_node_free(node));
3592 start = graph->band_start;
3593 end = graph->n_total_row;
3594 n = end - start;
3596 ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
3597 mpa = isl_multi_pw_aff_from_multi_aff(ma);
3598 mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
3600 for (i = 1; i < graph->n; ++i) {
3601 isl_multi_union_pw_aff *mupa_i;
3603 ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
3604 start, n);
3605 mpa = isl_multi_pw_aff_from_multi_aff(ma);
3606 mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
3607 mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
3609 node = isl_schedule_node_insert_partial_schedule(node, mupa);
3611 for (i = 0; i < n; ++i)
3612 node = isl_schedule_node_band_member_set_coincident(node, i,
3613 graph->node[0].coincident[start + i]);
3614 node = isl_schedule_node_band_set_permutable(node, permutable);
3616 return node;
3619 /* Update the dependence relations based on the current schedule,
3620 * add the current band to "node" and then continue with the computation
3621 * of the next band.
3622 * Return the updated schedule node.
3624 static __isl_give isl_schedule_node *compute_next_band(
3625 __isl_take isl_schedule_node *node,
3626 struct isl_sched_graph *graph, int permutable)
3628 isl_ctx *ctx;
3630 if (!node)
3631 return NULL;
3633 ctx = isl_schedule_node_get_ctx(node);
3634 if (update_edges(ctx, graph) < 0)
3635 return isl_schedule_node_free(node);
3636 node = insert_current_band(node, graph, permutable);
3637 next_band(graph);
3639 node = isl_schedule_node_child(node, 0);
3640 node = compute_schedule(node, graph);
3641 node = isl_schedule_node_parent(node);
3643 return node;
3646 /* Add constraints to graph->lp that force the dependence "map" (which
3647 * is part of the dependence relation of "edge")
3648 * to be respected and attempt to carry it, where the edge is one from
3649 * a node j to itself. "pos" is the sequence number of the given map.
3650 * That is, add constraints that enforce
3652 * (c_j_0 + c_j_n n + c_j_x y) - (c_j_0 + c_j_n n + c_j_x x)
3653 * = c_j_x (y - x) >= e_i
3655 * for each (x,y) in R.
3656 * We obtain general constraints on coefficients (c_0, c_n, c_x)
3657 * of valid constraints for (y - x) and then plug in (-e_i, 0, c_j_x),
3658 * with each coefficient in c_j_x represented as a pair of non-negative
3659 * coefficients.
3661 static int add_intra_constraints(struct isl_sched_graph *graph,
3662 struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
3664 int offset;
3665 isl_ctx *ctx = isl_map_get_ctx(map);
3666 isl_dim_map *dim_map;
3667 isl_basic_set *coef;
3668 struct isl_sched_node *node = edge->src;
3670 coef = intra_coefficients(graph, node, map);
3671 if (!coef)
3672 return -1;
3674 offset = coef_var_offset(coef);
3675 dim_map = intra_dim_map(ctx, graph, node, offset, 1);
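	/* Plug in -e_i for the constant term, where e_i is the LP variable
	 * at position 3 + pos associated with this basic map.
	 */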
3676 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
3677 graph->lp = isl_basic_set_extend_constraints(graph->lp,
3678 coef->n_eq, coef->n_ineq);
3679 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
3680 coef, dim_map);
3682 return 0;
3685 /* Add constraints to graph->lp that force the dependence "map" (which
3686 * is part of the dependence relation of "edge")
3687 * to be respected and attempt to carry it, where the edge is one from
3688 * node j to node k. "pos" is the sequence number of the given map.
3689 * That is, add constraints that enforce
3691 * (c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
3693 * for each (x,y) in R.
3694 * We obtain general constraints on coefficients (c_0, c_n, c_x)
3695 * of valid constraints for R and then plug in
3696 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, c_k_x - c_j_x)
3697 * with each coefficient (except e_i, c_k_0 and c_j_0)
3698 * represented as a pair of non-negative coefficients.
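/* As a hypothetical illustration, if both nodes have a single iterator
 * and R = { x -> y : y = x }, then the enforced constraint specializes to
 *
 *	(c_k_0 - c_j_0) + (c_k_n - c_j_n) n + (c_k_x - c_j_x) x >= e_i
 *
 * for each x in the domain of R, i.e., only the differences of the
 * corresponding node coefficients enter, which is exactly what the
 * substituted vector expresses.
 */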
3700 static int add_inter_constraints(struct isl_sched_graph *graph,
3701 struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
3703 int offset;
3704 isl_ctx *ctx = isl_map_get_ctx(map);
3705 isl_dim_map *dim_map;
3706 isl_basic_set *coef;
3707 struct isl_sched_node *src = edge->src;
3708 struct isl_sched_node *dst = edge->dst;
3710 coef = inter_coefficients(graph, edge, map);
3711 if (!coef)
3712 return -1;
3714 offset = coef_var_offset(coef);
3715 dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
3716 isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
3717 graph->lp = isl_basic_set_extend_constraints(graph->lp,
3718 coef->n_eq, coef->n_ineq);
3719 graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
3720 coef, dim_map);
3722 return 0;
3725 /* Add constraints to graph->lp that force all (conditional) validity
3726 * dependences to be respected and attempt to carry them.
3728 static int add_all_constraints(struct isl_sched_graph *graph)
3730 int i, j;
3731 int pos;
3733 pos = 0;
3734 for (i = 0; i < graph->n_edge; ++i) {
3735 struct isl_sched_edge *edge = &graph->edge[i];
3737 if (!is_any_validity(edge))
3738 continue;
3740 for (j = 0; j < edge->map->n; ++j) {
3741 isl_basic_map *bmap;
3742 isl_map *map;
3744 bmap = isl_basic_map_copy(edge->map->p[j]);
3745 map = isl_map_from_basic_map(bmap);
3747 if (edge->src == edge->dst &&
3748 add_intra_constraints(graph, edge, map, pos) < 0)
3749 return -1;
3750 if (edge->src != edge->dst &&
3751 add_inter_constraints(graph, edge, map, pos) < 0)
3752 return -1;
3753 ++pos;
3757 return 0;
3760 /* Count the number of equality and inequality constraints
3761 * that will be added to the carry_lp problem.
3762 * We count each edge exactly once.
3764 static int count_all_constraints(struct isl_sched_graph *graph,
3765 int *n_eq, int *n_ineq)
3767 int i, j;
3769 *n_eq = *n_ineq = 0;
3770 for (i = 0; i < graph->n_edge; ++i) {
3771 struct isl_sched_edge *edge = &graph->edge[i];
3773 if (!is_any_validity(edge))
3774 continue;
3776 for (j = 0; j < edge->map->n; ++j) {
3777 isl_basic_map *bmap;
3778 isl_map *map;
3780 bmap = isl_basic_map_copy(edge->map->p[j]);
3781 map = isl_map_from_basic_map(bmap);
3783 if (count_map_constraints(graph, edge, map,
3784 n_eq, n_ineq, 1, 0) < 0)
3785 return -1;
3789 return 0;
3792 /* Construct an LP problem for finding schedule coefficients
3793 * such that the schedule carries as many dependences as possible.
3794 * In particular, for each dependence i, we bound the dependence distance
3795 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
3796 * of all e_i's. Dependences with e_i = 0 in the solution are simply
3797 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
3798 * Note that if the dependence relation is a union of basic maps,
3799 * then we have to consider each basic map individually as it may only
3800 * be possible to carry the dependences expressed by some of those
3801 * basic maps and not all of them.
3802 * Below, we consider each of those basic maps as a separate "edge".
3804 * All variables of the LP are non-negative. The actual coefficients
3805 * may be negative, so each coefficient is represented as the difference
3806 * of two non-negative variables. The negative part always appears
3807 * immediately before the positive part.
3808 * Other than that, the variables have the following order
3810 * - sum of (1 - e_i) over all edges
3811 * - sum of positive and negative parts of all c_n coefficients
3812 * (unconstrained when computing non-parametric schedules)
3813 * - sum of positive and negative parts of all c_x coefficients
3814 * - for each edge
3815 * - e_i
3816 * - for each node
3817 * - c_i_0
3818 * - positive and negative parts of c_i_n (if parametric)
3819 * - positive and negative parts of c_i_x
3821 * The constraints are those from the (validity) edges plus three equalities
3822 * to express the sums and n_edge inequalities to express e_i <= 1.
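/* As a hypothetical illustration, consider a graph with two nodes,
 * each with one parameter and two iterators, and a single dependence
 * relation consisting of one basic map.  Then n_edge = 1 and
 * total = 3 + 1 + 2 * (1 + 2 * (1 + 2)) = 18.  The variables are,
 * in order: the three sums, e_0, then seven variables for the first
 * node (c_0_0, a negative/positive pair for its parameter coefficient
 * and two such pairs for its iterator coefficients) and seven more
 * for the second node.  In a constraint row of graph->lp the constant
 * term occupies index 0, which is why e_i is accessed as [4 + i] below.
 * The constraints are those contributed by the edge plus 3 equalities
 * (the three sums) and 1 inequality (e_0 <= 1).
 */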
3824 static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
3826 int i;
3827 int k;
3828 isl_space *dim;
3829 unsigned total;
3830 int n_eq, n_ineq;
3831 int n_edge;
3833 n_edge = 0;
3834 for (i = 0; i < graph->n_edge; ++i)
3835 n_edge += graph->edge[i].map->n;
3837 total = 3 + n_edge;
3838 for (i = 0; i < graph->n; ++i) {
3839 struct isl_sched_node *node = &graph->node[graph->sorted[i]];
3840 node->start = total;
3841 total += 1 + 2 * (node->nparam + node->nvar);
3844 if (count_all_constraints(graph, &n_eq, &n_ineq) < 0)
3845 return isl_stat_error;
3847 dim = isl_space_set_alloc(ctx, 0, total);
3848 isl_basic_set_free(graph->lp);
3849 n_eq += 3;
3850 n_ineq += n_edge;
3851 graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
3852 graph->lp = isl_basic_set_set_rational(graph->lp);
3854 k = isl_basic_set_alloc_equality(graph->lp);
3855 if (k < 0)
3856 return isl_stat_error;
3857 isl_seq_clr(graph->lp->eq[k], 1 + total);
3858 isl_int_set_si(graph->lp->eq[k][0], -n_edge);
3859 isl_int_set_si(graph->lp->eq[k][1], 1);
3860 for (i = 0; i < n_edge; ++i)
3861 isl_int_set_si(graph->lp->eq[k][4 + i], 1);
3863 if (add_param_sum_constraint(graph, 1) < 0)
3864 return isl_stat_error;
3865 if (add_var_sum_constraint(graph, 2) < 0)
3866 return isl_stat_error;
3868 for (i = 0; i < n_edge; ++i) {
3869 k = isl_basic_set_alloc_inequality(graph->lp);
3870 if (k < 0)
3871 return isl_stat_error;
3872 isl_seq_clr(graph->lp->ineq[k], 1 + total);
3873 isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
3874 isl_int_set_si(graph->lp->ineq[k][0], 1);
3877 if (add_all_constraints(graph) < 0)
3878 return isl_stat_error;
3880 return isl_stat_ok;
3883 static __isl_give isl_schedule_node *compute_component_schedule(
3884 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
3885 int wcc);
3887 /* Comparison function for sorting the statements based on
3888 * the corresponding value in "r".
3890 static int smaller_value(const void *a, const void *b, void *data)
3892 isl_vec *r = data;
3893 const int *i1 = a;
3894 const int *i2 = b;
3896 return isl_int_cmp(r->el[*i1], r->el[*i2]);
3899 /* If the schedule_split_scaled option is set and if the linear
3900 * parts of the scheduling rows for all nodes in the graphs have
3901 * a non-trivial common divisor, then split off the remainder of the
3902 * constant term modulo this common divisor from the linear part.
3903 * Otherwise, insert a band node directly and continue with
3904 * the construction of the schedule.
3906 * If a non-trivial common divisor is found, then
3907 * the linear part is reduced and the remainder is enforced
3908 * by a sequence node with the children placed in the order
3909 * of this remainder.
3910 * In particular, we assign an SCC index based on the remainder and
3911 * then rely on compute_component_schedule to insert the sequence and
3912 * to continue the schedule construction on each part.
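/* As a hypothetical illustration, suppose the band consists of
 * a single row scheduling two nodes as 2 i and 2 j + 1, respectively.
 * The linear parts have common divisor 2 and the constant terms have
 * remainders 0 and 1 modulo 2, so the rows are reduced to i and j,
 * the first node is assigned SCC index 0 and the second SCC index 1,
 * and compute_component_schedule inserts a sequence node that places
 * the first node before the second.
 */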
3914 static __isl_give isl_schedule_node *split_scaled(
3915 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3917 int i;
3918 int row;
3919 int scc;
3920 isl_ctx *ctx;
3921 isl_int gcd, gcd_i;
3922 isl_vec *r;
3923 int *order;
3925 if (!node)
3926 return NULL;
3928 ctx = isl_schedule_node_get_ctx(node);
3929 if (!ctx->opt->schedule_split_scaled)
3930 return compute_next_band(node, graph, 0);
3931 if (graph->n <= 1)
3932 return compute_next_band(node, graph, 0);
3934 isl_int_init(gcd);
3935 isl_int_init(gcd_i);
3937 isl_int_set_si(gcd, 0);
3939 row = isl_mat_rows(graph->node[0].sched) - 1;
3941 for (i = 0; i < graph->n; ++i) {
3942 struct isl_sched_node *node = &graph->node[i];
3943 int cols = isl_mat_cols(node->sched);
3945 isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
3946 isl_int_gcd(gcd, gcd, gcd_i);
3949 isl_int_clear(gcd_i);
3951 if (isl_int_cmp_si(gcd, 1) <= 0) {
3952 isl_int_clear(gcd);
3953 return compute_next_band(node, graph, 0);
3956 r = isl_vec_alloc(ctx, graph->n);
3957 order = isl_calloc_array(ctx, int, graph->n);
3958 if (!r || !order)
3959 goto error;
3961 for (i = 0; i < graph->n; ++i) {
3962 struct isl_sched_node *node = &graph->node[i];
3964 order[i] = i;
3965 isl_int_fdiv_r(r->el[i], node->sched->row[row][0], gcd);
3966 isl_int_fdiv_q(node->sched->row[row][0],
3967 node->sched->row[row][0], gcd);
3968 isl_int_mul(node->sched->row[row][0],
3969 node->sched->row[row][0], gcd);
3970 node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
3971 if (!node->sched)
3972 goto error;
3975 if (isl_sort(order, graph->n, sizeof(order[0]), &smaller_value, r) < 0)
3976 goto error;
3978 scc = 0;
3979 for (i = 0; i < graph->n; ++i) {
3980 if (i > 0 && isl_int_ne(r->el[order[i - 1]], r->el[order[i]]))
3981 ++scc;
3982 graph->node[order[i]].scc = scc;
3984 graph->scc = ++scc;
3985 graph->weak = 0;
3987 isl_int_clear(gcd);
3988 isl_vec_free(r);
3989 free(order);
3991 if (update_edges(ctx, graph) < 0)
3992 return isl_schedule_node_free(node);
3993 node = insert_current_band(node, graph, 0);
3994 next_band(graph);
3996 node = isl_schedule_node_child(node, 0);
3997 node = compute_component_schedule(node, graph, 0);
3998 node = isl_schedule_node_parent(node);
4000 return node;
4001 error:
4002 isl_vec_free(r);
4003 free(order);
4004 isl_int_clear(gcd);
4005 return isl_schedule_node_free(node);
4008 /* Is the schedule row "sol" trivial on node "node"?
4009 * That is, is the solution zero on the dimensions orthogonal to
4010 * the previously found solutions?
4011 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
4013 * Each coefficient is represented as the difference between
4014 * two non-negative values in "sol". "sol" has been computed
4015 * in terms of the original iterators (i.e., without use of cmap).
4016 * We construct the schedule row s and write it as a linear
4017 * combination of (linear combinations of) previously computed schedule rows.
4018 * s = Q c or c = U s.
4019 * If the final entries of c are all zero, then the solution is trivial.
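/* As a hypothetical illustration, consider a node with nvar = 2 and
 * rank = 1 whose only previously computed row is i + j.  A solution
 * that restricts to 2 i + 2 j on this node lies in the span of what
 * was already found, so the final nvar - rank = 1 entry of c is zero
 * and the row is trivial on this node.  A solution restricting to
 * i - j has a non-zero final entry and is therefore not trivial.
 */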
4021 static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
4023 int trivial;
4024 isl_vec *node_sol;
4026 if (!sol)
4027 return -1;
4028 if (node->nvar == node->rank)
4029 return 0;
4031 node_sol = extract_var_coef(node, sol);
4032 node_sol = isl_mat_vec_product(isl_mat_copy(node->cinv), node_sol);
4033 if (!node_sol)
4034 return -1;
4036 trivial = isl_seq_first_non_zero(node_sol->el + node->rank,
4037 node->nvar - node->rank) == -1;
4039 isl_vec_free(node_sol);
4041 return trivial;
4044 /* Is the schedule row "sol" trivial on any node where it should
4045 * not be trivial?
4046 * "sol" has been computed in terms of the original iterators
4047 * (i.e., without use of cmap).
4048 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
4050 static int is_any_trivial(struct isl_sched_graph *graph,
4051 __isl_keep isl_vec *sol)
4053 int i;
4055 for (i = 0; i < graph->n; ++i) {
4056 struct isl_sched_node *node = &graph->node[i];
4057 int trivial;
4059 if (!needs_row(graph, node))
4060 continue;
4061 trivial = is_trivial(node, sol);
4062 if (trivial < 0 || trivial)
4063 return trivial;
4066 return 0;
4069 /* Construct a schedule row for each node such that as many dependences
4070 * as possible are carried and then continue with the next band.
4072 * Note that despite the fact that the problem is solved using a rational
4073 * solver, the solution is guaranteed to be integral.
4074 * Specifically, the dependence distance lower bounds e_i (and therefore
4075 * also their sum) are integers. See Lemma 5 of [1].
4077 * If the computed schedule row turns out to be trivial on one or
4078 * more nodes where it should not be trivial, then we throw it away
4079 * and try again on each component separately.
4081 * If there is only one component, then we accept the schedule row anyway,
4082 * but we do not consider it as a complete row and therefore do not
4083 * increment graph->n_row. Note that the ranks of the nodes that
4084 * do get a non-trivial schedule part will get updated regardless and
4085 * graph->maxvar is computed based on these ranks. The test for
4086 * whether more schedule rows are required in compute_schedule_wcc
4087 * is therefore not affected.
4089 * Insert a band corresponding to the schedule row at position "node"
4090 * of the schedule tree and continue with the construction of the schedule.
4091 * This insertion and the continued construction is performed by split_scaled
4092 * after optionally checking for non-trivial common divisors.
4094 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
4095 * Problem, Part II: Multi-Dimensional Time.
4096 * In Intl. Journal of Parallel Programming, 1992.
4098 static __isl_give isl_schedule_node *carry_dependences(
4099 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
4101 int i;
4102 int n_edge;
4103 int trivial;
4104 isl_ctx *ctx;
4105 isl_vec *sol;
4106 isl_basic_set *lp;
4108 if (!node)
4109 return NULL;
4111 n_edge = 0;
4112 for (i = 0; i < graph->n_edge; ++i)
4113 n_edge += graph->edge[i].map->n;
4115 ctx = isl_schedule_node_get_ctx(node);
4116 if (setup_carry_lp(ctx, graph) < 0)
4117 return isl_schedule_node_free(node);
4119 lp = isl_basic_set_copy(graph->lp);
4120 sol = isl_tab_basic_set_non_neg_lexmin(lp);
4121 if (!sol)
4122 return isl_schedule_node_free(node);
4124 if (sol->size == 0) {
4125 isl_vec_free(sol);
4126 isl_die(ctx, isl_error_internal,
4127 "error in schedule construction",
4128 return isl_schedule_node_free(node));
4131 isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
4132 if (isl_int_cmp_si(sol->el[1], n_edge) >= 0) {
4133 isl_vec_free(sol);
4134 isl_die(ctx, isl_error_unknown,
4135 "unable to carry dependences",
4136 return isl_schedule_node_free(node));
4139 trivial = is_any_trivial(graph, sol);
4140 if (trivial < 0) {
4141 sol = isl_vec_free(sol);
4142 } else if (trivial && graph->scc > 1) {
4143 isl_vec_free(sol);
4144 return compute_component_schedule(node, graph, 1);
4147 if (update_schedule(graph, sol, 0, 0) < 0)
4148 return isl_schedule_node_free(node);
4149 if (trivial)
4150 graph->n_row--;
4152 return split_scaled(node, graph);
4155 /* Topologically sort statements mapped to the same schedule iteration
4156 * and insert a sequence node in front of "node"
4157 * corresponding to this order.
4158 * If "initialized" is set, then it may be assumed that compute_maxvar
4159 * has been called on the current band. Otherwise, call
4160 * compute_maxvar if (and before) carry_dependences gets called.
4162 * If it turns out to be impossible to sort the statements apart,
4163 * because different dependences impose different orderings
4164 * on the statements, then we extend the schedule such that
4165 * it carries at least one more dependence.
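/* As a hypothetical illustration (statement names invented here),
 * if statements S and T are mapped to the same schedule iteration and
 * the remaining dependences only require S to execute before T, then
 * a sequence node with filters { S[i] } and { T[i] } (in that order)
 * is inserted in front of "node".  If the remaining dependences require
 * both orders, then fewer SCCs than nodes are detected below and
 * carry_dependences is used to extend the schedule instead.
 */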
4167 static __isl_give isl_schedule_node *sort_statements(
4168 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4169 int initialized)
4171 isl_ctx *ctx;
4172 isl_union_set_list *filters;
4174 if (!node)
4175 return NULL;
4177 ctx = isl_schedule_node_get_ctx(node);
4178 if (graph->n < 1)
4179 isl_die(ctx, isl_error_internal,
4180 "graph should have at least one node",
4181 return isl_schedule_node_free(node));
4183 if (graph->n == 1)
4184 return node;
4186 if (update_edges(ctx, graph) < 0)
4187 return isl_schedule_node_free(node);
4189 if (graph->n_edge == 0)
4190 return node;
4192 if (detect_sccs(ctx, graph) < 0)
4193 return isl_schedule_node_free(node);
4195 next_band(graph);
4196 if (graph->scc < graph->n) {
4197 if (!initialized && compute_maxvar(graph) < 0)
4198 return isl_schedule_node_free(node);
4199 return carry_dependences(node, graph);
4202 filters = extract_sccs(ctx, graph);
4203 node = isl_schedule_node_insert_sequence(node, filters);
4205 return node;
4208 /* Are there any (non-empty) (conditional) validity edges in the graph?
4210 static int has_validity_edges(struct isl_sched_graph *graph)
4212 int i;
4214 for (i = 0; i < graph->n_edge; ++i) {
4215 int empty;
4217 empty = isl_map_plain_is_empty(graph->edge[i].map);
4218 if (empty < 0)
4219 return -1;
4220 if (empty)
4221 continue;
4222 if (is_any_validity(&graph->edge[i]))
4223 return 1;
4226 return 0;
4229 /* Should we apply a Feautrier step?
4230 * That is, did the user request the Feautrier algorithm and are
4231 * there any validity dependences (left)?
4233 static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
4235 if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
4236 return 0;
4238 return has_validity_edges(graph);
4241 /* Compute a schedule for a connected dependence graph using Feautrier's
4242 * multi-dimensional scheduling algorithm and return the updated schedule node.
4244 * The original algorithm is described in [1].
4245 * The main idea is to minimize the number of scheduling dimensions, by
4246 * trying to satisfy as many dependences as possible per scheduling dimension.
4248 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
4249 * Problem, Part II: Multi-Dimensional Time.
4250 * In Intl. Journal of Parallel Programming, 1992.
4252 static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
4253 isl_schedule_node *node, struct isl_sched_graph *graph)
4255 return carry_dependences(node, graph);
4258 /* Turn off the "local" bit on all (condition) edges.
4260 static void clear_local_edges(struct isl_sched_graph *graph)
4262 int i;
4264 for (i = 0; i < graph->n_edge; ++i)
4265 if (is_condition(&graph->edge[i]))
4266 clear_local(&graph->edge[i]);
4269 /* Does "graph" have both condition and conditional validity edges?
4271 static int need_condition_check(struct isl_sched_graph *graph)
4273 int i;
4274 int any_condition = 0;
4275 int any_conditional_validity = 0;
4277 for (i = 0; i < graph->n_edge; ++i) {
4278 if (is_condition(&graph->edge[i]))
4279 any_condition = 1;
4280 if (is_conditional_validity(&graph->edge[i]))
4281 any_conditional_validity = 1;
4284 return any_condition && any_conditional_validity;
4287 /* Does "graph" contain any coincidence edge?
4289 static int has_any_coincidence(struct isl_sched_graph *graph)
4291 int i;
4293 for (i = 0; i < graph->n_edge; ++i)
4294 if (is_coincidence(&graph->edge[i]))
4295 return 1;
4297 return 0;
4300 /* Extract the final schedule row as a map with the iteration domain
4301 * of "node" as domain.
4303 static __isl_give isl_map *final_row(struct isl_sched_node *node)
4305 isl_local_space *ls;
4306 isl_aff *aff;
4307 int row;
4309 row = isl_mat_rows(node->sched) - 1;
4310 ls = isl_local_space_from_space(isl_space_copy(node->space));
4311 aff = extract_schedule_row(ls, node, row);
4312 return isl_map_from_aff(aff);
4315 /* Is the conditional validity dependence in the edge with index "edge_index"
4316 * violated by the latest (i.e., final) row of the schedule?
4317 * That is, is i scheduled after j
4318 * for any conditional validity dependence i -> j?
4320 static int is_violated(struct isl_sched_graph *graph, int edge_index)
4322 isl_map *src_sched, *dst_sched, *map;
4323 struct isl_sched_edge *edge = &graph->edge[edge_index];
4324 int empty;
4326 src_sched = final_row(edge->src);
4327 dst_sched = final_row(edge->dst);
4328 map = isl_map_copy(edge->map);
4329 map = isl_map_apply_domain(map, src_sched);
4330 map = isl_map_apply_range(map, dst_sched);
4331 map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
4332 empty = isl_map_is_empty(map);
4333 isl_map_free(map);
4335 if (empty < 0)
4336 return -1;
4338 return !empty;
4341 /* Does "graph" have any satisfied condition edges that
4342 * are adjacent to the conditional validity constraint with
4343 * domain "conditional_source" and range "conditional_sink"?
4345 * A satisfied condition is one that is not local.
4346 * If a condition was forced to be local already (i.e., marked as local)
4347 * then there is no need to check if it is in fact local.
4349 * Additionally, mark all adjacent condition edges found as local.
4351 static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
4352 __isl_keep isl_union_set *conditional_source,
4353 __isl_keep isl_union_set *conditional_sink)
4355 int i;
4356 int any = 0;
4358 for (i = 0; i < graph->n_edge; ++i) {
4359 int adjacent, local;
4360 isl_union_map *condition;
4362 if (!is_condition(&graph->edge[i]))
4363 continue;
4364 if (is_local(&graph->edge[i]))
4365 continue;
4367 condition = graph->edge[i].tagged_condition;
4368 adjacent = domain_intersects(condition, conditional_sink);
4369 if (adjacent >= 0 && !adjacent)
4370 adjacent = range_intersects(condition,
4371 conditional_source);
4372 if (adjacent < 0)
4373 return -1;
4374 if (!adjacent)
4375 continue;
4377 set_local(&graph->edge[i]);
4379 local = is_condition_false(&graph->edge[i]);
4380 if (local < 0)
4381 return -1;
4382 if (!local)
4383 any = 1;
4386 return any;
4389 /* Are there any violated conditional validity dependences with
4390 * adjacent condition dependences that are not local with respect
4391 * to the current schedule?
4392 * That is, is the conditional validity constraint violated?
4394 * Additionally, mark all those adjacent condition dependences as local.
4395 * We also mark those adjacent condition dependences that were not marked
4396 * as local before, but just happened to be local already. This ensures
4397 * that they remain local if the schedule is recomputed.
4399 * We first collect domain and range of all violated conditional validity
4400 * dependences and then check if there are any adjacent non-local
4401 * condition dependences.
4403 static int has_violated_conditional_constraint(isl_ctx *ctx,
4404 struct isl_sched_graph *graph)
4406 int i;
4407 int any = 0;
4408 isl_union_set *source, *sink;
4410 source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4411 sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4412 for (i = 0; i < graph->n_edge; ++i) {
4413 isl_union_set *uset;
4414 isl_union_map *umap;
4415 int violated;
4417 if (!is_conditional_validity(&graph->edge[i]))
4418 continue;
4420 violated = is_violated(graph, i);
4421 if (violated < 0)
4422 goto error;
4423 if (!violated)
4424 continue;
4426 any = 1;
4428 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
4429 uset = isl_union_map_domain(umap);
4430 source = isl_union_set_union(source, uset);
4431 source = isl_union_set_coalesce(source);
4433 umap = isl_union_map_copy(graph->edge[i].tagged_validity);
4434 uset = isl_union_map_range(umap);
4435 sink = isl_union_set_union(sink, uset);
4436 sink = isl_union_set_coalesce(sink);
4439 if (any)
4440 any = has_adjacent_true_conditions(graph, source, sink);
4442 isl_union_set_free(source);
4443 isl_union_set_free(sink);
4444 return any;
4445 error:
4446 isl_union_set_free(source);
4447 isl_union_set_free(sink);
4448 return -1;
4451 /* Examine the current band (the rows between graph->band_start and
4452 * graph->n_total_row), deciding whether to drop it or add it to "node"
4453 * and then continue with the computation of the next band, if any.
4454 * If "initialized" is set, then it may be assumed that compute_maxvar
4455 * has been called on the current band. Otherwise, call
4456 * compute_maxvar if (and before) carry_dependences gets called.
4458 * The caller keeps looking for a new row as long as
4459 * graph->n_row < graph->maxvar. If the latest attempt to find
4460 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
4461 * then we either
4462 * - split between SCCs and start over (assuming we found an interesting
4463 * pair of SCCs between which to split)
4464 * - continue with the next band (assuming the current band has at least
4465 * one row)
4466 * - try to carry as many dependences as possible and continue with the next
4467 * band
4468 * In each case, we first insert a band node in the schedule tree
4469 * if any rows have been computed.
4471 * If the caller managed to complete the schedule, we insert a band node
4472 * (if any schedule rows were computed) and we finish off by topologically
4473 * sorting the statements based on the remaining dependences.
4475 static __isl_give isl_schedule_node *compute_schedule_finish_band(
4476 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4477 int initialized)
4479 int insert;
4481 if (!node)
4482 return NULL;
4484 if (graph->n_row < graph->maxvar) {
4485 isl_ctx *ctx;
4486 int empty = graph->n_total_row == graph->band_start;
4488 ctx = isl_schedule_node_get_ctx(node);
4489 if (!ctx->opt->schedule_maximize_band_depth && !empty)
4490 return compute_next_band(node, graph, 1);
4491 if (graph->src_scc >= 0)
4492 return compute_split_schedule(node, graph);
4493 if (!empty)
4494 return compute_next_band(node, graph, 1);
4495 if (!initialized && compute_maxvar(graph) < 0)
4496 return isl_schedule_node_free(node);
4497 return carry_dependences(node, graph);
4500 insert = graph->n_total_row > graph->band_start;
4501 if (insert) {
4502 node = insert_current_band(node, graph, 1);
4503 node = isl_schedule_node_child(node, 0);
4505 node = sort_statements(node, graph, initialized);
4506 if (insert)
4507 node = isl_schedule_node_parent(node);
4509 return node;
4512 /* Construct a band of schedule rows for a connected dependence graph.
4513 * The caller is responsible for determining the strongly connected
4514 * components and calling compute_maxvar first.
4516 * We try to find a sequence of as many schedule rows as possible that result
4517 * in non-negative dependence distances (independent of the previous rows
4518 * in the sequence, i.e., such that the sequence is tilable), with as
4519 * many of the initial rows as possible satisfying the coincidence constraints.
4520 * The computation stops if we can't find any more rows or if we have found
4521 * all the rows we wanted to find.
4523 * If ctx->opt->schedule_outer_coincidence is set, then we force the
4524 * outermost dimension to satisfy the coincidence constraints. If this
4525 * turns out to be impossible, we fall back on the general scheme above
4526 * and try to carry as many dependences as possible.
4528 * If "graph" contains both condition and conditional validity dependences,
4529 * then we need to check that the conditional schedule constraint
4530 * is satisfied, i.e., there are no violated conditional validity dependences
4531 * that are adjacent to any non-local condition dependences.
4532 * If there are, then we mark all those adjacent condition dependences
4533 * as local and recompute the current band. Those dependences that
4534 * are marked local will then be forced to be local.
4535 * The initial computation is performed with no dependences marked as local.
4536 * If we are lucky, then there will be no violated conditional validity
4537 * dependences adjacent to any non-local condition dependences.
4538 * Otherwise, we mark some additional condition dependences as local and
4539 * recompute. We continue this process until there are no violations left or
4540 * until we are no longer able to compute a schedule.
4541 * Since there are only a finite number of dependences,
4542 * there will only be a finite number of iterations.
4544 static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
4545 struct isl_sched_graph *graph)
4547 int has_coincidence;
4548 int use_coincidence;
4549 int force_coincidence = 0;
4550 int check_conditional;
4552 if (sort_sccs(graph) < 0)
4553 return isl_stat_error;
4555 clear_local_edges(graph);
4556 check_conditional = need_condition_check(graph);
4557 has_coincidence = has_any_coincidence(graph);
4559 if (ctx->opt->schedule_outer_coincidence)
4560 force_coincidence = 1;
4562 use_coincidence = has_coincidence;
4563 while (graph->n_row < graph->maxvar) {
4564 isl_vec *sol;
4565 int violated;
4566 int coincident;
4568 graph->src_scc = -1;
4569 graph->dst_scc = -1;
4571 if (setup_lp(ctx, graph, use_coincidence) < 0)
4572 return isl_stat_error;
4573 sol = solve_lp(graph);
4574 if (!sol)
4575 return isl_stat_error;
4576 if (sol->size == 0) {
4577 int empty = graph->n_total_row == graph->band_start;
4579 isl_vec_free(sol);
4580 if (use_coincidence && (!force_coincidence || !empty)) {
4581 use_coincidence = 0;
4582 continue;
4584 return isl_stat_ok;
4586 coincident = !has_coincidence || use_coincidence;
4587 if (update_schedule(graph, sol, 1, coincident) < 0)
4588 return isl_stat_error;
4590 if (!check_conditional)
4591 continue;
4592 violated = has_violated_conditional_constraint(ctx, graph);
4593 if (violated < 0)
4594 return isl_stat_error;
4595 if (!violated)
4596 continue;
4597 if (reset_band(graph) < 0)
4598 return isl_stat_error;
4599 use_coincidence = has_coincidence;
4602 return isl_stat_ok;
4605 /* Compute a schedule for a connected dependence graph by considering
4606 * the graph as a whole and return the updated schedule node.
4608 * The actual schedule rows of the current band are computed by
4609 * compute_schedule_wcc_band. compute_schedule_finish_band takes
4610 * care of integrating the band into "node" and continuing
4611 * the computation.
4613 static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
4614 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
4616 isl_ctx *ctx;
4618 if (!node)
4619 return NULL;
4621 ctx = isl_schedule_node_get_ctx(node);
4622 if (compute_schedule_wcc_band(ctx, graph) < 0)
4623 return isl_schedule_node_free(node);
4625 return compute_schedule_finish_band(node, graph, 1);
4628 /* Clustering information used by compute_schedule_wcc_clustering.
4630 * "n" is the number of SCCs in the original dependence graph
4631 * "scc" is an array of "n" elements, each representing an SCC
4632 * of the original dependence graph. All entries in the same cluster
4633 * have the same number of schedule rows.
4634 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
4635 * where each cluster is represented by the index of the first SCC
4636 * in the cluster. Initially, each SCC belongs to a cluster containing
4637 * only that SCC.
4639 * "scc_in_merge" is used by merge_clusters_along_edge to keep
4640 * track of which SCCs need to be merged.
4642 * "cluster" contains the merged clusters of SCCs after the clustering
4643 * has completed.
4645 * "scc_node" is a temporary data structure used inside copy_partial.
4646 * For each SCC, it keeps track of the number of nodes in the SCC
4647 * that have already been copied.
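/* As a hypothetical illustration, with n = 4 SCCs the initial state is
 * scc_cluster = { 0, 1, 2, 3 }, i.e., four singleton clusters.
 * If SCCs 1 and 2 are then merged (scc_in_merge = { 0, 1, 1, 0 } during
 * that merge), the merged cluster is represented by its first SCC and
 * scc_cluster becomes { 0, 1, 1, 3 }.
 */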
4649 struct isl_clustering {
4650 int n;
4651 struct isl_sched_graph *scc;
4652 struct isl_sched_graph *cluster;
4653 int *scc_cluster;
4654 int *scc_node;
4655 int *scc_in_merge;
4658 /* Initialize the clustering data structure "c" from "graph".
4660 * In particular, allocate memory, extract the SCCs from "graph"
4661 * into c->scc, initialize scc_cluster and construct
4662 * a band of schedule rows for each SCC.
4663 * Within each extracted sub-graph, there is only one SCC by definition.
4664 * Each SCC initially belongs to a cluster containing only that SCC.
4666 static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
4667 struct isl_sched_graph *graph)
4669 int i;
4671 c->n = graph->scc;
4672 c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
4673 c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
4674 c->scc_cluster = isl_calloc_array(ctx, int, c->n);
4675 c->scc_node = isl_calloc_array(ctx, int, c->n);
4676 c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
4677 if (!c->scc || !c->cluster ||
4678 !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
4679 return isl_stat_error;
4681 for (i = 0; i < c->n; ++i) {
4682 if (extract_sub_graph(ctx, graph, &node_scc_exactly,
4683 &edge_scc_exactly, i, &c->scc[i]) < 0)
4684 return isl_stat_error;
4685 c->scc[i].scc = 1;
4686 if (compute_maxvar(&c->scc[i]) < 0)
4687 return isl_stat_error;
4688 if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
4689 return isl_stat_error;
4690 c->scc_cluster[i] = i;
4693 return isl_stat_ok;
4696 /* Free all memory allocated for "c".
4698 static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
4700 int i;
4702 if (c->scc)
4703 for (i = 0; i < c->n; ++i)
4704 graph_free(ctx, &c->scc[i]);
4705 free(c->scc);
4706 if (c->cluster)
4707 for (i = 0; i < c->n; ++i)
4708 graph_free(ctx, &c->cluster[i]);
4709 free(c->cluster);
4710 free(c->scc_cluster);
4711 free(c->scc_node);
4712 free(c->scc_in_merge);
4715 /* Should we refrain from merging the cluster in "graph" with
4716 * any other cluster?
4717 * In particular, is its current schedule band empty and incomplete?
4719 static int bad_cluster(struct isl_sched_graph *graph)
4721 return graph->n_row < graph->maxvar &&
4722 graph->n_total_row == graph->band_start;
4725 /* Return the index of an edge in "graph" that can be used to merge
4726 * two clusters in "c".
4727 * Return graph->n_edge if no such edge can be found.
4728 * Return -1 on error.
4730 * In particular, return a proximity edge between two clusters
4731 * that is not marked "no_merge" and such that neither of the
4732 * two clusters has an incomplete, empty band.
4734 * If there are multiple such edges, then try and find the most
4735 * appropriate edge to use for merging. In particular, pick the edge
4736 * with the greatest weight. If there are multiple of those,
4737 * then pick one with the shortest distance between
4738 * the two cluster representatives.
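/* For instance, assuming none of the involved clusters has an empty,
 * incomplete band and none of the edges is marked no_merge,
 * a proximity edge of weight 3 between clusters 0 and 2 is preferred
 * over an edge of weight 2, and also over another edge of weight 3
 * between clusters 0 and 5, since the representative indices of
 * the latter pair are further apart.
 */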
4740 static int find_proximity(struct isl_sched_graph *graph,
4741 struct isl_clustering *c)
4743 int i, best = graph->n_edge, best_dist, best_weight;
4745 for (i = 0; i < graph->n_edge; ++i) {
4746 struct isl_sched_edge *edge = &graph->edge[i];
4747 int dist, weight;
4749 if (!is_proximity(edge))
4750 continue;
4751 if (edge->no_merge)
4752 continue;
4753 if (bad_cluster(&c->scc[edge->src->scc]) ||
4754 bad_cluster(&c->scc[edge->dst->scc]))
4755 continue;
4756 dist = c->scc_cluster[edge->dst->scc] -
4757 c->scc_cluster[edge->src->scc];
4758 if (dist == 0)
4759 continue;
4760 weight = edge->weight;
4761 if (best < graph->n_edge) {
4762 if (best_weight > weight)
4763 continue;
4764 if (best_weight == weight && best_dist <= dist)
4765 continue;
4767 best = i;
4768 best_dist = dist;
4769 best_weight = weight;
4772 return best;
4775 /* Internal data structure used in mark_merge_sccs.
4777 * "graph" is the dependence graph in which a strongly connected
4778 * component is constructed.
4779 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
4780 * "src" and "dst" are the indices of the nodes that are being merged.
4782 struct isl_mark_merge_sccs_data {
4783 struct isl_sched_graph *graph;
4784 int *scc_cluster;
4785 int src;
4786 int dst;
4789 /* Check whether the cluster containing node "i" depends on the cluster
4790 * containing node "j". If "i" and "j" belong to the same cluster,
4791 * then they are taken to depend on each other to ensure that
4792 * the resulting strongly connected component consists of complete
4793 * clusters. Furthermore, if "i" and "j" are the two nodes that
4794 * are being merged, then they are taken to depend on each other as well.
4795 * Otherwise, check if there is a (conditional) validity dependence
4796 * from node[j] to node[i], forcing node[i] to follow node[j].
4798 static isl_bool cluster_follows(int i, int j, void *user)
4800 struct isl_mark_merge_sccs_data *data = user;
4801 struct isl_sched_graph *graph = data->graph;
4802 int *scc_cluster = data->scc_cluster;
4804 if (data->src == i && data->dst == j)
4805 return isl_bool_true;
4806 if (data->src == j && data->dst == i)
4807 return isl_bool_true;
4808 if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
4809 return isl_bool_true;
4811 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
4814 /* Mark all SCCs that belong to either of the two clusters in "c"
4815 * connected by the edge in "graph" with index "edge", or to any
4816 * of the intermediate clusters.
4817 * The marking is recorded in c->scc_in_merge.
4819 * The given edge has been selected for merging two clusters,
4820 * meaning that there is at least a proximity edge between the two nodes.
4821 * However, there may also be (indirect) validity dependences
4822 * between the two nodes. When merging the two clusters, all clusters
4823 * containing one or more of the intermediate nodes along the
4824 * indirect validity dependences need to be merged in as well.
4826 * First collect all such nodes by computing the strongly connected
4827 * component (SCC) containing the two nodes connected by the edge, where
4828 * the two nodes are considered to depend on each other to make
4829 * sure they end up in the same SCC. Similarly, each node is considered
4830 * to depend on every other node in the same cluster to ensure
4831 * that the SCC consists of complete clusters.
4833 * Then the original SCCs that contain any of these nodes are marked
4834 * in c->scc_in_merge.
4836 static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
4837 int edge, struct isl_clustering *c)
4839 struct isl_mark_merge_sccs_data data;
4840 struct isl_tarjan_graph *g;
4841 int i;
4843 for (i = 0; i < c->n; ++i)
4844 c->scc_in_merge[i] = 0;
4846 data.graph = graph;
4847 data.scc_cluster = c->scc_cluster;
4848 data.src = graph->edge[edge].src - graph->node;
4849 data.dst = graph->edge[edge].dst - graph->node;
4851 g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
4852 &cluster_follows, &data);
4853 if (!g)
4854 goto error;
4856 i = g->op;
4857 if (i < 3)
4858 isl_die(ctx, isl_error_internal,
4859 "expecting at least two nodes in component",
4860 goto error);
4861 if (g->order[--i] != -1)
4862 isl_die(ctx, isl_error_internal,
4863 "expecting end of component marker", goto error);
4865 for (--i; i >= 0 && g->order[i] != -1; --i) {
4866 int scc = graph->node[g->order[i]].scc;
4867 c->scc_in_merge[scc] = 1;
4870 isl_tarjan_graph_free(g);
4871 return isl_stat_ok;
4872 error:
4873 isl_tarjan_graph_free(g);
4874 return isl_stat_error;
4877 /* Construct the identifier "cluster_i".
4879 static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
4881 char name[40];
4883 snprintf(name, sizeof(name), "cluster_%d", i);
4884 return isl_id_alloc(ctx, name, NULL);
4887 /* Construct the space of the cluster with index "i" containing
4888 * the strongly connected component "scc".
4890 * In particular, construct a space called cluster_i with dimension equal
4891 * to the number of schedule rows in the current band of "scc".
4893 static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
4895 int nvar;
4896 isl_space *space;
4897 isl_id *id;
4899 nvar = scc->n_total_row - scc->band_start;
4900 space = isl_space_copy(scc->node[0].space);
4901 space = isl_space_params(space);
4902 space = isl_space_set_from_params(space);
4903 space = isl_space_add_dims(space, isl_dim_set, nvar);
4904 id = cluster_id(isl_space_get_ctx(space), i);
4905 space = isl_space_set_tuple_id(space, isl_dim_set, id);
4907 return space;
4910 /* Collect the domain of the graph for merging clusters.
4912 * In particular, for each cluster with first SCC "i", construct
4913 * a set in the space called cluster_i with dimension equal
4914 * to the number of schedule rows in the current band of the cluster.
4916 static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
4917 struct isl_sched_graph *graph, struct isl_clustering *c)
4919 int i;
4920 isl_space *space;
4921 isl_union_set *domain;
4923 space = isl_space_params_alloc(ctx, 0);
4924 domain = isl_union_set_empty(space);
4926 for (i = 0; i < graph->scc; ++i) {
4927 isl_space *space;
4929 if (!c->scc_in_merge[i])
4930 continue;
4931 if (c->scc_cluster[i] != i)
4932 continue;
4933 space = cluster_space(&c->scc[i], i);
4934 domain = isl_union_set_add_set(domain, isl_set_universe(space));
4937 return domain;
4940 /* Construct a map from the original instances to the corresponding
4941 * cluster instance in the current bands of the clusters in "c".
4943 static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
4944 struct isl_sched_graph *graph, struct isl_clustering *c)
4946 int i, j;
4947 isl_space *space;
4948 isl_union_map *cluster_map;
4950 space = isl_space_params_alloc(ctx, 0);
4951 cluster_map = isl_union_map_empty(space);
4952 for (i = 0; i < graph->scc; ++i) {
4953 int start, n;
4954 isl_id *id;
4956 if (!c->scc_in_merge[i])
4957 continue;
4959 id = cluster_id(ctx, c->scc_cluster[i]);
4960 start = c->scc[i].band_start;
4961 n = c->scc[i].n_total_row - start;
4962 for (j = 0; j < c->scc[i].n; ++j) {
4963 isl_multi_aff *ma;
4964 isl_map *map;
4965 struct isl_sched_node *node = &c->scc[i].node[j];
4967 ma = node_extract_partial_schedule_multi_aff(node,
4968 start, n);
4969 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
4970 isl_id_copy(id));
4971 map = isl_map_from_multi_aff(ma);
4972 cluster_map = isl_union_map_add_map(cluster_map, map);
4974 isl_id_free(id);
4977 return cluster_map;
4980 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
4981 * that are not isl_edge_condition or isl_edge_conditional_validity.
4983 static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
4984 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
4985 __isl_take isl_schedule_constraints *sc)
4987 enum isl_edge_type t;
4989 if (!sc)
4990 return NULL;
4992 for (t = isl_edge_first; t <= isl_edge_last; ++t) {
4993 if (t == isl_edge_condition ||
4994 t == isl_edge_conditional_validity)
4995 continue;
4996 if (!is_type(edge, t))
4997 continue;
4998 sc->constraint[t] = isl_union_map_union(sc->constraint[t],
4999 isl_union_map_copy(umap));
5000 if (!sc->constraint[t])
5001 return isl_schedule_constraints_free(sc);
5004 return sc;
5007 /* Add schedule constraints of types isl_edge_condition and
5008 * isl_edge_conditional_validity to "sc" by applying "umap" to
5009 * the domains of the wrapped relations in domain and range
5010 * of the corresponding tagged constraints of "edge".
5012 static __isl_give isl_schedule_constraints *add_conditional_constraints(
5013 struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
5014 __isl_take isl_schedule_constraints *sc)
5016 enum isl_edge_type t;
5017 isl_union_map *tagged;
5019 for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
5020 if (!is_type(edge, t))
5021 continue;
5022 if (t == isl_edge_condition)
5023 tagged = isl_union_map_copy(edge->tagged_condition);
5024 else
5025 tagged = isl_union_map_copy(edge->tagged_validity);
5026 tagged = isl_union_map_zip(tagged);
5027 tagged = isl_union_map_apply_domain(tagged,
5028 isl_union_map_copy(umap));
5029 tagged = isl_union_map_zip(tagged);
5030 sc->constraint[t] = isl_union_map_union(sc->constraint[t],
5031 tagged);
5032 if (!sc->constraint[t])
5033 return isl_schedule_constraints_free(sc);
5036 return sc;
5039 /* Given a mapping "cluster_map" from the original instances to
5040 * the cluster instances, add schedule constraints on the clusters
5041 * to "sc" corresponding to the original constraints represented by "edge".
5043 * For non-tagged dependence constraints, the cluster constraints
5044 * are obtained by applying "cluster_map" to the edge->map.
5046 * For tagged dependence constraints, "cluster_map" needs to be applied
5047 * to the domains of the wrapped relations in domain and range
5048 * of the tagged dependence constraints. Pick out the mappings
5049 * from these domains from "cluster_map" and construct their product.
5050 * This mapping can then be applied to the pair of domains.
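/* As a hypothetical illustration (statement and reference names invented
 * here), a tagged constraint [S_1[i] -> ref_a[]] -> [S_2[j] -> ref_b[]]
 * should only have its statement instances S_1[i] and S_2[j] rewritten
 * to cluster instances.  The product of the two copies of "cluster_map",
 * restricted to the domain and range spaces of edge->map, provides this
 * mapping on the pair of statement instances; add_conditional_constraints
 * then uses isl_union_map_zip to temporarily move the statement instances
 * into position for applying this product, leaving the reference tags
 * untouched.
 */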
5052 static __isl_give isl_schedule_constraints *collect_edge_constraints(
5053 struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
5054 __isl_take isl_schedule_constraints *sc)
5056 isl_union_map *umap;
5057 isl_space *space;
5058 isl_union_set *uset;
5059 isl_union_map *umap1, *umap2;
5061 if (!sc)
5062 return NULL;
5064 umap = isl_union_map_from_map(isl_map_copy(edge->map));
5065 umap = isl_union_map_apply_domain(umap,
5066 isl_union_map_copy(cluster_map));
5067 umap = isl_union_map_apply_range(umap,
5068 isl_union_map_copy(cluster_map));
5069 sc = add_non_conditional_constraints(edge, umap, sc);
5070 isl_union_map_free(umap);
5072 if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
5073 return sc;
5075 space = isl_space_domain(isl_map_get_space(edge->map));
5076 uset = isl_union_set_from_set(isl_set_universe(space));
5077 umap1 = isl_union_map_copy(cluster_map);
5078 umap1 = isl_union_map_intersect_domain(umap1, uset);
5079 space = isl_space_range(isl_map_get_space(edge->map));
5080 uset = isl_union_set_from_set(isl_set_universe(space));
5081 umap2 = isl_union_map_copy(cluster_map);
5082 umap2 = isl_union_map_intersect_domain(umap2, uset);
5083 umap = isl_union_map_product(umap1, umap2);
5085 sc = add_conditional_constraints(edge, umap, sc);
5087 isl_union_map_free(umap);
5088 return sc;
5091 /* Given a mapping "cluster_map" from the original instances to
5092 * the cluster instances, add schedule constraints on the clusters
5093 * to "sc" corresponding to all edges in "graph" between nodes that
5094 * belong to SCCs that are marked for merging in "scc_in_merge".
5096 static __isl_give isl_schedule_constraints *collect_constraints(
5097 struct isl_sched_graph *graph, int *scc_in_merge,
5098 __isl_keep isl_union_map *cluster_map,
5099 __isl_take isl_schedule_constraints *sc)
5101 int i;
5103 for (i = 0; i < graph->n_edge; ++i) {
5104 struct isl_sched_edge *edge = &graph->edge[i];
5106 if (!scc_in_merge[edge->src->scc])
5107 continue;
5108 if (!scc_in_merge[edge->dst->scc])
5109 continue;
5110 sc = collect_edge_constraints(edge, cluster_map, sc);
5113 return sc;
5116 /* Construct a dependence graph for scheduling clusters with respect
5117 * to each other and store the result in "merge_graph".
5118 * In particular, the nodes of the graph correspond to the schedule
5119 * dimensions of the current bands of those clusters that have been
5120 * marked for merging in "c".
5122 * First construct an isl_schedule_constraints object for this domain
5123 * by transforming the edges in "graph" to the domain.
5124 * Then initialize a dependence graph for scheduling from these
5125 * constraints.
5127 static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
5128 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
5130 isl_union_set *domain;
5131 isl_union_map *cluster_map;
5132 isl_schedule_constraints *sc;
5133 isl_stat r;
5135 domain = collect_domain(ctx, graph, c);
5136 sc = isl_schedule_constraints_on_domain(domain);
5137 if (!sc)
5138 return isl_stat_error;
5139 cluster_map = collect_cluster_map(ctx, graph, c);
5140 sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
5141 isl_union_map_free(cluster_map);
5143 r = graph_init(merge_graph, sc);
5145 isl_schedule_constraints_free(sc);
5147 return r;
5150 /* Compute the maximal number of remaining schedule rows that still need
5151 * to be computed for the nodes that belong to clusters with the maximal
5152 * dimension for the current band (i.e., the band that is to be merged).
5153 * Only clusters that are about to be merged are considered.
5154 * "maxvar" is the maximal dimension for the current band.
5155 * "c" contains information about the clusters.
5157 * Return the maximal number of remaining schedule rows or -1 on error.
5159 static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
5161 int i, j;
5162 int max_slack;
5164 max_slack = 0;
5165 for (i = 0; i < c->n; ++i) {
5166 int nvar;
5167 struct isl_sched_graph *scc;
5169 if (!c->scc_in_merge[i])
5170 continue;
5171 scc = &c->scc[i];
5172 nvar = scc->n_total_row - scc->band_start;
5173 if (nvar != maxvar)
5174 continue;
5175 for (j = 0; j < scc->n; ++j) {
5176 struct isl_sched_node *node = &scc->node[j];
5177 int slack;
5179 if (node_update_cmap(node) < 0)
5180 return -1;
5181 slack = node->nvar - node->rank;
5182 if (slack > max_slack)
5183 max_slack = slack;
5187 return max_slack;
5190 /* If there are any clusters where the dimension of the current band
5191 * (i.e., the band that is to be merged) is smaller than "maxvar" and
5192 * if there are any nodes in such a cluster where the number
5193 * of remaining schedule rows that still need to be computed
5194 * is greater than "max_slack", then return the smallest current band
5195 * dimension of all these clusters. Otherwise return the original value
5196 * of "maxvar". Return -1 in case of any error.
5197 * Only clusters that are about to be merged are considered.
5198 * "c" contains information about the clusters.
5200 static int limit_maxvar_to_slack(int maxvar, int max_slack,
5201 struct isl_clustering *c)
5203 int i, j;
5205 for (i = 0; i < c->n; ++i) {
5206 int nvar;
5207 struct isl_sched_graph *scc;
5209 if (!c->scc_in_merge[i])
5210 continue;
5211 scc = &c->scc[i];
5212 nvar = scc->n_total_row - scc->band_start;
5213 if (nvar >= maxvar)
5214 continue;
5215 for (j = 0; j < scc->n; ++j) {
5216 struct isl_sched_node *node = &scc->node[j];
5217 int slack;
5219 if (node_update_cmap(node) < 0)
5220 return -1;
5221 slack = node->nvar - node->rank;
5222 if (slack > max_slack) {
5223 maxvar = nvar;
5224 break;
5229 return maxvar;
5232 /* Adjust merge_graph->maxvar based on the number of remaining schedule rows
5233 * that still need to be computed. In particular, if there is a node
5234 * in a cluster where the dimension of the current band is smaller
5235 * than merge_graph->maxvar, but the number of remaining schedule rows
5236 * is greater than that of any node in a cluster with the maximal
5237 * dimension for the current band (i.e., merge_graph->maxvar),
5238 * then adjust merge_graph->maxvar to the (smallest) current band dimension
5239 * of those clusters. Without this adjustment, the total number of
5240 * schedule dimensions would be increased, resulting in a skewed view
5241 * of the number of coincident dimensions.
5242 * "c" contains information about the clusters.
5244 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
5245 * then there is no point in attempting any merge since it will be rejected
5246 * anyway. Set merge_graph->maxvar to zero in such cases.
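/* As a hypothetical illustration, suppose two clusters are marked for
 * merging and merge_graph->maxvar is 3 because the first cluster's
 * current band has 3 rows, while its nodes need at most 1 more row
 * (max_slack = 1).  If the second cluster's band has only 1 row and
 * contains a node with nvar = 4 and rank = 1, i.e., 3 remaining rows,
 * then 3 > max_slack and maxvar is reduced to 1, the band dimension of
 * the second cluster.  If schedule_maximize_band_depth is set,
 * merge_graph->maxvar is set to 0 instead, so that the merge is skipped.
 */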
5248 static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
5249 struct isl_sched_graph *merge_graph, struct isl_clustering *c)
5251 int max_slack, maxvar;
5253 max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
5254 if (max_slack < 0)
5255 return isl_stat_error;
5256 maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
5257 if (maxvar < 0)
5258 return isl_stat_error;
5260 if (maxvar < merge_graph->maxvar) {
5261 if (isl_options_get_schedule_maximize_band_depth(ctx))
5262 merge_graph->maxvar = 0;
5263 else
5264 merge_graph->maxvar = maxvar;
5267 return isl_stat_ok;
5270 /* Return the number of coincident dimensions in the current band of "graph",
5271 * where the nodes of "graph" are assumed to be scheduled by a single band.
5273 static int get_n_coincident(struct isl_sched_graph *graph)
5275 int i;
5277 for (i = graph->band_start; i < graph->n_total_row; ++i)
5278 if (!graph->node[0].coincident[i])
5279 break;
5281 return i - graph->band_start;
5284 /* Should the clusters be merged based on the cluster schedule
5285 * in the current (and only) band of "merge_graph", given that
5286 * coincidence should be maximized?
5288 * If the number of coincident schedule dimensions in the merged band
5289 * would be less than the maximal number of coincident schedule dimensions
5290 * in any of the merged clusters, then the clusters should not be merged.
5292 static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
5293 struct isl_sched_graph *merge_graph)
5295 int i;
5296 int n_coincident;
5297 int max_coincident;
5299 max_coincident = 0;
5300 for (i = 0; i < c->n; ++i) {
5301 if (!c->scc_in_merge[i])
5302 continue;
5303 n_coincident = get_n_coincident(&c->scc[i]);
5304 if (n_coincident > max_coincident)
5305 max_coincident = n_coincident;
5308 n_coincident = get_n_coincident(merge_graph);
5310 return n_coincident >= max_coincident;
5313 /* Return the transformation on "node" expressed by the current (and only)
5314 * band of "merge_graph" applied to the clusters in "c".
5316 * First find the representation of "node" in its SCC in "c" and
5317 * extract the transformation expressed by the current band.
5318 * Then extract the transformation applied by "merge_graph"
5319 * to the cluster to which this SCC belongs.
5320 * Combine the two to obtain the complete transformation on the node.
5322 * Note that the range of the first transformation is an anonymous space,
5323 * while the domain of the second is named "cluster_X". The range
5324 * of the former therefore needs to be adjusted before the two
5325 * can be combined.
5327 static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
5328 struct isl_sched_node *node, struct isl_clustering *c,
5329 struct isl_sched_graph *merge_graph)
5331 struct isl_sched_node *scc_node, *cluster_node;
5332 int start, n;
5333 isl_id *id;
5334 isl_space *space;
5335 isl_multi_aff *ma, *ma2;
5337 scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
5338 start = c->scc[node->scc].band_start;
5339 n = c->scc[node->scc].n_total_row - start;
5340 ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
5341 space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
5342 cluster_node = graph_find_node(ctx, merge_graph, space);
5343 if (space && !cluster_node)
5344 isl_die(ctx, isl_error_internal, "unable to find cluster",
5345 space = isl_space_free(space));
5346 id = isl_space_get_tuple_id(space, isl_dim_set);
5347 ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
5348 isl_space_free(space);
5349 n = merge_graph->n_total_row;
5350 ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
5351 ma = isl_multi_aff_pullback_multi_aff(ma2, ma);
5353 return isl_map_from_multi_aff(ma);
5356 /* Given a set of distances "set", are they bounded by a small constant
5357 * in direction "pos"?
5358 * In practice, check if they are bounded by 2 by checking that there
5359 * are no elements with a value greater than or equal to 3 or
5360 * smaller than or equal to -3.
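/* For instance, in a one-dimensional illustration, distance sets such as
 * { 2 } or { -2, 0, 1 } are considered bounded, while { 3 } or any set
 * containing a value smaller than or equal to -3 is not.
 */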
5362 static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
5364 isl_bool bounded;
5365 isl_set *test;
5367 if (!set)
5368 return isl_bool_error;
5370 test = isl_set_copy(set);
5371 test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
5372 bounded = isl_set_is_empty(test);
5373 isl_set_free(test);
5375 if (bounded < 0 || !bounded)
5376 return bounded;
5378 test = isl_set_copy(set);
5379 test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
5380 bounded = isl_set_is_empty(test);
5381 isl_set_free(test);
5383 return bounded;
5386 /* Does the set "set" have a fixed (but possibly parametric) value
5387 * at dimension "pos"?
5389 static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
5391 int n;
5392 isl_bool single;
5394 if (!set)
5395 return isl_bool_error;
5396 set = isl_set_copy(set);
5397 n = isl_set_dim(set, isl_dim_set);
5398 set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
5399 set = isl_set_project_out(set, isl_dim_set, 0, pos);
5400 single = isl_set_is_singleton(set);
5401 isl_set_free(set);
5403 return single;
5406 /* Does "map" have a fixed (but possible parametric) value
5407 * at dimension "pos" of either its domain or its range?
5409 static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
5411 isl_set *set;
5412 isl_bool single;
5414 set = isl_map_domain(isl_map_copy(map));
5415 single = has_single_value(set, pos);
5416 isl_set_free(set);
5418 if (single < 0 || single)
5419 return single;
5421 set = isl_map_range(isl_map_copy(map));
5422 single = has_single_value(set, pos);
5423 isl_set_free(set);
5425 return single;
5428 /* Does the edge "edge" from "graph" have bounded dependence distances
5429 * in the merged graph "merge_graph" of a selection of clusters in "c"?
5431 * Extract the complete transformations of the source and destination
5432 * nodes of the edge, apply them to the edge constraints and
5433 * compute the differences. Finally, check if these differences are bounded
5434 * in each direction.
5436 * If the dimension of the band is greater than the number of
5437 * dimensions that can be expected to be optimized by the edge
5438 * (based on its weight), then also allow the differences to be unbounded
5439 * in the remaining dimensions, but only if either the source or
5440 * the destination has a fixed value in that direction.
5441 * This allows a statement that produces values that are used by
5442 * several instances of another statement to be merged with that
5443 * other statement.
5444 * However, merging such clusters will introduce an inherently
5445 * large proximity distance inside the merged cluster, meaning
5446 * that proximity distances will no longer be optimized in
5447 * subsequent merges. These merges are therefore only allowed
5448 * after all other possible merges have been tried.
5449 * The first time such a merge is encountered, the weight of the edge
5450 * is replaced by a negative weight. The second time (i.e., after
5451 * all merges over edges with a non-negative weight have been tried),
5452 * the merge is allowed.
5454 static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
5455 struct isl_sched_graph *graph, struct isl_clustering *c,
5456 struct isl_sched_graph *merge_graph)
5458 int i, n, n_slack;
5459 isl_bool bounded;
5460 isl_map *map, *t;
5461 isl_set *dist;
5463 map = isl_map_copy(edge->map);
5464 t = extract_node_transformation(ctx, edge->src, c, merge_graph);
5465 map = isl_map_apply_domain(map, t);
5466 t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
5467 map = isl_map_apply_range(map, t);
5468 dist = isl_map_deltas(isl_map_copy(map));
5470 bounded = isl_bool_true;
5471 n = isl_set_dim(dist, isl_dim_set);
5472 n_slack = n - edge->weight;
5473 if (edge->weight < 0)
5474 n_slack -= graph->max_weight + 1;
5475 for (i = 0; i < n; ++i) {
5476 isl_bool bounded_i, singular_i;
5478 bounded_i = distance_is_bounded(dist, i);
5479 if (bounded_i < 0)
5480 goto error;
5481 if (bounded_i)
5482 continue;
5483 if (edge->weight >= 0)
5484 bounded = isl_bool_false;
5485 n_slack--;
5486 if (n_slack < 0)
5487 break;
5488 singular_i = has_singular_src_or_dst(map, i);
5489 if (singular_i < 0)
5490 goto error;
5491 if (singular_i)
5492 continue;
5493 bounded = isl_bool_false;
5494 break;
5496 if (!bounded && i >= n && edge->weight >= 0)
5497 edge->weight -= graph->max_weight + 1;
5498 isl_map_free(map);
5499 isl_set_free(dist);
5501 return bounded;
5502 error:
5503 isl_map_free(map);
5504 isl_set_free(dist);
5505 return isl_bool_error;
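/* Illustrative sketch (not part of isl_scheduler.c): the deltas step of
 * has_bounded_distances in isolation.  A made-up dependence map is used
 * and the node transformations are omitted; the resulting distance set
 * is what distance_is_bounded above would then examine per dimension.
 */
#include <isl/ctx.h>
#include <isl/map.h>
#include <isl/set.h>

int main(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	/* Each source iteration i is related to target iteration i + 1. */
	isl_map *dep = isl_map_read_from_str(ctx,
		"{ A[i] -> A[i + 1] : 0 <= i < 100 }");
	/* The differences range - domain, here the single distance 1. */
	isl_set *dist = isl_map_deltas(dep);

	isl_set_dump(dist);

	isl_set_free(dist);
	isl_ctx_free(ctx);
	return 0;
}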
5508 /* Should the clusters be merged based on the cluster schedule
5509 * in the current (and only) band of "merge_graph"?
5510 * "graph" is the original dependence graph, while "c" records
5511 * which SCCs are involved in the latest merge.
5513 * In particular, is there at least one proximity constraint
5514 * that is optimized by the merge?
5516 * A proximity constraint is considered to be optimized
5517 * if the dependence distances are small.
5519 static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
5520 struct isl_sched_graph *graph, struct isl_clustering *c,
5521 struct isl_sched_graph *merge_graph)
5523 int i;
5525 for (i = 0; i < graph->n_edge; ++i) {
5526 struct isl_sched_edge *edge = &graph->edge[i];
5527 isl_bool bounded;
5529 if (!is_proximity(edge))
5530 continue;
5531 if (!c->scc_in_merge[edge->src->scc])
5532 continue;
5533 if (!c->scc_in_merge[edge->dst->scc])
5534 continue;
5535 if (c->scc_cluster[edge->dst->scc] ==
5536 c->scc_cluster[edge->src->scc])
5537 continue;
5538 bounded = has_bounded_distances(ctx, edge, graph, c,
5539 merge_graph);
5540 if (bounded < 0 || bounded)
5541 return bounded;
5544 return isl_bool_false;
5547 /* Should the clusters be merged based on the cluster schedule
5548 * in the current (and only) band of "merge_graph"?
5549 * "graph" is the original dependence graph, while "c" records
5550 * which SCCs are involved in the latest merge.
5552 * If the current band is empty, then the clusters should not be merged.
5554 * If the band depth should be maximized and the merge schedule
5555 * is incomplete (meaning that the dimension of some of the schedule
5556 * bands in the original schedule will be reduced), then the clusters
5557 * should not be merged.
5559 * If the schedule_maximize_coincidence option is set, then check that
5560 * the number of coincident schedule dimensions is not reduced.
5562 * Finally, only allow the merge if at least one proximity
5563 * constraint is optimized.
5565 static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
5566 struct isl_clustering *c, struct isl_sched_graph *merge_graph)
5568 if (merge_graph->n_total_row == merge_graph->band_start)
5569 return isl_bool_false;
5571 if (isl_options_get_schedule_maximize_band_depth(ctx) &&
5572 merge_graph->n_total_row < merge_graph->maxvar)
5573 return isl_bool_false;
5575 if (isl_options_get_schedule_maximize_coincidence(ctx)) {
5576 isl_bool ok;
5578 ok = ok_to_merge_coincident(c, merge_graph);
5579 if (ok < 0 || !ok)
5580 return ok;
5583 return ok_to_merge_proximity(ctx, graph, c, merge_graph);
5586 /* Apply the schedule in "t_node" to the "n" rows starting at "first"
5587 * of the schedule in "node" and return the result.
5589 * That is, essentially compute
5591 * T * N(first:first+n-1)
5593 * taking into account the constant term and the parameter coefficients
5594 * in "t_node".
5596 static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
5597 struct isl_sched_node *t_node, struct isl_sched_node *node,
5598 int first, int n)
5600 int i, j;
5601 isl_mat *t;
5602 int n_row, n_col, n_param, n_var;
5604 n_param = node->nparam;
5605 n_var = node->nvar;
5606 n_row = isl_mat_rows(t_node->sched);
5607 n_col = isl_mat_cols(node->sched);
5608 t = isl_mat_alloc(ctx, n_row, n_col);
5609 if (!t)
5610 return NULL;
5611 for (i = 0; i < n_row; ++i) {
5612 isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
5613 isl_seq_clr(t->row[i] + 1 + n_param, n_var);
5614 for (j = 0; j < n; ++j)
5615 isl_seq_addmul(t->row[i],
5616 t_node->sched->row[i][1 + n_param + j],
5617 node->sched->row[first + j],
5618 1 + n_param + n_var);
5620 return t;
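/* Illustrative sketch (not part of isl_scheduler.c): the row combination
 * performed by node_transformation above, on made-up coefficient vectors.
 * A schedule row is laid out as [constant | parameter coefficients |
 * variable coefficients]; the "variable" coefficients of a cluster row
 * multiply the original band rows of the node.
 */
#include <stdio.h>

#define N_PARAM	1	/* one parameter: n */
#define N_VAR	2	/* two iterators: i, j */
#define N_BAND	2	/* rows in the node's current band */
#define ROW_LEN	(1 + N_PARAM + N_VAR)

int main(void)
{
	int col, r;
	/* Cluster schedule row over [const | n | band row 0, band row 1]:
	 * constant 3, no dependence on n, coefficients 1 and 2 for the
	 * two original band rows. */
	int t_row[1 + N_PARAM + N_BAND] = { 3, 0, 1, 2 };
	/* The node's two original band rows over [const | n | i, j]:
	 * the expressions i and n + j. */
	int node_rows[N_BAND][ROW_LEN] = {
		{ 0, 0, 1, 0 },
		{ 0, 1, 0, 1 },
	};
	int out[ROW_LEN];

	/* Start from the cluster row's constant and parameter coefficients
	 * and clear the variable coefficients, as node_transformation does. */
	out[0] = t_row[0];
	for (col = 0; col < N_PARAM; ++col)
		out[1 + col] = t_row[1 + col];
	for (col = 0; col < N_VAR; ++col)
		out[1 + N_PARAM + col] = 0;

	/* Add each original band row, scaled by its cluster coefficient. */
	for (r = 0; r < N_BAND; ++r)
		for (col = 0; col < ROW_LEN; ++col)
			out[col] += t_row[1 + N_PARAM + r] * node_rows[r][col];

	/* Prints "3 2 1 2", i.e. the composed expression 3 + 2n + i + 2j. */
	for (col = 0; col < ROW_LEN; ++col)
		printf("%d ", out[col]);
	printf("\n");
	return 0;
}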
5623 /* Apply the cluster schedule in "t_node" to the current band
5624 * schedule of the nodes in "graph".
5626 * In particular, replace the rows starting at band_start
5627 * by the result of applying the cluster schedule in "t_node"
5628 * to the original rows.
5630 * The coincidence of the schedule is determined by the coincidence
5631 * of the cluster schedule.
5633 static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
5634 struct isl_sched_node *t_node)
5636 int i, j;
5637 int n_new;
5638 int start, n;
5640 start = graph->band_start;
5641 n = graph->n_total_row - start;
5643 n_new = isl_mat_rows(t_node->sched);
5644 for (i = 0; i < graph->n; ++i) {
5645 struct isl_sched_node *node = &graph->node[i];
5646 isl_mat *t;
5648 t = node_transformation(ctx, t_node, node, start, n);
5649 node->sched = isl_mat_drop_rows(node->sched, start, n);
5650 node->sched = isl_mat_concat(node->sched, t);
5651 node->sched_map = isl_map_free(node->sched_map);
5652 if (!node->sched)
5653 return isl_stat_error;
5654 for (j = 0; j < n_new; ++j)
5655 node->coincident[start + j] = t_node->coincident[j];
5657 graph->n_total_row -= n;
5658 graph->n_row -= n;
5659 graph->n_total_row += n_new;
5660 graph->n_row += n_new;
5662 return isl_stat_ok;
5665 /* Merge the clusters marked for merging in "c" into a single
5666 * cluster using the cluster schedule in the current band of "merge_graph".
5667 * The representative SCC for the new cluster is the SCC with
5668 * the smallest index.
5670 * The current band schedule of each SCC in the new cluster is obtained
5671 * by applying the schedule of the corresponding original cluster
5672 * to the original band schedule.
5673 * All SCCs in the new cluster have the same number of schedule rows.
5675 static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
5676 struct isl_sched_graph *merge_graph)
5678 int i;
5679 int cluster = -1;
5680 isl_space *space;
5682 for (i = 0; i < c->n; ++i) {
5683 struct isl_sched_node *node;
5685 if (!c->scc_in_merge[i])
5686 continue;
5687 if (cluster < 0)
5688 cluster = i;
5689 space = cluster_space(&c->scc[i], c->scc_cluster[i]);
5690 if (!space)
5691 return isl_stat_error;
5692 node = graph_find_node(ctx, merge_graph, space);
5693 isl_space_free(space);
5694 if (!node)
5695 isl_die(ctx, isl_error_internal,
5696 "unable to find cluster",
5697 return isl_stat_error);
5698 if (transform(ctx, &c->scc[i], node) < 0)
5699 return isl_stat_error;
5700 c->scc_cluster[i] = cluster;
5703 return isl_stat_ok;
5706 /* Try and merge the clusters of SCCs marked in c->scc_in_merge
5707 * by scheduling the current cluster bands with respect to each other.
5709 * Construct a dependence graph with a space for each cluster and
5710 * with the coordinates of each space corresponding to the schedule
5711 * dimensions of the current band of that cluster.
5712 * Construct a cluster schedule in this cluster dependence graph and
5713 * apply it to the current cluster bands if it is applicable
5714 * according to ok_to_merge.
5716 * If the number of remaining schedule dimensions in a cluster
5717 * with a non-maximal current schedule dimension is greater than
5718 * the number of remaining schedule dimensions in clusters
5719 * with a maximal current schedule dimension, then restrict
5720 * the number of rows to be computed in the cluster schedule
5721 * to the minimal such non-maximal current schedule dimension.
5722 * Do this by adjusting merge_graph.maxvar.
5724 * Return isl_bool_true if the clusters have effectively been merged
5725 * into a single cluster.
5727 * Note that since the standard scheduling algorithm minimizes the maximal
5728 * distance over proximity constraints, the proximity constraints between
5729 * the merged clusters may not be optimized any further than what is
5730 * sufficient to bring the distances within the limits of the internal
5731 * proximity constraints inside the individual clusters.
5732 * It may therefore make sense to perform an additional translation step
5733 * to bring the clusters closer to each other, while maintaining
5734 * the linear part of the merging schedule found using the standard
5735 * scheduling algorithm.
5737 static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
5738 struct isl_clustering *c)
5740 struct isl_sched_graph merge_graph = { 0 };
5741 isl_bool merged;
5743 if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
5744 goto error;
5746 if (compute_maxvar(&merge_graph) < 0)
5747 goto error;
5748 if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
5749 goto error;
5750 if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
5751 goto error;
5752 merged = ok_to_merge(ctx, graph, c, &merge_graph);
5753 if (merged && merge(ctx, c, &merge_graph) < 0)
5754 goto error;
5756 graph_free(ctx, &merge_graph);
5757 return merged;
5758 error:
5759 graph_free(ctx, &merge_graph);
5760 return isl_bool_error;
5763 /* Is there any edge marked "no_merge" between two SCCs that are
5764 * about to be merged (i.e., that are set in "scc_in_merge")?
5765 * "merge_edge" is the proximity edge along which the clusters of SCCs
5766 * are going to be merged.
5768 * If there is any edge between two SCCs with a negative weight,
5769 * while the weight of "merge_edge" is non-negative, then this
5770 * means that the edge was postponed. "merge_edge" should then
5771 * also be postponed since merging along the edge with negative weight should
5772 * be postponed until all edges with non-negative weight have been tried.
5773 * Replace the weight of "merge_edge" by a negative weight as well and
5774 * tell the caller not to attempt a merge.
5776 static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
5777 struct isl_sched_edge *merge_edge)
5779 int i;
5781 for (i = 0; i < graph->n_edge; ++i) {
5782 struct isl_sched_edge *edge = &graph->edge[i];
5784 if (!scc_in_merge[edge->src->scc])
5785 continue;
5786 if (!scc_in_merge[edge->dst->scc])
5787 continue;
5788 if (edge->no_merge)
5789 return 1;
5790 if (merge_edge->weight >= 0 && edge->weight < 0) {
5791 merge_edge->weight -= graph->max_weight + 1;
5792 return 1;
5796 return 0;
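/* Illustrative sketch (not part of isl_scheduler.c): the negative-weight
 * encoding used above to postpone merges, on made-up numbers.  Subtracting
 * max_weight + 1 makes the postponed state visible as a negative value
 * while keeping the original weight recoverable.
 */
#include <stdio.h>

int main(void)
{
	int max_weight = 4;	/* plays the role of graph->max_weight */
	int weight = 2;		/* an edge weight in [0, max_weight] */

	int postponed = weight - (max_weight + 1);	/* becomes -3 */
	int recovered = postponed + max_weight + 1;	/* back to 2 */

	printf("postponed: %d (negative marks a postponed edge)\n", postponed);
	printf("recovered: %d\n", recovered);
	return 0;
}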
5799 /* Merge the two clusters in "c" connected by the edge in "graph"
5800 * with index "edge" into a single cluster.
5801 * If it turns out to be impossible to merge these two clusters,
5802 * then mark the edge as "no_merge" such that it will not be
5803 * considered again.
5805 * First mark all SCCs that need to be merged. This includes the SCCs
5806 * in the two clusters, but it may also include the SCCs
5807 * of intermediate clusters.
5808 * If there is already a no_merge edge between any pair of such SCCs,
5809 * then simply mark the current edge as no_merge as well.
5810 * Likewise, if any of those edges was postponed by has_bounded_distances,
5811 * then postpone the current edge as well.
5812 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
5813 * if the clusters did not end up getting merged, unless the non-merge
5814 * is due to the fact that the edge was postponed. This postponement
5815 * can be recognized by a change in weight (from non-negative to negative).
5817 static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
5818 struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
5820 isl_bool merged;
5821 int edge_weight = graph->edge[edge].weight;
5823 if (mark_merge_sccs(ctx, graph, edge, c) < 0)
5824 return isl_stat_error;
5826 if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
5827 merged = isl_bool_false;
5828 else
5829 merged = try_merge(ctx, graph, c);
5830 if (merged < 0)
5831 return isl_stat_error;
5832 if (!merged && edge_weight == graph->edge[edge].weight)
5833 graph->edge[edge].no_merge = 1;
5835 return isl_stat_ok;
5838 /* Does "node" belong to the cluster identified by "cluster"?
5840 static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
5842 return node->cluster == cluster;
5845 /* Does "edge" connect two nodes belonging to the cluster
5846 * identified by "cluster"?
5848 static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
5850 return edge->src->cluster == cluster && edge->dst->cluster == cluster;
5853 /* Swap the schedule of "node1" and "node2".
5854 * Both nodes have been derived from the same node in a common parent graph.
5855 * Since the "coincident" field is shared with that node
5856 * in the parent graph, there is no need to also swap this field.
5858 static void swap_sched(struct isl_sched_node *node1,
5859 struct isl_sched_node *node2)
5861 isl_mat *sched;
5862 isl_map *sched_map;
5864 sched = node1->sched;
5865 node1->sched = node2->sched;
5866 node2->sched = sched;
5868 sched_map = node1->sched_map;
5869 node1->sched_map = node2->sched_map;
5870 node2->sched_map = sched_map;
5873 /* Copy the current band schedule from the SCCs that form the cluster
5874 * with index "pos" to the actual cluster at position "pos".
5875 * By construction, the index of the first SCC that belongs to the cluster
5876 * is also "pos".
5878 * The order of the nodes inside both the SCCs and the cluster
5879 * is assumed to be the same as the order in the original "graph".
5881 * Since the SCC graphs will no longer be used after this function,
5882 * the schedules are actually swapped rather than copied.
5884 static isl_stat copy_partial(struct isl_sched_graph *graph,
5885 struct isl_clustering *c, int pos)
5887 int i, j;
5889 c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
5890 c->cluster[pos].n_row = c->scc[pos].n_row;
5891 c->cluster[pos].maxvar = c->scc[pos].maxvar;
5892 j = 0;
5893 for (i = 0; i < graph->n; ++i) {
5894 int k;
5895 int s;
5897 if (graph->node[i].cluster != pos)
5898 continue;
5899 s = graph->node[i].scc;
5900 k = c->scc_node[s]++;
5901 swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
5902 if (c->scc[s].maxvar > c->cluster[pos].maxvar)
5903 c->cluster[pos].maxvar = c->scc[s].maxvar;
5904 ++j;
5907 return isl_stat_ok;
5910 /* Is there a (conditional) validity dependence from node[j] to node[i],
5911 * forcing node[i] to follow node[j] or do the nodes belong to the same
5912 * cluster?
5914 static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
5916 struct isl_sched_graph *graph = user;
5918 if (graph->node[i].cluster == graph->node[j].cluster)
5919 return isl_bool_true;
5920 return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
5923 /* Extract the merged clusters of SCCs in "graph", sort them, and
5924 * store them in c->clusters. Update c->scc_cluster accordingly.
5926 * First keep track of the cluster containing the SCC to which a node
5927 * belongs in the node itself.
5928 * Then extract the clusters into c->clusters, copying the current
5929 * band schedule from the SCCs that belong to the cluster.
5930 * Do this only once per cluster.
5932 * Finally, topologically sort the clusters and update c->scc_cluster
5933 * to match the new scc numbering. While the SCCs were originally
5934 * sorted already, some SCCs that depend on some other SCCs may
5935 * have been merged with SCCs that appear before these other SCCs.
5936 * A reordering may therefore be required.
5938 static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
5939 struct isl_clustering *c)
5941 int i;
5943 for (i = 0; i < graph->n; ++i)
5944 graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];
5946 for (i = 0; i < graph->scc; ++i) {
5947 if (c->scc_cluster[i] != i)
5948 continue;
5949 if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
5950 &edge_cluster_exactly, i, &c->cluster[i]) < 0)
5951 return isl_stat_error;
5952 c->cluster[i].src_scc = -1;
5953 c->cluster[i].dst_scc = -1;
5954 if (copy_partial(graph, c, i) < 0)
5955 return isl_stat_error;
5958 if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
5959 return isl_stat_error;
5960 for (i = 0; i < graph->n; ++i)
5961 c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;
5963 return isl_stat_ok;
5966 /* Compute weights on the proximity edges of "graph" that can
5967 * be used by find_proximity to find the most appropriate
5968 * proximity edge to use to merge two clusters in "c".
5969 * The weights are also used by has_bounded_distances to determine
5970 * whether the merge should be allowed.
5971 * Store the maximum of the computed weights in graph->max_weight.
5973 * The computed weight is a measure of the number of remaining schedule
5974 * dimensions that can still be completely aligned.
5975 * In particular, compute the number of equalities between
5976 * input dimensions and output dimensions in the proximity constraints.
5977 * The directions that are already handled by outer schedule bands
5978 * are projected out prior to determining this number.
5980 * Edges that will never be considered by find_proximity are ignored.
5982 static isl_stat compute_weights(struct isl_sched_graph *graph,
5983 struct isl_clustering *c)
5985 int i;
5987 graph->max_weight = 0;
5989 for (i = 0; i < graph->n_edge; ++i) {
5990 struct isl_sched_edge *edge = &graph->edge[i];
5991 struct isl_sched_node *src = edge->src;
5992 struct isl_sched_node *dst = edge->dst;
5993 isl_basic_map *hull;
5994 int n_in, n_out;
5996 if (!is_proximity(edge))
5997 continue;
5998 if (bad_cluster(&c->scc[edge->src->scc]) ||
5999 bad_cluster(&c->scc[edge->dst->scc]))
6000 continue;
6001 if (c->scc_cluster[edge->dst->scc] ==
6002 c->scc_cluster[edge->src->scc])
6003 continue;
6005 hull = isl_map_affine_hull(isl_map_copy(edge->map));
6006 hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
6007 isl_mat_copy(src->ctrans));
6008 hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
6009 isl_mat_copy(dst->ctrans));
6010 hull = isl_basic_map_project_out(hull,
6011 isl_dim_in, 0, src->rank);
6012 hull = isl_basic_map_project_out(hull,
6013 isl_dim_out, 0, dst->rank);
6014 hull = isl_basic_map_remove_divs(hull);
6015 n_in = isl_basic_map_dim(hull, isl_dim_in);
6016 n_out = isl_basic_map_dim(hull, isl_dim_out);
6017 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
6018 isl_dim_in, 0, n_in);
6019 hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
6020 isl_dim_out, 0, n_out);
6021 if (!hull)
6022 return isl_stat_error;
6023 edge->weight = hull->n_eq;
6024 isl_basic_map_free(hull);
6026 if (edge->weight > graph->max_weight)
6027 graph->max_weight = edge->weight;
6030 return isl_stat_ok;
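/* Illustrative sketch (not part of isl_scheduler.c): counting the
 * equalities in the affine hull of a proximity relation through the
 * public isl API, on a made-up map.  The projection of directions that
 * are already handled by outer bands, which compute_weights performs
 * before counting, is omitted here.
 */
#include <stdio.h>
#include <isl/ctx.h>
#include <isl/map.h>
#include <isl/constraint.h>

/* Increment the counter for every equality constraint. */
static isl_stat count_equality(__isl_take isl_constraint *c, void *user)
{
	int *n_eq = user;

	if (isl_constraint_is_equality(c) == isl_bool_true)
		++*n_eq;
	isl_constraint_free(c);
	return isl_stat_ok;
}

int main(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	/* Two of the three output dimensions are equated to inputs. */
	isl_map *prox = isl_map_read_from_str(ctx,
		"{ A[i, j] -> B[i, j, k] : 0 <= k <= 10 }");
	isl_basic_map *hull = isl_map_affine_hull(prox);
	int n_eq = 0;

	if (isl_basic_map_foreach_constraint(hull, &count_equality,
						&n_eq) < 0)
		n_eq = -1;
	printf("equalities in affine hull: %d\n", n_eq);

	isl_basic_map_free(hull);
	isl_ctx_free(ctx);
	return 0;
}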
6033 /* Call compute_schedule_finish_band on each of the clusters in "c"
6034 * in their topological order. This order is determined by the scc
6035 * fields of the nodes in "graph".
6036 * Combine the results in a sequence expressing the topological order.
6038 * If there is only one cluster left, then there is no need to introduce
6039 * a sequence node. Also, in this case, the cluster necessarily contains
6040 * the SCC at position 0 in the original graph and is therefore also
6041 * stored in the first cluster of "c".
6043 static __isl_give isl_schedule_node *finish_bands_clustering(
6044 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
6045 struct isl_clustering *c)
6047 int i;
6048 isl_ctx *ctx;
6049 isl_union_set_list *filters;
6051 if (graph->scc == 1)
6052 return compute_schedule_finish_band(node, &c->cluster[0], 0);
6054 ctx = isl_schedule_node_get_ctx(node);
6056 filters = extract_sccs(ctx, graph);
6057 node = isl_schedule_node_insert_sequence(node, filters);
6059 for (i = 0; i < graph->scc; ++i) {
6060 int j = c->scc_cluster[i];
6061 node = isl_schedule_node_child(node, i);
6062 node = isl_schedule_node_child(node, 0);
6063 node = compute_schedule_finish_band(node, &c->cluster[j], 0);
6064 node = isl_schedule_node_parent(node);
6065 node = isl_schedule_node_parent(node);
6068 return node;
6071 /* Compute a schedule for a connected dependence graph by first considering
6072 * each strongly connected component (SCC) in the graph separately and then
6073 * incrementally combining them into clusters.
6074 * Return the updated schedule node.
6076 * Initially, each cluster consists of a single SCC, each with its
6077 * own band schedule. The algorithm then tries to merge pairs
6078 * of clusters along a proximity edge until no more suitable
6079 * proximity edges can be found. During this merging, the schedule
6080 * is maintained in the individual SCCs.
6081 * After the merging is completed, the full resulting clusters
6082 * are extracted and, in finish_bands_clustering,
6083 * compute_schedule_finish_band is called on each of them to integrate
6084 * the band into "node" and to continue the computation.
6086 * compute_weights initializes the weights that are used by find_proximity.
6088 static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
6089 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
6091 isl_ctx *ctx;
6092 struct isl_clustering c;
6093 int i;
6095 ctx = isl_schedule_node_get_ctx(node);
6097 if (clustering_init(ctx, &c, graph) < 0)
6098 goto error;
6100 if (compute_weights(graph, &c) < 0)
6101 goto error;
6103 for (;;) {
6104 i = find_proximity(graph, &c);
6105 if (i < 0)
6106 goto error;
6107 if (i >= graph->n_edge)
6108 break;
6109 if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
6110 goto error;
6113 if (extract_clusters(ctx, graph, &c) < 0)
6114 goto error;
6116 node = finish_bands_clustering(node, graph, &c);
6118 clustering_free(ctx, &c);
6119 return node;
6120 error:
6121 clustering_free(ctx, &c);
6122 return isl_schedule_node_free(node);
6125 /* Compute a schedule for a connected dependence graph and return
6126 * the updated schedule node.
6128 * If Feautrier's algorithm is selected, we first recursively try to satisfy
6129 * as many validity dependences as possible. When all validity dependences
6130 * are satisfied, we extend the schedule to a full-dimensional schedule.
6132 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
6133 * depending on whether the user has selected the option to try and
6134 * compute a schedule for the entire (weakly connected) component first.
6135 * If there is only a single strongly connected component (SCC), then
6136 * there is no point in trying to combine SCCs
6137 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
6138 * is called instead.
6140 static __isl_give isl_schedule_node *compute_schedule_wcc(
6141 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
6143 isl_ctx *ctx;
6145 if (!node)
6146 return NULL;
6148 ctx = isl_schedule_node_get_ctx(node);
6149 if (detect_sccs(ctx, graph) < 0)
6150 return isl_schedule_node_free(node);
6152 if (compute_maxvar(graph) < 0)
6153 return isl_schedule_node_free(node);
6155 if (need_feautrier_step(ctx, graph))
6156 return compute_schedule_wcc_feautrier(node, graph);
6158 if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
6159 return compute_schedule_wcc_whole(node, graph);
6160 else
6161 return compute_schedule_wcc_clustering(node, graph);
6164 /* Compute a schedule for each group of nodes identified by node->scc
6165 * separately and then combine them in a sequence node (or as a set node
6166 * if graph->weak is set) inserted at position "node" of the schedule tree.
6167 * Return the updated schedule node.
6169 * If "wcc" is set then each of the groups belongs to a single
6170 * weakly connected component in the dependence graph so that
6171 * there is no need for compute_sub_schedule to look for weakly
6172 * connected components.
6174 static __isl_give isl_schedule_node *compute_component_schedule(
6175 __isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
6176 int wcc)
6178 int component;
6179 isl_ctx *ctx;
6180 isl_union_set_list *filters;
6182 if (!node)
6183 return NULL;
6184 ctx = isl_schedule_node_get_ctx(node);
6186 filters = extract_sccs(ctx, graph);
6187 if (graph->weak)
6188 node = isl_schedule_node_insert_set(node, filters);
6189 else
6190 node = isl_schedule_node_insert_sequence(node, filters);
6192 for (component = 0; component < graph->scc; ++component) {
6193 node = isl_schedule_node_child(node, component);
6194 node = isl_schedule_node_child(node, 0);
6195 node = compute_sub_schedule(node, ctx, graph,
6196 &node_scc_exactly,
6197 &edge_scc_exactly, component, wcc);
6198 node = isl_schedule_node_parent(node);
6199 node = isl_schedule_node_parent(node);
6202 return node;
6205 /* Compute a schedule for the given dependence graph and insert it at "node".
6206 * Return the updated schedule node.
6208 * We first check if the graph is connected (through validity and conditional
6209 * validity dependences) and, if not, compute a schedule
6210 * for each component separately.
6211 * If the schedule_serialize_sccs option is set, then we check for strongly
6212 * connected components instead and compute a separate schedule for
6213 * each such strongly connected component.
6215 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
6216 struct isl_sched_graph *graph)
6218 isl_ctx *ctx;
6220 if (!node)
6221 return NULL;
6223 ctx = isl_schedule_node_get_ctx(node);
6224 if (isl_options_get_schedule_serialize_sccs(ctx)) {
6225 if (detect_sccs(ctx, graph) < 0)
6226 return isl_schedule_node_free(node);
6227 } else {
6228 if (detect_wccs(ctx, graph) < 0)
6229 return isl_schedule_node_free(node);
6232 if (graph->scc > 1)
6233 return compute_component_schedule(node, graph, 1);
6235 return compute_schedule_wcc(node, graph);
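/* Illustrative sketch (not part of isl_scheduler.c): choosing between the
 * two splitting strategies described above from user code.  The option
 * setters are assumed to be the public counterparts of the getters used
 * in this file (isl_options_set_schedule_serialize_sccs and
 * isl_options_set_schedule_whole_component).
 */
#include <isl/ctx.h>
#include <isl/schedule.h>

int main(void)
{
	isl_ctx *ctx = isl_ctx_alloc();

	/* Compute and serialize a separate schedule per strongly
	 * connected component ... */
	isl_options_set_schedule_serialize_sccs(ctx, 1);

	/* ... or keep the default per-component splitting and schedule
	 * each weakly connected component as a whole instead of
	 * clustering its SCCs incrementally. */
	isl_options_set_schedule_serialize_sccs(ctx, 0);
	isl_options_set_schedule_whole_component(ctx, 1);

	/* Any subsequent isl_schedule_constraints_compute_schedule call
	 * on this context picks up these settings. */
	isl_ctx_free(ctx);
	return 0;
}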
6238 /* Compute a schedule on sc->domain that respects the given schedule
6239 * constraints.
6241 * In particular, the schedule respects all the validity dependences.
6242 * If the default isl scheduling algorithm is used, it tries to minimize
6243 * the dependence distances over the proximity dependences.
6244 * If Feautrier's scheduling algorithm is used, the proximity dependence
6245 * distances are only minimized during the extension to a full-dimensional
6246 * schedule.
6248 * If there are any condition and conditional validity dependences,
6249 * then the conditional validity dependences may be violated inside
6250 * a tilable band, provided they have no adjacent non-local
6251 * condition dependences.
6253 __isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
6254 __isl_take isl_schedule_constraints *sc)
6256 isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
6257 struct isl_sched_graph graph = { 0 };
6258 isl_schedule *sched;
6259 isl_schedule_node *node;
6260 isl_union_set *domain;
6262 sc = isl_schedule_constraints_align_params(sc);
6264 domain = isl_schedule_constraints_get_domain(sc);
6265 if (isl_union_set_n_set(domain) == 0) {
6266 isl_schedule_constraints_free(sc);
6267 return isl_schedule_from_domain(domain);
6270 if (graph_init(&graph, sc) < 0)
6271 domain = isl_union_set_free(domain);
6273 node = isl_schedule_node_from_domain(domain);
6274 node = isl_schedule_node_child(node, 0);
6275 if (graph.n > 0)
6276 node = compute_schedule(node, &graph);
6277 sched = isl_schedule_node_get_schedule(node);
6278 isl_schedule_node_free(node);
6280 graph_free(ctx, &graph);
6281 isl_schedule_constraints_free(sc);
6283 return sched;
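/* Illustrative sketch (not part of isl_scheduler.c): a minimal caller of
 * isl_schedule_constraints_compute_schedule.  The domain and dependences
 * are made-up; in a real compiler they would come from dependence
 * analysis.
 */
#include <isl/ctx.h>
#include <isl/union_set.h>
#include <isl/union_map.h>
#include <isl/schedule.h>

int main(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	isl_union_set *domain = isl_union_set_read_from_str(ctx,
		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
	/* T[i] must be scheduled after S[i] ... */
	isl_union_map *validity = isl_union_map_read_from_str(ctx,
		"{ S[i] -> T[i] }");
	/* ... and preferably close to it. */
	isl_union_map *proximity = isl_union_map_read_from_str(ctx,
		"{ S[i] -> T[i] }");
	isl_schedule_constraints *sc;
	isl_schedule *schedule;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);
	schedule = isl_schedule_constraints_compute_schedule(sc);

	isl_schedule_dump(schedule);

	isl_schedule_free(schedule);
	isl_ctx_free(ctx);
	return 0;
}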
6286 /* Compute a schedule for the given union of domains that respects
6287 * all the validity dependences and minimizes
6288 * the dependence distances over the proximity dependences.
6290 * This function is kept for backward compatibility.
6292 __isl_give isl_schedule *isl_union_set_compute_schedule(
6293 __isl_take isl_union_set *domain,
6294 __isl_take isl_union_map *validity,
6295 __isl_take isl_union_map *proximity)
6297 isl_schedule_constraints *sc;
6299 sc = isl_schedule_constraints_on_domain(domain);
6300 sc = isl_schedule_constraints_set_validity(sc, validity);
6301 sc = isl_schedule_constraints_set_proximity(sc, proximity);
6303 return isl_schedule_constraints_compute_schedule(sc);