/*
 * Copyright 2011      INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015-2016 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl_ctx_private.h>
#include <isl_map_private.h>
#include <isl_space_private.h>
#include <isl_aff_private.h>
#include <isl/hash.h>
#include <isl/constraint.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl_mat_private.h>
#include <isl_vec_private.h>
#include <isl/union_set.h>
#include <isl_dim_map.h>
#include <isl/map_to_basic_set.h>
#include <isl_sort.h>
#include <isl_options_private.h>
#include <isl_tarjan.h>
#include <isl_morph.h>
/*
 * The scheduling algorithm implemented in this file was inspired by
 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
 * Parallelization and Locality Optimization in the Polyhedral Model".
 */
enum isl_edge_type {
	isl_edge_validity = 0,
	isl_edge_first = isl_edge_validity,
	isl_edge_coincidence,
	isl_edge_condition,
	isl_edge_conditional_validity,
	isl_edge_proximity,
	isl_edge_last = isl_edge_proximity,
	isl_edge_local
};
/* The constraints that need to be satisfied by a schedule on "domain".
 *
 * "context" specifies extra constraints on the parameters.
 *
 * "validity" constraints map domain elements i to domain elements
 * that should be scheduled after i.  (Hard constraint)
 * "proximity" constraints map domain elements i to domain elements
 * that should be scheduled as early as possible after i (or before i).
 *
 * "condition" and "conditional_validity" constraints map possibly "tagged"
 * domain elements i -> s to "tagged" domain elements j -> t.
 * The elements of the "conditional_validity" constraints, but without the
 * tags (i.e., the elements i -> j) are treated as validity constraints,
 * except that during the construction of a tilable band,
 * the elements of the "conditional_validity" constraints may be violated
 * provided that all adjacent elements of the "condition" constraints
 * are local within the band.
 * A dependence is local within a band if domain and range are mapped
 * to the same schedule point by the band.
 */
struct isl_schedule_constraints {
	isl_union_set *domain;
	isl_set *context;

	isl_union_map *constraint[isl_edge_last + 1];
};
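
/* For illustration only (this sketch is not part of the code in this file):
 * a caller typically builds an isl_schedule_constraints object and hands it
 * to isl_schedule_constraints_compute_schedule(), declared in
 * <isl/schedule.h>.  The domain and dependence strings below are made up.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"{ S[i] -> T[i] }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 *	isl_schedule *schedule =
 *		isl_schedule_constraints_compute_schedule(sc);
 *	...
 *	isl_schedule_free(schedule);
 */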
__isl_give isl_schedule_constraints *isl_schedule_constraints_copy(
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_schedule_constraints *sc_copy;
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	ctx = isl_union_set_get_ctx(sc->domain);
	sc_copy = isl_calloc_type(ctx, struct isl_schedule_constraints);
	if (!sc_copy)
		return NULL;

	sc_copy->domain = isl_union_set_copy(sc->domain);
	sc_copy->context = isl_set_copy(sc->context);
	if (!sc_copy->domain || !sc_copy->context)
		return isl_schedule_constraints_free(sc_copy);

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc_copy->constraint[i] = isl_union_map_copy(sc->constraint[i]);
		if (!sc_copy->constraint[i])
			return isl_schedule_constraints_free(sc_copy);
	}

	return sc_copy;
}
/* Construct an isl_schedule_constraints object for computing a schedule
 * on "domain".  The initial object does not impose any constraints.
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_on_domain(
	__isl_take isl_union_set *domain)
{
	isl_ctx *ctx;
	isl_space *space;
	isl_schedule_constraints *sc;
	isl_union_map *empty;
	enum isl_edge_type i;

	if (!domain)
		return NULL;

	ctx = isl_union_set_get_ctx(domain);
	sc = isl_calloc_type(ctx, struct isl_schedule_constraints);
	if (!sc)
		goto error;

	space = isl_union_set_get_space(domain);
	sc->domain = domain;
	sc->context = isl_set_universe(isl_space_copy(space));
	empty = isl_union_map_empty(space);
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc->constraint[i] = isl_union_map_copy(empty);
		if (!sc->constraint[i])
			sc->domain = isl_union_set_free(sc->domain);
	}
	isl_union_map_free(empty);

	if (!sc->domain || !sc->context)
		return isl_schedule_constraints_free(sc);

	return sc;
error:
	isl_union_set_free(domain);
	return NULL;
}
/* Replace the context of "sc" by "context".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_context(
	__isl_take isl_schedule_constraints *sc, __isl_take isl_set *context)
{
	if (!sc || !context)
		goto error;

	isl_set_free(sc->context);
	sc->context = context;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_set_free(context);
	return NULL;
}

/* Replace the validity constraints of "sc" by "validity".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_validity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *validity)
{
	if (!sc || !validity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_validity]);
	sc->constraint[isl_edge_validity] = validity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(validity);
	return NULL;
}

/* Replace the coincidence constraints of "sc" by "coincidence".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_coincidence(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *coincidence)
{
	if (!sc || !coincidence)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_coincidence]);
	sc->constraint[isl_edge_coincidence] = coincidence;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(coincidence);
	return NULL;
}

/* Replace the proximity constraints of "sc" by "proximity".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_proximity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *proximity)
{
	if (!sc || !proximity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_proximity]);
	sc->constraint[isl_edge_proximity] = proximity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(proximity);
	return NULL;
}

/* Replace the conditional validity constraints of "sc" by "condition"
 * and "validity".
 */
__isl_give isl_schedule_constraints *
isl_schedule_constraints_set_conditional_validity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *condition,
	__isl_take isl_union_map *validity)
{
	if (!sc || !condition || !validity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_condition]);
	sc->constraint[isl_edge_condition] = condition;
	isl_union_map_free(sc->constraint[isl_edge_conditional_validity]);
	sc->constraint[isl_edge_conditional_validity] = validity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(condition);
	isl_union_map_free(validity);
	return NULL;
}
__isl_null isl_schedule_constraints *isl_schedule_constraints_free(
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	isl_union_set_free(sc->domain);
	isl_set_free(sc->context);
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		isl_union_map_free(sc->constraint[i]);

	free(sc);

	return NULL;
}

isl_ctx *isl_schedule_constraints_get_ctx(
	__isl_keep isl_schedule_constraints *sc)
{
	return sc ? isl_union_set_get_ctx(sc->domain) : NULL;
}
/* Return the domain of "sc".
 */
__isl_give isl_union_set *isl_schedule_constraints_get_domain(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_set_copy(sc->domain);
}

/* Return the validity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_validity(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_validity]);
}

/* Return the coincidence constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_coincidence(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_coincidence]);
}

/* Return the proximity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_proximity(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_proximity]);
}

/* Return the conditional validity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_conditional_validity(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return
	    isl_union_map_copy(sc->constraint[isl_edge_conditional_validity]);
}

/* Return the conditions for the conditional validity constraints of "sc".
 */
__isl_give isl_union_map *
isl_schedule_constraints_get_conditional_validity_condition(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_condition]);
}
/* Can a schedule constraint of type "type" be tagged?
 */
static int may_be_tagged(enum isl_edge_type type)
{
	if (type == isl_edge_condition || type == isl_edge_conditional_validity)
		return 1;
	return 0;
}
/* Apply "umap" to the domains of the wrapped relations
 * inside the domain and range of "c".
 *
 * That is, for each map of the form
 *
 *	[D -> S] -> [E -> T]
 *
 * in "c", apply "umap" to D and E.
 *
 * D is exposed by currying the relation to
 *
 *	D -> [S -> [E -> T]]
 *
 * E is exposed by doing the same to the inverse of "c".
 */
static __isl_give isl_union_map *apply_factor_domain(
	__isl_take isl_union_map *c, __isl_keep isl_union_map *umap)
{
	c = isl_union_map_curry(c);
	c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
	c = isl_union_map_uncurry(c);

	c = isl_union_map_reverse(c);
	c = isl_union_map_curry(c);
	c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
	c = isl_union_map_uncurry(c);
	c = isl_union_map_reverse(c);

	return c;
}
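
/* As an illustration of the currying above (the relations in this sketch
 * are made up and not taken from any caller): if "c" contains the tagged
 * dependence
 *
 *	[S[i] -> w[]] -> [T[j] -> r[]]
 *
 * and "umap" maps S[i] -> S'[i] and T[j] -> T'[j], then currying exposes
 *
 *	S[i] -> [w[] -> [T[j] -> r[]]]
 *
 * so that applying "umap" to the domain yields
 *
 *	S'[i] -> [w[] -> [T[j] -> r[]]]
 *
 * Uncurrying, reversing and repeating the same steps on the other side
 * finally produces
 *
 *	[S'[i] -> w[]] -> [T'[j] -> r[]]
 */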
/* Apply "umap" to domain and range of "c".
 * If "tag" is set, then "c" may contain tags and then "umap"
 * needs to be applied to the domains of the wrapped relations
 * inside the domain and range of "c".
 */
static __isl_give isl_union_map *apply(__isl_take isl_union_map *c,
	__isl_keep isl_union_map *umap, int tag)
{
	isl_union_map *t;

	if (tag)
		t = isl_union_map_copy(c);
	c = isl_union_map_apply_domain(c, isl_union_map_copy(umap));
	c = isl_union_map_apply_range(c, isl_union_map_copy(umap));
	if (!tag)
		return c;
	t = apply_factor_domain(t, umap);
	c = isl_union_map_union(c, t);
	return c;
}
/* Apply "umap" to the domain of the schedule constraints "sc".
 *
 * The two sides of the various schedule constraints are adjusted
 * accordingly.
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_apply(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *umap)
{
	enum isl_edge_type i;

	if (!sc || !umap)
		goto error;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		int tag = may_be_tagged(i);

		sc->constraint[i] = apply(sc->constraint[i], umap, tag);
		if (!sc->constraint[i])
			goto error;
	}
	sc->domain = isl_union_set_apply(sc->domain, umap);
	if (!sc->domain)
		return isl_schedule_constraints_free(sc);

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(umap);
	return NULL;
}
void isl_schedule_constraints_dump(__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return;

	fprintf(stderr, "domain: ");
	isl_union_set_dump(sc->domain);
	fprintf(stderr, "context: ");
	isl_set_dump(sc->context);
	fprintf(stderr, "validity: ");
	isl_union_map_dump(sc->constraint[isl_edge_validity]);
	fprintf(stderr, "proximity: ");
	isl_union_map_dump(sc->constraint[isl_edge_proximity]);
	fprintf(stderr, "coincidence: ");
	isl_union_map_dump(sc->constraint[isl_edge_coincidence]);
	fprintf(stderr, "condition: ");
	isl_union_map_dump(sc->constraint[isl_edge_condition]);
	fprintf(stderr, "conditional_validity: ");
	isl_union_map_dump(sc->constraint[isl_edge_conditional_validity]);
}
/* Align the parameters of the fields of "sc".
 */
static __isl_give isl_schedule_constraints *
isl_schedule_constraints_align_params(__isl_take isl_schedule_constraints *sc)
{
	isl_space *space;
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	space = isl_union_set_get_space(sc->domain);
	space = isl_space_align_params(space, isl_set_get_space(sc->context));
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		space = isl_space_align_params(space,
				    isl_union_map_get_space(sc->constraint[i]));

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc->constraint[i] = isl_union_map_align_params(
				    sc->constraint[i], isl_space_copy(space));
		if (!sc->constraint[i])
			space = isl_space_free(space);
	}
	sc->context = isl_set_align_params(sc->context, isl_space_copy(space));
	sc->domain = isl_union_set_align_params(sc->domain, space);
	if (!sc->context || !sc->domain)
		return isl_schedule_constraints_free(sc);

	return sc;
}
/* Return the total number of isl_maps in the constraints of "sc".
 */
static int isl_schedule_constraints_n_map(
	__isl_keep isl_schedule_constraints *sc)
{
	enum isl_edge_type i;
	int n = 0;

	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		n += isl_union_map_n_map(sc->constraint[i]);

	return n;
}
/* Internal information about a node that is used during the construction
 * of a schedule.
 * space represents the space in which the domain lives
 * sched is a matrix representation of the schedule being constructed
 *	for this node; if compressed is set, then this schedule is
 *	defined over the compressed domain space
 * sched_map is an isl_map representation of the same (partial) schedule
 *	sched_map may be NULL; if compressed is set, then this map
 *	is defined over the uncompressed domain space
 * rank is the number of linearly independent rows in the linear part
 *	of sched
 * the columns of cmap represent a change of basis for the schedule
 *	coefficients; the first rank columns span the linear part of
 *	the schedule rows
 * cinv is the inverse of cmap.
 * ctrans is the transpose of cmap.
 * start is the first variable in the LP problem in the sequences that
 *	represents the schedule coefficients of this node
 * nvar is the dimension of the domain
 * nparam is the number of parameters or 0 if we are not constructing
 *	a parametric schedule
 *
 * If compressed is set, then hull represents the constraints
 * that were used to derive the compression, while compress and
 * decompress map the original space to the compressed space and
 * vice versa.
 *
 * scc is the index of SCC (or WCC) this node belongs to
 *
 * "cluster" is only used inside extract_clusters and identifies
 * the cluster of SCCs that the node belongs to.
 *
 * coincident contains a boolean for each of the rows of the schedule,
 * indicating whether the corresponding scheduling dimension satisfies
 * the coincidence constraints in the sense that the corresponding
 * dependence distances are zero.
 */
struct isl_sched_node {
	isl_space *space;
	int compressed;
	isl_set *hull;
	isl_multi_aff *compress;
	isl_multi_aff *decompress;
	isl_mat *sched;
	isl_map *sched_map;
	int rank;
	isl_mat *cmap;
	isl_mat *cinv;
	isl_mat *ctrans;
	int start;
	int nvar;
	int nparam;

	int scc;
	int cluster;

	int *coincident;
};
static int node_has_space(const void *entry, const void *val)
{
	struct isl_sched_node *node = (struct isl_sched_node *)entry;
	isl_space *dim = (isl_space *)val;

	return isl_space_is_equal(node->space, dim);
}

static int node_scc_exactly(struct isl_sched_node *node, int scc)
{
	return node->scc == scc;
}

static int node_scc_at_most(struct isl_sched_node *node, int scc)
{
	return node->scc <= scc;
}

static int node_scc_at_least(struct isl_sched_node *node, int scc)
{
	return node->scc >= scc;
}
/* An edge in the dependence graph.  An edge may be used to
 * ensure validity of the generated schedule, to minimize the dependence
 * distance or both.
 *
 * map is the dependence relation, with i -> j in the map if j depends on i
 * tagged_condition and tagged_validity contain the union of all tagged
 *	condition or conditional validity dependence relations that
 *	specialize the dependence relation "map"; that is,
 *	if (i -> a) -> (j -> b) is an element of "tagged_condition"
 *	or "tagged_validity", then i -> j is an element of "map".
 *	If these fields are NULL, then they represent the empty relation.
 * src is the source node
 * dst is the sink node
 *
 * types is a bit vector containing the types of this edge.
 * validity is set if the edge is used to ensure correctness
 * coincidence is used to enforce zero dependence distances
 * proximity is set if the edge is used to minimize dependence distances
 * condition is set if the edge represents a condition
 *	for a conditional validity schedule constraint
 * local can only be set for condition edges and indicates that
 *	the dependence distance over the edge should be zero
 * conditional_validity is set if the edge is used to conditionally
 *	ensure correctness
 *
 * For validity edges, start and end mark the sequence of inequality
 * constraints in the LP problem that encode the validity constraint
 * corresponding to this edge.
 *
 * During clustering, an edge may be marked "no_merge" if it should
 * not be used to merge clusters.
 * The weight is also only used during clustering and it is
 * an indication of how many schedule dimensions on either side
 * of the schedule constraints can be aligned.
 * If the weight is negative, then this means that this edge was postponed
 * by has_bounded_distances or any_no_merge.  The original weight can
 * be retrieved by adding 1 + graph->max_weight, with "graph"
 * the graph containing this edge.
 */
struct isl_sched_edge {
	isl_map *map;
	isl_union_map *tagged_condition;
	isl_union_map *tagged_validity;

	struct isl_sched_node *src;
	struct isl_sched_node *dst;

	unsigned types;

	int start;
	int end;

	int no_merge;
	int weight;
};
/* Is "edge" marked as being of type "type"?
 */
static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	return ISL_FL_ISSET(edge->types, 1 << type);
}

/* Mark "edge" as being of type "type".
 */
static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	ISL_FL_SET(edge->types, 1 << type);
}

/* No longer mark "edge" as being of type "type".
 */
static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
{
	ISL_FL_CLR(edge->types, 1 << type);
}

/* Is "edge" marked as a validity edge?
 */
static int is_validity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_validity);
}

/* Mark "edge" as a validity edge.
 */
static void set_validity(struct isl_sched_edge *edge)
{
	set_type(edge, isl_edge_validity);
}

/* Is "edge" marked as a proximity edge?
 */
static int is_proximity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_proximity);
}

/* Is "edge" marked as a local edge?
 */
static int is_local(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_local);
}

/* Mark "edge" as a local edge.
 */
static void set_local(struct isl_sched_edge *edge)
{
	set_type(edge, isl_edge_local);
}

/* No longer mark "edge" as a local edge.
 */
static void clear_local(struct isl_sched_edge *edge)
{
	clear_type(edge, isl_edge_local);
}

/* Is "edge" marked as a coincidence edge?
 */
static int is_coincidence(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_coincidence);
}

/* Is "edge" marked as a condition edge?
 */
static int is_condition(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_condition);
}

/* Is "edge" marked as a conditional validity edge?
 */
static int is_conditional_validity(struct isl_sched_edge *edge)
{
	return is_type(edge, isl_edge_conditional_validity);
}
/* Internal information about the dependence graph used during
 * the construction of the schedule.
 *
 * intra_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself
 * inter_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences between distinct nodes
 * if compression is involved then the key for these maps
 * is the original, uncompressed dependence relation, while
 * the value is the dual of the compressed dependence relation.
 *
 * n is the number of nodes
 * node is the list of nodes
 * maxvar is the maximal number of variables over all nodes
 * max_row is the allocated number of rows in the schedule
 * n_row is the current (maximal) number of linearly independent
 *	rows in the node schedules
 * n_total_row is the current number of rows in the node schedules
 * band_start is the starting row in the node schedules of the current band
 * root is set if this graph is the original dependence graph,
 *	without any splitting
 *
 * sorted contains a list of node indices sorted according to the
 *	SCC to which a node belongs
 *
 * n_edge is the number of edges
 * edge is the list of edges
 * max_edge contains the maximal number of edges of each type;
 *	in particular, it contains the number of edges in the initial graph.
 * edge_table contains pointers into the edge array, hashed on the source
 *	and sink spaces; there is one such table for each type;
 *	a given edge may be referenced from more than one table
 *	if the corresponding relation appears in more than one of the
 *	sets of dependences; however, for each type there is only
 *	a single edge between a given pair of source and sink space
 *	in the entire graph
 *
 * node_table contains pointers into the node array, hashed on the space
 *
 * region contains a list of variable sequences that should be non-trivial
 *
 * lp contains the (I)LP problem used to obtain new schedule rows
 *
 * src_scc and dst_scc are the source and sink SCCs of an edge with
 *	conflicting constraints
 *
 * scc represents the number of components
 * weak is set if the components are weakly connected
 *
 * max_weight is used during clustering and represents the maximal
 * weight of the relevant proximity edges.
 */
struct isl_sched_graph {
	isl_map_to_basic_set *intra_hmap;
	isl_map_to_basic_set *inter_hmap;

	struct isl_sched_node *node;
	int n;
	int maxvar;
	int max_row;
	int n_row;

	int *sorted;

	int n_total_row;
	int band_start;

	int root;

	struct isl_sched_edge *edge;
	int n_edge;
	int max_edge[isl_edge_last + 1];
	struct isl_hash_table *edge_table[isl_edge_last + 1];

	struct isl_hash_table *node_table;
	struct isl_region *region;

	isl_basic_set *lp;

	int src_scc;
	int dst_scc;

	int scc;
	int weak;

	int max_weight;
};
/* Initialize node_table based on the list of nodes.
 */
static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	graph->node_table = isl_hash_table_alloc(ctx, graph->n);
	if (!graph->node_table)
		return -1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_hash_table_entry *entry;
		uint32_t hash;

		hash = isl_space_get_hash(graph->node[i].space);
		entry = isl_hash_table_find(ctx, graph->node_table, hash,
					    &node_has_space,
					    graph->node[i].space, 1);
		if (!entry)
			return -1;
		entry->data = &graph->node[i];
	}

	return 0;
}
/* Return a pointer to the node that lives within the given space,
 * or NULL if there is no such node.
 */
static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_space *dim)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_space_get_hash(dim);
	entry = isl_hash_table_find(ctx, graph->node_table, hash,
				    &node_has_space, dim, 0);

	return entry ? entry->data : NULL;
}
static int edge_has_src_and_dst(const void *entry, const void *val)
{
	const struct isl_sched_edge *edge = entry;
	const struct isl_sched_edge *temp = val;

	return edge->src == temp->src && edge->dst == temp->dst;
}
/* Add the given edge to graph->edge_table[type].
 */
static isl_stat graph_edge_table_add(isl_ctx *ctx,
	struct isl_sched_graph *graph, enum isl_edge_type type,
	struct isl_sched_edge *edge)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, edge->src);
	hash = isl_hash_builtin(hash, edge->dst);
	entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, edge, 1);
	if (!entry)
		return isl_stat_error;
	entry->data = edge;

	return isl_stat_ok;
}
/* Allocate the edge_tables based on the maximal number of edges of
 * each type.
 */
static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i <= isl_edge_last; ++i) {
		graph->edge_table[i] = isl_hash_table_alloc(ctx,
							    graph->max_edge[i]);
		if (!graph->edge_table[i])
			return -1;
	}

	return 0;
}
/* If graph->edge_table[type] contains an edge from the given source
 * to the given destination, then return the hash table entry of this edge.
 * Otherwise, return NULL.
 */
static struct isl_hash_table_entry *graph_find_edge_entry(
	struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_ctx *ctx = isl_space_get_ctx(src->space);
	uint32_t hash;
	struct isl_sched_edge temp = { .src = src, .dst = dst };

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, temp.src);
	hash = isl_hash_builtin(hash, temp.dst);
	return isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, &temp, 0);
}
/* If graph->edge_table[type] contains an edge from the given source
 * to the given destination, then return this edge.
 * Otherwise, return NULL.
 */
static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	struct isl_hash_table_entry *entry;

	entry = graph_find_edge_entry(graph, type, src, dst);
	if (!entry)
		return NULL;

	return entry->data;
}
/* Check whether the dependence graph has an edge of the given type
 * between the given two nodes.
 */
static isl_bool graph_has_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	struct isl_sched_edge *edge;
	isl_bool empty;

	edge = graph_find_edge(graph, type, src, dst);
	if (!edge)
		return isl_bool_false;

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		return isl_bool_error;

	return !empty;
}
/* Look for any edge with the same src, dst and map fields as "model".
 *
 * Return the matching edge if one can be found.
 * Return "model" if no matching edge is found.
 * Return NULL on error.
 */
static struct isl_sched_edge *graph_find_matching_edge(
	struct isl_sched_graph *graph, struct isl_sched_edge *model)
{
	enum isl_edge_type i;
	struct isl_sched_edge *edge;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		int is_equal;

		edge = graph_find_edge(graph, i, model->src, model->dst);
		if (!edge)
			continue;
		is_equal = isl_map_plain_is_equal(model->map, edge->map);
		if (is_equal < 0)
			return NULL;
		if (is_equal)
			return edge;
	}

	return model;
}
/* Remove the given edge from all the edge_tables that refer to it.
 */
static void graph_remove_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	isl_ctx *ctx = isl_map_get_ctx(edge->map);
	enum isl_edge_type i;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		struct isl_hash_table_entry *entry;

		entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
		if (!entry)
			continue;
		if (entry->data != edge)
			continue;
		isl_hash_table_remove(ctx, graph->edge_table[i], entry);
	}
}
/* Check whether the dependence graph has any edge
 * between the given two nodes.
 */
static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	enum isl_edge_type i;
	isl_bool r;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		r = graph_has_edge(graph, i, src, dst);
		if (r < 0 || r)
			return r;
	}

	return r;
}
/* Check whether the dependence graph has a validity edge
 * between the given two nodes.
 *
 * Conditional validity edges are essentially validity edges that
 * can be ignored if the corresponding condition edges are iteration private.
 * Here, we are only checking for the presence of validity
 * edges, so we need to consider the conditional validity edges too.
 * In particular, this function is used during the detection
 * of strongly connected components and we cannot ignore
 * conditional validity edges during this detection.
 */
static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_bool r;

	r = graph_has_edge(graph, isl_edge_validity, src, dst);
	if (r < 0 || r)
		return r;

	return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
}
static int graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
	int n_node, int n_edge)
{
	int i;

	graph->n = n_node;
	graph->n_edge = n_edge;
	graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
	graph->sorted = isl_calloc_array(ctx, int, graph->n);
	graph->region = isl_alloc_array(ctx, struct isl_region, graph->n);
	graph->edge = isl_calloc_array(ctx,
					struct isl_sched_edge, graph->n_edge);

	graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
	graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);

	if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
	    !graph->sorted)
		return -1;

	for(i = 0; i < graph->n; ++i)
		graph->sorted[i] = i;

	return 0;
}
static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	isl_map_to_basic_set_free(graph->intra_hmap);
	isl_map_to_basic_set_free(graph->inter_hmap);

	if (graph->node)
		for (i = 0; i < graph->n; ++i) {
			isl_space_free(graph->node[i].space);
			isl_set_free(graph->node[i].hull);
			isl_multi_aff_free(graph->node[i].compress);
			isl_multi_aff_free(graph->node[i].decompress);
			isl_mat_free(graph->node[i].sched);
			isl_map_free(graph->node[i].sched_map);
			isl_mat_free(graph->node[i].cmap);
			isl_mat_free(graph->node[i].cinv);
			isl_mat_free(graph->node[i].ctrans);
			if (graph->root)
				free(graph->node[i].coincident);
		}
	free(graph->node);
	free(graph->sorted);
	if (graph->edge)
		for (i = 0; i < graph->n_edge; ++i) {
			isl_map_free(graph->edge[i].map);
			isl_union_map_free(graph->edge[i].tagged_condition);
			isl_union_map_free(graph->edge[i].tagged_validity);
		}
	free(graph->edge);
	free(graph->region);
	for (i = 0; i <= isl_edge_last; ++i)
		isl_hash_table_free(ctx, graph->edge_table[i]);
	isl_hash_table_free(ctx, graph->node_table);
	isl_basic_set_free(graph->lp);
}
/* For each "set" on which this function is called, increment
 * graph->n by one and update graph->maxvar.
 */
static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
{
	struct isl_sched_graph *graph = user;
	int nvar = isl_set_dim(set, isl_dim_set);

	graph->n++;
	if (nvar > graph->maxvar)
		graph->maxvar = nvar;

	isl_set_free(set);

	return isl_stat_ok;
}

/* Add the number of basic maps in "map" to *n.
 */
static isl_stat add_n_basic_map(__isl_take isl_map *map, void *user)
{
	int *n = user;

	*n += isl_map_n_basic_map(map);
	isl_map_free(map);

	return isl_stat_ok;
}
/* Compute the number of rows that should be allocated for the schedule.
 * In particular, we need one row for each variable or one row
 * for each basic map in the dependences.
 * Note that it is practically impossible to exhaust both
 * the number of dependences and the number of variables.
 */
static int compute_max_row(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	enum isl_edge_type i;
	int n_edge;

	graph->n = 0;
	graph->maxvar = 0;
	if (isl_union_set_foreach_set(sc->domain, &init_n_maxvar, graph) < 0)
		return -1;
	n_edge = 0;
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		if (isl_union_map_foreach_map(sc->constraint[i],
						&add_n_basic_map, &n_edge) < 0)
			return -1;
	graph->max_row = n_edge + graph->maxvar;

	return 0;
}
/* Does "bset" have any defining equalities for its set variables?
 */
static int has_any_defining_equality(__isl_keep isl_basic_set *bset)
{
	int i, n;

	if (!bset)
		return -1;

	n = isl_basic_set_dim(bset, isl_dim_set);
	for (i = 0; i < n; ++i) {
		int has;

		has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
							NULL);
		if (has < 0 || has)
			return has;
	}

	return 0;
}
/* Add a new node to the graph representing the given space.
 * "nvar" is the (possibly compressed) number of variables and
 * may be smaller than the number of set variables in "space"
 * if "compressed" is set.
 * If "compressed" is set, then "hull" represents the constraints
 * that were used to derive the compression, while "compress" and
 * "decompress" map the original space to the compressed space and
 * vice versa.
 * If "compressed" is not set, then "hull", "compress" and "decompress"
 * should be NULL.
 */
static isl_stat add_node(struct isl_sched_graph *graph,
	__isl_take isl_space *space, int nvar, int compressed,
	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
	__isl_take isl_multi_aff *decompress)
{
	int nparam;
	isl_ctx *ctx;
	isl_mat *sched;
	int *coincident;

	if (!space)
		return isl_stat_error;

	ctx = isl_space_get_ctx(space);
	nparam = isl_space_dim(space, isl_dim_param);
	if (!ctx->opt->schedule_parametric)
		nparam = 0;
	sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
	graph->node[graph->n].space = space;
	graph->node[graph->n].nvar = nvar;
	graph->node[graph->n].nparam = nparam;
	graph->node[graph->n].sched = sched;
	graph->node[graph->n].sched_map = NULL;
	coincident = isl_calloc_array(ctx, int, graph->max_row);
	graph->node[graph->n].coincident = coincident;
	graph->node[graph->n].compressed = compressed;
	graph->node[graph->n].hull = hull;
	graph->node[graph->n].compress = compress;
	graph->node[graph->n].decompress = decompress;
	graph->n++;

	if (!space || !sched || (graph->max_row && !coincident))
		return isl_stat_error;
	if (compressed && (!hull || !compress || !decompress))
		return isl_stat_error;

	return isl_stat_ok;
}
/* Add a new node to the graph representing the given set.
 *
 * If any of the set variables is defined by an equality, then
 * we perform variable compression such that we can perform
 * the scheduling on the compressed domain.
 */
static isl_stat extract_node(__isl_take isl_set *set, void *user)
{
	int nvar;
	int has_equality;
	isl_space *space;
	isl_basic_set *hull;
	isl_set *hull_set;
	isl_morph *morph;
	isl_multi_aff *compress, *decompress;
	struct isl_sched_graph *graph = user;

	space = isl_set_get_space(set);
	hull = isl_set_affine_hull(set);
	hull = isl_basic_set_remove_divs(hull);
	nvar = isl_space_dim(space, isl_dim_set);
	has_equality = has_any_defining_equality(hull);

	if (has_equality < 0)
		goto error;
	if (!has_equality) {
		isl_basic_set_free(hull);
		return add_node(graph, space, nvar, 0, NULL, NULL, NULL);
	}

	morph = isl_basic_set_variable_compression(hull, isl_dim_set);
	nvar = isl_morph_ran_dim(morph, isl_dim_set);
	compress = isl_morph_get_var_multi_aff(morph);
	morph = isl_morph_inverse(morph);
	decompress = isl_morph_get_var_multi_aff(morph);
	isl_morph_free(morph);

	hull_set = isl_set_from_basic_set(hull);
	return add_node(graph, space, nvar, 1, hull_set, compress, decompress);
error:
	isl_basic_set_free(hull);
	isl_space_free(space);
	return isl_stat_error;
}
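
/* A rough illustration of the compression performed above (the domain in
 * this sketch is made up): a set such as
 *
 *	{ S[i, j] : i = 2 j and 0 <= i <= 100 }
 *
 * has a defining equality for i, so the node would be compressed to a
 * single variable, with "compress" mapping S[i, j] to the compressed space
 * and "decompress" mapping a compressed point [t] back to S[2t, t], while
 * "hull" records the affine hull { S[i, j] : i = 2 j } that was used to
 * derive the compression.  The schedule for this node is then computed
 * over the compressed, one-dimensional space.
 */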
struct isl_extract_edge_data {
	enum isl_edge_type type;
	struct isl_sched_graph *graph;
};
/* Merge edge2 into edge1, freeing the contents of edge2.
 * Return 0 on success and -1 on failure.
 *
 * edge1 and edge2 are assumed to have the same value for the map field.
 */
static int merge_edge(struct isl_sched_edge *edge1,
	struct isl_sched_edge *edge2)
{
	edge1->types |= edge2->types;
	isl_map_free(edge2->map);

	if (is_condition(edge2)) {
		if (!edge1->tagged_condition)
			edge1->tagged_condition = edge2->tagged_condition;
		else
			edge1->tagged_condition =
				isl_union_map_union(edge1->tagged_condition,
						    edge2->tagged_condition);
	}

	if (is_conditional_validity(edge2)) {
		if (!edge1->tagged_validity)
			edge1->tagged_validity = edge2->tagged_validity;
		else
			edge1->tagged_validity =
				isl_union_map_union(edge1->tagged_validity,
						    edge2->tagged_validity);
	}

	if (is_condition(edge2) && !edge1->tagged_condition)
		return -1;
	if (is_conditional_validity(edge2) && !edge1->tagged_validity)
		return -1;

	return 0;
}
/* Insert dummy tags in domain and range of "map".
 *
 * In particular, if "map" is of the form
 *
 *	A -> B
 *
 * then return
 *
 *	[A -> dummy_tag] -> [B -> dummy_tag]
 *
 * where the dummy_tags are identical and equal to any dummy tags
 * introduced by any other call to this function.
 */
static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
{
	static char dummy;
	isl_ctx *ctx;
	isl_id *id;
	isl_space *space;
	isl_set *domain, *range;

	ctx = isl_map_get_ctx(map);

	id = isl_id_alloc(ctx, NULL, &dummy);
	space = isl_space_params(isl_map_get_space(map));
	space = isl_space_set_from_params(space);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	space = isl_space_map_from_set(space);

	domain = isl_map_wrap(map);
	range = isl_map_wrap(isl_map_universe(space));
	map = isl_map_from_domain_and_range(domain, range);
	map = isl_map_zip(map);

	return map;
}
/* Given that at least one of "src" or "dst" is compressed, return
 * a map between the spaces of these nodes restricted to the affine
 * hull that was used in the compression.
 */
static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_set *dom, *ran;

	if (src->compressed)
		dom = isl_set_copy(src->hull);
	else
		dom = isl_set_universe(isl_space_copy(src->space));
	if (dst->compressed)
		ran = isl_set_copy(dst->hull);
	else
		ran = isl_set_universe(isl_space_copy(dst->space));

	return isl_map_from_domain_and_range(dom, ran);
}
/* Intersect the domains of the nested relations in domain and range
 * of "tagged" with "map".
 */
static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
	__isl_keep isl_map *map)
{
	isl_set *set;

	tagged = isl_map_zip(tagged);
	set = isl_map_wrap(isl_map_copy(map));
	tagged = isl_map_intersect_domain(tagged, set);
	tagged = isl_map_zip(tagged);
	return tagged;
}
/* Return a pointer to the node that lives in the domain space of "map"
 * or NULL if there is no such node.
 */
static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_domain(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}

/* Return a pointer to the node that lives in the range space of "map"
 * or NULL if there is no such node.
 */
static struct isl_sched_node *find_range_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_range(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}
/* Add a new edge to the graph based on the given map
 * and add it to data->graph->edge_table[data->type].
 * If a dependence relation of a given type happens to be identical
 * to one of the dependence relations of a type that was added before,
 * then we don't create a new edge, but instead mark the original edge
 * as also representing a dependence of the current type.
 *
 * Edges of type isl_edge_condition or isl_edge_conditional_validity
 * may be specified as "tagged" dependence relations.  That is, "map"
 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
 * the dependence on iterations and a and b are tags.
 * edge->map is set to the relation containing the elements i -> j,
 * while edge->tagged_condition and edge->tagged_validity contain
 * the union of all the "map" relations
 * for which extract_edge is called that result in the same edge->map.
 *
 * If the source or the destination node is compressed, then
 * intersect both "map" and "tagged" with the constraints that
 * were used to construct the compression.
 * This ensures that there are no schedule constraints defined
 * outside of these domains, while the scheduler no longer has
 * any control over those outside parts.
 */
static isl_stat extract_edge(__isl_take isl_map *map, void *user)
{
	isl_ctx *ctx = isl_map_get_ctx(map);
	struct isl_extract_edge_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	struct isl_sched_node *src, *dst;
	struct isl_sched_edge *edge;
	isl_map *tagged = NULL;

	if (data->type == isl_edge_condition ||
	    data->type == isl_edge_conditional_validity) {
		if (isl_map_can_zip(map)) {
			tagged = isl_map_copy(map);
			map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
		} else {
			tagged = insert_dummy_tags(isl_map_copy(map));
		}
	}

	src = find_domain_node(ctx, graph, map);
	dst = find_range_node(ctx, graph, map);

	if (!src || !dst) {
		isl_map_free(map);
		isl_map_free(tagged);
		return isl_stat_ok;
	}

	if (src->compressed || dst->compressed) {
		isl_map *hull;

		hull = extract_hull(src, dst);
		if (tagged)
			tagged = map_intersect_domains(tagged, hull);
		map = isl_map_intersect(map, hull);
	}

	graph->edge[graph->n_edge].src = src;
	graph->edge[graph->n_edge].dst = dst;
	graph->edge[graph->n_edge].map = map;
	graph->edge[graph->n_edge].types = 0;
	graph->edge[graph->n_edge].tagged_condition = NULL;
	graph->edge[graph->n_edge].tagged_validity = NULL;
	set_type(&graph->edge[graph->n_edge], data->type);
	if (data->type == isl_edge_condition)
		graph->edge[graph->n_edge].tagged_condition =
					isl_union_map_from_map(tagged);
	if (data->type == isl_edge_conditional_validity)
		graph->edge[graph->n_edge].tagged_validity =
					isl_union_map_from_map(tagged);

	edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
	if (!edge) {
		graph->n_edge++;
		return isl_stat_error;
	}
	if (edge == &graph->edge[graph->n_edge])
		return graph_edge_table_add(ctx, graph, data->type,
				    &graph->edge[graph->n_edge++]);

	if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
		return isl_stat_error;

	return graph_edge_table_add(ctx, graph, data->type, edge);
}
/* Initialize the schedule graph "graph" from the schedule constraints "sc".
 *
 * The context is included in the domain before the nodes of
 * the graphs are extracted in order to be able to exploit
 * any possible additional equalities.
 * Note that this intersection is only performed locally here.
 */
static isl_stat graph_init(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_union_set *domain;
	struct isl_extract_edge_data data;
	enum isl_edge_type i;
	isl_stat r;

	if (!sc)
		return isl_stat_error;

	ctx = isl_schedule_constraints_get_ctx(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	graph->n = isl_union_set_n_set(domain);
	isl_union_set_free(domain);

	if (graph_alloc(ctx, graph, graph->n,
	    isl_schedule_constraints_n_map(sc)) < 0)
		return isl_stat_error;

	if (compute_max_row(graph, sc) < 0)
		return isl_stat_error;
	graph->root = 1;
	graph->n = 0;
	domain = isl_schedule_constraints_get_domain(sc);
	domain = isl_union_set_intersect_params(domain,
						isl_set_copy(sc->context));
	r = isl_union_set_foreach_set(domain, &extract_node, graph);
	isl_union_set_free(domain);
	if (r < 0)
		return isl_stat_error;
	if (graph_init_table(ctx, graph) < 0)
		return isl_stat_error;
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		graph->max_edge[i] = isl_union_map_n_map(sc->constraint[i]);
	if (graph_init_edge_tables(ctx, graph) < 0)
		return isl_stat_error;
	graph->n_edge = 0;
	data.graph = graph;
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		data.type = i;
		if (isl_union_map_foreach_map(sc->constraint[i],
						&extract_edge, &data) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}
/* Check whether there is any dependence from node[j] to node[i]
 * or from node[i] to node[j].
 */
static isl_bool node_follows_weak(int i, int j, void *user)
{
	isl_bool f;
	struct isl_sched_graph *graph = user;

	f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
	if (f < 0 || f)
		return f;
	return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
}

/* Check whether there is a (conditional) validity dependence from node[j]
 * to node[i], forcing node[i] to follow node[j].
 */
static isl_bool node_follows_strong(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Use Tarjan's algorithm for computing the strongly connected components
 * in the dependence graph only considering those edges defined by "follows".
 */
static int detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	isl_bool (*follows)(int i, int j, void *user))
{
	int i, n;
	struct isl_tarjan_graph *g = NULL;

	g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
	if (!g)
		return -1;

	graph->scc = 0;
	i = 0;
	n = graph->n;
	while (n) {
		while (g->order[i] != -1) {
			graph->node[g->order[i]].scc = graph->scc;
			--n;
			++i;
		}
		++i;
		graph->scc++;
	}

	isl_tarjan_graph_free(g);

	return 0;
}
/* Apply Tarjan's algorithm to detect the strongly connected components
 * in the dependence graph.
 * Only consider the (conditional) validity dependences and clear "weak".
 */
static int detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 0;
	return detect_ccs(ctx, graph, &node_follows_strong);
}

/* Apply Tarjan's algorithm to detect the (weakly) connected components
 * in the dependence graph.
 * Consider all dependences and set "weak".
 */
static int detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 1;
	return detect_ccs(ctx, graph, &node_follows_weak);
}
static int cmp_scc(const void *a, const void *b, void *data)
{
	struct isl_sched_graph *graph = data;
	const int *i1 = a;
	const int *i2 = b;

	return graph->node[*i1].scc - graph->node[*i2].scc;
}

/* Sort the elements of graph->sorted according to the corresponding SCCs.
 */
static int sort_sccs(struct isl_sched_graph *graph)
{
	return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
}
/* Given a dependence relation R from "node" to itself,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x such that
 *
 *	c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
 *
 * or, equivalently,
 *
 *	c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
 *
 * We choose here to compute the dual of delta R.
 * Alternatively, we could have computed the dual of R, resulting
 * in a set of tuples c_0, c_n, c_x, c_y, and then
 * plugged in (c_0, c_n, c_x, -c_x).
 *
 * If "node" has been compressed, then the dependence relation
 * is also compressed before the set of coefficients is computed.
 */
static __isl_give isl_basic_set *intra_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_node *node,
	__isl_take isl_map *map)
{
	isl_set *delta;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;

	m = isl_map_to_basic_set_try_get(graph->intra_hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	if (node->compressed) {
		map = isl_map_preimage_domain_multi_aff(map,
				    isl_multi_aff_copy(node->decompress));
		map = isl_map_preimage_range_multi_aff(map,
				    isl_multi_aff_copy(node->decompress));
	}
	delta = isl_set_remove_divs(isl_map_deltas(map));
	coef = isl_set_coefficients(delta);
	graph->intra_hmap = isl_map_to_basic_set_set(graph->intra_hmap, key,
					isl_basic_set_copy(coef));

	return coef;
}
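
/* A small worked example of the dual computed above (the relation is made
 * up and parameters are ignored for simplicity): for R = { [i] -> [i + 1] },
 * delta R = { [1] }, so the coefficients (c_0, c_x) of the valid constraints
 *
 *	c_0 + c_x d >= 0	for each d in delta R
 *
 * reduce to c_0 + c_x >= 0.  A schedule row c_x i then has a non-negative
 * dependence distance exactly when plugging in (0, c_x), as done by
 * add_intra_validity_constraints below, yields c_x >= 0.
 */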
/* Given a dependence relation R, construct the set of coefficients
 * of valid constraints for elements in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x, c_y such that
 *
 *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
 *
 * If the source or destination nodes of "edge" have been compressed,
 * then the dependence relation is also compressed before
 * the set of coefficients is computed.
 */
static __isl_give isl_basic_set *inter_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_edge *edge,
	__isl_take isl_map *map)
{
	isl_set *set;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;

	m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	if (edge->src->compressed)
		map = isl_map_preimage_domain_multi_aff(map,
				    isl_multi_aff_copy(edge->src->decompress));
	if (edge->dst->compressed)
		map = isl_map_preimage_range_multi_aff(map,
				    isl_multi_aff_copy(edge->dst->decompress));
	set = isl_map_wrap(isl_map_remove_divs(map));
	coef = isl_set_coefficients(set);
	graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
					isl_basic_set_copy(coef));

	return coef;
}
/* Add constraints to graph->lp that force validity for the given
 * dependence from a node i to itself.
 * That is, add constraints that enforce
 *
 *	(c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *	= c_i_x (y - x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (0, 0, c_i_x^+ - c_i_x^-),
 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
 *
 * Actually, we do not construct constraints for the c_i_x themselves,
 * but for the coefficients of c_i_x written as a linear combination
 * of the columns in node->cmap.
 */
static int add_intra_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	unsigned total;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(node->cmap));
	if (!coef)
		goto error;

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, -1);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, 1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
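
/* Purely as an illustration of the dim_map constructed above (derived from
 * the layout used in this file, not an independent specification): the block
 * of LP variables for a node starts at node->start and consists of the
 * constant term c_i_0, followed by nparam interleaved pairs
 * (c_i_n^-, c_i_n^+) and nvar interleaved pairs (c_i_x^-, c_i_x^+).
 * With the isl_dim_map_range(dim_map, dst_pos, dst_stride, src_pos,
 * src_stride, n, sign) convention used here, the two calls above send the
 * dual coefficient of the k-th schedule variable to column
 * node->start + 2 * node->nparam + 1 + 2 * k with weight -1 (the negative
 * part) and to column node->start + 2 * node->nparam + 2 + 2 * k with
 * weight 1 (the positive part), so that the constraint is expressed in
 * terms of c_i_x = c_i_x^+ - c_i_x^-.
 */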
/* Add constraints to graph->lp that force validity for the given
 * dependence from node i to node j.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (c_j_0 - c_i_0, c_j_n^+ - c_j_n^- - (c_i_n^+ - c_i_n^-),
 * c_j_x^+ - c_j_x^- - (c_i_x^+ - c_i_x^-)),
 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
 *
 * Actually, we do not construct constraints for the c_*_x themselves,
 * but for the coefficients of c_*_x written as a linear combination
 * of the columns in node->cmap.
 */
static int add_inter_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	unsigned total;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(src->cmap));
	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set) + src->nvar,
		    isl_mat_copy(dst->cmap));
	if (!coef)
		goto error;

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, 1);
	isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -1);
	isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, 1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, -1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, 1);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -1);
	isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, 1);
	isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, 1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, -1);

	edge->start = graph->lp->n_ineq;
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	if (!graph->lp)
		goto error;
	isl_space_free(dim);
	edge->end = graph->lp->n_ineq;

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from a node i to itself.
 * If s = 1, we add the constraint
 *
 *	c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	-c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
 * with each coefficient (except m_0) represented as a pair of non-negative
 * coefficients.
 *
 * Actually, we do not construct constraints for the c_i_x themselves,
 * but for the coefficients of c_i_x written as a linear combination
 * of the columns in node->cmap.
 *
 *
 * If "local" is set, then we add constraints
 *
 *	c_i_x (y - x) <= 0
 *
 * or
 *
 *	-c_i_x (y - x) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in (0, 0, -s * c_i_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static int add_intra_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	unsigned total;
	unsigned nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(node->cmap));
	if (!coef)
		goto error;

	nparam = isl_space_dim(node->space, isl_dim_param);
	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, s);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, -s);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
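
/* Continuing the made-up example R = { [i] -> [i + 1] } (parameters again
 * ignored for simplicity): with s = 1 the constraint added above is
 *
 *	-c_i_x + m_0 + m_n n >= 0
 *
 * i.e., the dependence distance c_i_x is bounded by m_0 + m_n n.  With
 * "local" set it degenerates to -c_i_x >= 0, which together with the
 * validity constraint c_i_x >= 0 forces a zero dependence distance.
 */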
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from node i to node j.
 * If s = 1, we add the constraint
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *		<= m_0 + m_n n
 *
 * or
 *
 *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
 *		<= m_0 + m_n n
 *
 * or
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
 *  -s*c_j_x+s*c_i_x)
 * with each coefficient (except m_0, c_j_0 and c_i_0)
 * represented as a pair of non-negative coefficients.
 *
 * Actually, we do not construct constraints for the c_*_x themselves,
 * but for the coefficients of c_*_x written as a linear combination
 * of the columns in node->cmap.
 *
 *
 * If "local" is set, then we add constraints
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
 *
 * or
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in
 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, -s*c_j_x+s*c_i_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static int add_inter_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	unsigned total;
	unsigned nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(src->cmap));
	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set) + src->nvar,
		    isl_mat_copy(dst->cmap));
	if (!coef)
		goto error;

	nparam = isl_space_dim(src->space, isl_dim_param);
	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, -s);
	isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, s);
	isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, -s);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, s);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, -s);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, s);
	isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, -s);
	isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, s);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, -s);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, s);

	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
/* Add all validity constraints to graph->lp.
 *
 * An edge that is forced to be local needs to have its dependence
 * distances equal to zero.  We take care of bounding them by 0 from below
 * here.  add_all_proximity_constraints takes care of bounding them by 0
 * from above.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_validity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src != edge->dst)
			continue;
		if (add_intra_validity_constraints(graph, edge) < 0)
			return -1;
	}

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src == edge->dst)
			continue;
		if (add_inter_validity_constraints(graph, edge) < 0)
			return -1;
	}

	return 0;
}

/* Add constraints to graph->lp that bound the dependence distance
 * for all dependence relations.
 * If a given proximity dependence is identical to a validity
 * dependence, then the dependence distance is already bounded
 * from below (by zero), so we only need to bound the distance
 * from above.  (This includes the case of "local" dependences
 * which are treated as validity dependence by add_all_validity_constraints.)
 * Otherwise, we need to bound the distance both from above and from below.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_proximity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_proximity(edge) && !local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (is_validity(edge) || local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
	}

	return 0;
}

/* Compute a basis for the rows in the linear part of the schedule
 * and extend this basis to a full basis.  The remaining rows
 * can then be used to force linear independence from the rows
 * in the schedule.
 *
 * In particular, given the schedule rows S, we compute
 *
 *	S = H Q
 *	S U = H
 *
 * with H the Hermite normal form of S.  That is, all but the
 * first rank columns of H are zero and so each row in S is
 * a linear combination of the first rank rows of Q.
 * The matrix Q is then transposed because we will write the
 * coefficients of the next schedule row as a column vector s
 * and express this s as a linear combination s = Q c of the
 * rows of Q.
 * Similarly, the matrix U is transposed such that we can
 * compute the coefficients c = U s from a schedule row s.
 */
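/* For illustration only (hypothetical numbers): suppose a node has nvar = 2
 * and a single schedule row with linear part S = [1 1].  One possible
 * decomposition is
 *
 *	H = [1 0]	Q = [1 1]	U = [1 -1]
 *			    [0 1]	    [0  1]
 *
 * so that S = H Q and S U = H, with rank 1.
 * A candidate next row s = (1, 0)^T gives c = U^T s = (1, -1)^T, whose
 * last (nvar - rank) entry is non-zero, so s is linearly independent of
 * the existing row, while s = (2, 2)^T gives c = (2, 0)^T and would be
 * considered trivial by is_trivial later in this file.
 */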
static int node_update_cmap(struct isl_sched_node *node)
{
	isl_mat *H, *U, *Q;
	int n_row = isl_mat_rows(node->sched);

	H = isl_mat_sub_alloc(node->sched, 0, n_row,
			      1 + node->nparam, node->nvar);

	H = isl_mat_left_hermite(H, 0, &U, &Q);
	isl_mat_free(node->cmap);
	isl_mat_free(node->cinv);
	isl_mat_free(node->ctrans);
	node->ctrans = isl_mat_copy(Q);
	node->cmap = isl_mat_transpose(Q);
	node->cinv = isl_mat_transpose(U);
	node->rank = isl_mat_initial_non_zero_cols(H);
	isl_mat_free(H);

	if (!node->cmap || !node->cinv || !node->ctrans || node->rank < 0)
		return -1;
	return 0;
}

/* How many times should we count the constraints in "edge"?
 *
 * If carry is set, then we are counting the number of
 * (validity or conditional validity) constraints that will be added
 * in setup_carry_lp and we count each edge exactly once.
 *
 * Otherwise, we count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If an edge is only marked conditional_validity then it counts
 * as zero since it is only checked afterwards.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int edge_multiplicity(struct isl_sched_edge *edge, int carry,
	int use_coincidence)
{
	if (carry && !is_validity(edge) && !is_conditional_validity(edge))
		return 0;
	if (carry)
		return 1;
	if (is_proximity(edge) || is_local(edge))
		return 2;
	if (use_coincidence && is_coincidence(edge))
		return 2;
	if (is_validity(edge))
		return 1;
	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added for the given map.
 *
 * "use_coincidence" is set if we should take into account coincidence edges.
 */
static int count_map_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map,
	int *n_eq, int *n_ineq, int carry, int use_coincidence)
{
	isl_basic_set *coef;
	int f = edge_multiplicity(edge, carry, use_coincidence);

	if (f == 0) {
		isl_map_free(map);
		return 0;
	}

	if (edge->src == edge->dst)
		coef = intra_coefficients(graph, edge->src, map);
	else
		coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;
	*n_eq += f * coef->n_eq;
	*n_ineq += f * coef->n_ineq;
	isl_basic_set_free(coef);

	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added to the main lp problem.
 * We count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int count_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq, int use_coincidence)
{
	int i;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_map *map = isl_map_copy(edge->map);

		if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
					  0, use_coincidence) < 0)
			return -1;
	}

	return 0;
}

/* Count the number of constraints that will be added by
 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
 * accordingly.
 *
 * In practice, add_bound_coefficient_constraints only adds inequalities.
 */
static int count_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	int i;

	if (ctx->opt->schedule_max_coefficient == -1)
		return 0;

	for (i = 0; i < graph->n; ++i)
		*n_ineq += 2 * graph->node[i].nparam + 2 * graph->node[i].nvar;

	return 0;
}

/* Add constraints that bound the values of the variable and parameter
 * coefficients of the schedule.
 *
 * The maximal value of the coefficients is defined by the option
 * 'schedule_max_coefficient'.
 */
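/* For illustration only: with a hypothetical schedule_max_coefficient of 4,
 * each non-negative part v of a parameter or variable coefficient gets an
 * inequality of the form
 *
 *	4 - v >= 0
 *
 * which is what the loop below encodes by setting the coefficient of v
 * to -1 and the constant term of the inequality to the maximal value.
 */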
static int add_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i, j, k;
	int max_coefficient;
	int total;

	max_coefficient = ctx->opt->schedule_max_coefficient;

	if (max_coefficient == -1)
		return 0;

	total = isl_basic_set_total_dim(graph->lp);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		for (j = 0; j < 2 * node->nparam + 2 * node->nvar; ++j) {
			int dim;
			k = isl_basic_set_alloc_inequality(graph->lp);
			if (k < 0)
				return -1;
			dim = 1 + node->start + 1 + j;
			isl_seq_clr(graph->lp->ineq[k], 1 + total);
			isl_int_set_si(graph->lp->ineq[k][dim], -1);
			isl_int_set_si(graph->lp->ineq[k][0], max_coefficient);
		}
	}

	return 0;
}

/* Construct an ILP problem for finding schedule coefficients
 * that result in non-negative, but small dependence distances
 * over all dependences.
 * In particular, the dependence distances over proximity edges
 * are bounded by m_0 + m_n n and we compute schedule coefficients
 * with small values (preferably zero) of m_n and m_0.
 *
 * All variables of the ILP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of positive and negative parts of m_n coefficients
 *	- m_0
 *	- sum of positive and negative parts of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- positive and negative parts of m_n coefficients
 *	- for each node
 *		- c_i_0
 *		- positive and negative parts of c_i_n (if parametric)
 *		- positive and negative parts of c_i_x
 *
 * The c_i_x are not represented directly, but through the columns of
 * node->cmap.  That is, the computed values are for variable t_i_x
 * such that c_i_x = Q t_i_x with Q equal to node->cmap.
 *
 * The constraints are those from the edges plus two or three equalities
 * to express the sums.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
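/* For illustration only (hypothetical sizes): for a graph with a single
 * node with nparam = 1 and nvar = 2, the LP variables would be laid out as
 *
 *	0: sum of the m_n parts		1: m_0
 *	2: sum of all c_n parts		3: sum of all c_x parts
 *	4, 5: m_n^-, m_n^+
 *	6: c_i_0 (node->start = 6)
 *	7, 8: c_i_n^-, c_i_n^+
 *	9, 10, 11, 12: t_i_x pairs (negative part first)
 *
 * so that lexicographically minimizing this vector first minimizes the
 * m_n and m_0 bounds and only then the schedule coefficients themselves.
 */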
static int setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i, j;
	int k;
	unsigned nparam;
	unsigned total;
	isl_space *dim;
	int parametric;
	int param_pos;
	int n_eq, n_ineq;
	int max_constant_term;

	max_constant_term = ctx->opt->schedule_max_constant_term;

	parametric = ctx->opt->schedule_parametric;
	nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
	param_pos = 4;
	total = param_pos + 2 * nparam;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		if (node_update_cmap(node) < 0)
			return -1;
		node->start = total;
		total += 1 + 2 * (node->nparam + node->nvar);
	}

	if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
		return -1;
	if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return -1;

	dim = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 2 + parametric;
	if (max_constant_term != -1)
		n_ineq += graph->n;

	graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return -1;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1], -1);
	for (i = 0; i < 2 * nparam; ++i)
		isl_int_set_si(graph->lp->eq[k][1 + param_pos + i], 1);

	if (parametric) {
		k = isl_basic_set_alloc_equality(graph->lp);
		if (k < 0)
			return -1;
		isl_seq_clr(graph->lp->eq[k], 1 + total);
		isl_int_set_si(graph->lp->eq[k][3], -1);
		for (i = 0; i < graph->n; ++i) {
			int pos = 1 + graph->node[i].start + 1;

			for (j = 0; j < 2 * graph->node[i].nparam; ++j)
				isl_int_set_si(graph->lp->eq[k][pos + j], 1);
		}
	}

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return -1;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][4], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node->start + 1 + 2 * node->nparam;

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	if (max_constant_term != -1)
		for (i = 0; i < graph->n; ++i) {
			struct isl_sched_node *node = &graph->node[i];
			k = isl_basic_set_alloc_inequality(graph->lp);
			if (k < 0)
				return -1;
			isl_seq_clr(graph->lp->ineq[k], 1 + total);
			isl_int_set_si(graph->lp->ineq[k][1 + node->start], -1);
			isl_int_set_si(graph->lp->ineq[k][0], max_constant_term);
		}

	if (add_bound_coefficient_constraints(ctx, graph) < 0)
		return -1;
	if (add_all_validity_constraints(graph, use_coincidence) < 0)
		return -1;
	if (add_all_proximity_constraints(graph, use_coincidence) < 0)
		return -1;

	return 0;
}

/* Analyze the conflicting constraint found by
 * isl_tab_basic_set_non_trivial_lexmin.  If it corresponds to the validity
 * constraint of one of the edges between distinct nodes, living, moreover
 * in distinct SCCs, then record the source and sink SCC as this may
 * be a good place to cut between SCCs.
 */
static int check_conflict(int con, void *user)
{
	int i;
	struct isl_sched_graph *graph = user;

	if (graph->src_scc >= 0)
		return 0;

	con -= graph->lp->n_eq;

	if (con >= graph->lp->n_ineq)
		return 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (!is_validity(&graph->edge[i]))
			continue;
		if (graph->edge[i].src == graph->edge[i].dst)
			continue;
		if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
			continue;
		if (graph->edge[i].start > con)
			continue;
		if (graph->edge[i].end <= con)
			continue;
		graph->src_scc = graph->edge[i].src->scc;
		graph->dst_scc = graph->edge[i].dst->scc;
	}

	return 0;
}

/* Check whether the next schedule row of the given node needs to be
 * non-trivial.  Lower-dimensional domains may have some trivial rows,
 * but as soon as the number of remaining required non-trivial rows
 * is as large as the number of remaining rows to be computed,
 * all remaining rows need to be non-trivial.
 */
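/* For illustration only (hypothetical numbers): if a node has nvar = 3 and
 * rank = 1, it still needs 3 - 1 = 2 non-trivial rows.  If the graph as a
 * whole requires maxvar = 3 rows and n_row = 1 has been computed so far,
 * then 2 >= 3 - 1 holds and the next row for this node must be non-trivial.
 */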
static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
{
	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
}

/* Solve the ILP problem constructed in setup_lp.
 * For each node such that all the remaining rows of its schedule
 * need to be non-trivial, we construct a non-triviality region.
 * This region imposes that the next row is independent of previous rows.
 * In particular the coefficients c_i_x are represented by t_i_x
 * variables with c_i_x = Q t_i_x and Q a unimodular matrix such that
 * its first columns span the rows of the previously computed part
 * of the schedule.  The non-triviality region enforces that at least
 * one of the remaining components of t_i_x is non-zero, i.e.,
 * that the new schedule row depends on at least one of the remaining
 * columns of Q.
 */
static __isl_give isl_vec *solve_lp(struct isl_sched_graph *graph)
{
	int i;
	isl_vec *sol;
	isl_basic_set *lp;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int skip = node->rank;
		graph->region[i].pos = node->start + 1 + 2*(node->nparam+skip);
		if (needs_row(graph, node))
			graph->region[i].len = 2 * (node->nvar - skip);
		else
			graph->region[i].len = 0;
	}
	lp = isl_basic_set_copy(graph->lp);
	sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
				       graph->region, &check_conflict, graph);

	return sol;
}

/* Update the schedules of all nodes based on the given solution
 * of the LP problem.
 * The new row is added to the current band.
 * All possibly negative coefficients are encoded as a difference
 * of two non-negative variables, so we need to perform the subtraction
 * here.  Moreover, if use_cmap is set, then the solution does
 * not refer to the actual coefficients c_i_x, but instead to variables
 * t_i_x such that c_i_x = Q t_i_x and Q is equal to node->cmap.
 * In this case, we then also need to perform this multiplication
 * to obtain the values of c_i_x.
 *
 * If coincident is set, then the caller guarantees that the new
 * row satisfies the coincidence constraints.
 */
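/* For illustration only (hypothetical values): if the pair of LP variables
 * for some coefficient takes the values (2, 5), with the negative part
 * stored first, then the actual coefficient is 5 - 2 = 3.  This is the
 * subtraction performed in the loop over the parameter and variable
 * coefficients below.
 */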
static int update_schedule(struct isl_sched_graph *graph,
	__isl_take isl_vec *sol, int use_cmap, int coincident)
{
	int i, j;
	isl_vec *csol = NULL;

	if (!sol)
		goto error;
	if (sol->size == 0)
		isl_die(sol->ctx, isl_error_internal,
			"no solution found", goto error);
	if (graph->n_total_row >= graph->max_row)
		isl_die(sol->ctx, isl_error_internal,
			"too many schedule rows", goto error);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = node->start;
		int row = isl_mat_rows(node->sched);

		isl_vec_free(csol);
		csol = isl_vec_alloc(sol->ctx, node->nvar);
		if (!csol)
			goto error;

		isl_map_free(node->sched_map);
		node->sched_map = NULL;
		node->sched = isl_mat_add_rows(node->sched, 1);
		if (!node->sched)
			goto error;
		node->sched = isl_mat_set_element(node->sched, row, 0,
						  sol->el[1 + pos]);
		for (j = 0; j < node->nparam + node->nvar; ++j)
			isl_int_sub(sol->el[1 + pos + 1 + 2 * j + 1],
				    sol->el[1 + pos + 1 + 2 * j + 1],
				    sol->el[1 + pos + 1 + 2 * j]);
		for (j = 0; j < node->nparam; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, 1 + j, sol->el[1+pos+1+2*j+1]);
		for (j = 0; j < node->nvar; ++j)
			isl_int_set(csol->el[j],
				    sol->el[1+pos+1+2*(node->nparam+j)+1]);
		if (use_cmap)
			csol = isl_mat_vec_product(isl_mat_copy(node->cmap),
						   csol);
		if (!csol)
			goto error;
		for (j = 0; j < node->nvar; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, 1 + node->nparam + j, csol->el[j]);
		node->coincident[graph->n_total_row] = coincident;
	}
	isl_vec_free(sol);
	isl_vec_free(csol);

	graph->n_row++;
	graph->n_total_row++;

	return 0;
error:
	isl_vec_free(sol);
	isl_vec_free(csol);
	return -1;
}

/* Convert row "row" of node->sched into an isl_aff living in "ls"
 * and return this isl_aff.
 */
static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
	struct isl_sched_node *node, int row)
{
	int j;
	isl_int v;
	isl_aff *aff;

	isl_int_init(v);

	aff = isl_aff_zero_on_domain(ls);
	isl_mat_get_element(node->sched, row, 0, &v);
	aff = isl_aff_set_constant(aff, v);
	for (j = 0; j < node->nparam; ++j) {
		isl_mat_get_element(node->sched, row, 1 + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
	}
	for (j = 0; j < node->nvar; ++j) {
		isl_mat_get_element(node->sched, row, 1 + node->nparam + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
	}

	isl_int_clear(v);

	return aff;
}

/* Convert the "n" rows starting at "first" of node->sched into a multi_aff
 * and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
	struct isl_sched_node *node, int first, int n)
{
	int i;
	isl_space *space;
	isl_local_space *ls;
	isl_aff *aff;
	isl_multi_aff *ma;
	int nrow;

	if (!node)
		return NULL;
	nrow = isl_mat_rows(node->sched);
	if (node->compressed)
		space = isl_multi_aff_get_domain_space(node->decompress);
	else
		space = isl_space_copy(node->space);
	ls = isl_local_space_from_space(isl_space_copy(space));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, n);
	ma = isl_multi_aff_zero(space);

	for (i = first; i < first + n; ++i) {
		aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
		ma = isl_multi_aff_set_aff(ma, i - first, aff);
	}

	isl_local_space_free(ls);

	if (node->compressed)
		ma = isl_multi_aff_pullback_multi_aff(ma,
				isl_multi_aff_copy(node->compress));

	return ma;
}

/* Convert node->sched into a multi_aff and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
	struct isl_sched_node *node)
{
	int nrow;

	nrow = isl_mat_rows(node->sched);
	return node_extract_partial_schedule_multi_aff(node, 0, nrow);
}

/* Convert node->sched into a map and return this map.
 *
 * The result is cached in node->sched_map, which needs to be released
 * whenever node->sched is updated.
 * It is defined over the uncompressed node domain.
 */
static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
{
	if (!node->sched_map) {
		isl_multi_aff *ma;

		ma = node_extract_schedule_multi_aff(node);
		node->sched_map = isl_map_from_multi_aff(ma);
	}

	return isl_map_copy(node->sched_map);
}

/* Construct a map that can be used to update a dependence relation
 * based on the current schedule.
 * That is, construct a map expressing that source and sink
 * are executed within the same iteration of the current schedule.
 * This map can then be intersected with the dependence relation.
 * This is not the most efficient way, but this shouldn't be a critical
 * operation.
 */
static __isl_give isl_map *specializer(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_map *src_sched, *dst_sched;

	src_sched = node_extract_schedule(src);
	dst_sched = node_extract_schedule(dst);
	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
}

/* Intersect the domains of the nested relations in domain and range
 * of "umap" with "map".
 */
static __isl_give isl_union_map *intersect_domains(
	__isl_take isl_union_map *umap, __isl_keep isl_map *map)
{
	isl_union_set *uset;

	umap = isl_union_map_zip(umap);
	uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
	umap = isl_union_map_intersect_domain(umap, uset);
	umap = isl_union_map_zip(umap);
	return umap;
}

/* Update the dependence relation of the given edge based
 * on the current schedule.
 * If the dependence is carried completely by the current schedule, then
 * it is removed from the edge_tables.  It is kept in the list of edges
 * as otherwise all edge_tables would have to be recomputed.
 */
static int update_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int empty;
	isl_map *id;

	id = specializer(edge->src, edge->dst);
	edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
	if (!edge->map)
		goto error;

	if (edge->tagged_condition) {
		edge->tagged_condition =
			intersect_domains(edge->tagged_condition, id);
		if (!edge->tagged_condition)
			goto error;
	}
	if (edge->tagged_validity) {
		edge->tagged_validity =
			intersect_domains(edge->tagged_validity, id);
		if (!edge->tagged_validity)
			goto error;
	}

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		goto error;
	if (empty)
		graph_remove_edge(graph, edge);

	isl_map_free(id);
	return 0;
error:
	isl_map_free(id);
	return -1;
}

/* Does the domain of "umap" intersect "uset"?
 */
static int domain_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Does the range of "umap" intersect "uset"?
 */
static int range_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Are the condition dependences of "edge" local with respect to
 * the current schedule?
 *
 * That is, are domain and range of the condition dependences mapped
 * to the same point?
 *
 * In other words, is the condition false?
 */
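/* For illustration only (hypothetical schedules): if the current schedule
 * maps S[i] to [i] and T[i] to [i], then a condition dependence
 * { S[i] -> T[i] } is transformed below into { [i] -> [i] }, which is a
 * subset of the identity map, so the condition is considered false (local).
 * A dependence { S[i] -> T[i + 1] } would instead map to { [i] -> [i + 1] }
 * and the condition would be considered true.
 */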
static int is_condition_false(struct isl_sched_edge *edge)
{
	isl_union_map *umap;
	isl_map *map, *sched, *test;
	int empty, local;

	empty = isl_union_map_is_empty(edge->tagged_condition);
	if (empty < 0 || empty)
		return empty;

	umap = isl_union_map_copy(edge->tagged_condition);
	umap = isl_union_map_zip(umap);
	umap = isl_union_set_unwrap(isl_union_map_domain(umap));
	map = isl_map_from_union_map(umap);

	sched = node_extract_schedule(edge->src);
	map = isl_map_apply_domain(map, sched);
	sched = node_extract_schedule(edge->dst);
	map = isl_map_apply_range(map, sched);

	test = isl_map_identity(isl_map_get_space(map));
	local = isl_map_is_subset(map, test);
	isl_map_free(map);
	isl_map_free(test);

	return local;
}

/* For each conditional validity constraint that is adjacent
 * to a condition with domain in condition_source or range in condition_sink,
 * turn it into an unconditional validity constraint.
 */
static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
	__isl_take isl_union_set *condition_source,
	__isl_take isl_union_set *condition_sink)
{
	int i;

	condition_source = isl_union_set_coalesce(condition_source);
	condition_sink = isl_union_set_coalesce(condition_sink);

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent;
		isl_union_map *validity;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;
		if (is_validity(&graph->edge[i]))
			continue;

		validity = graph->edge[i].tagged_validity;
		adjacent = domain_intersects(validity, condition_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(validity, condition_source);
		if (adjacent < 0)
			goto error;
		if (!adjacent)
			continue;

		set_validity(&graph->edge[i]);
	}

	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return 0;
error:
	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return -1;
}

/* Update the dependence relations of all edges based on the current schedule
 * and enforce conditional validity constraints that are adjacent
 * to satisfied condition constraints.
 *
 * First check if any of the condition constraints are satisfied
 * (i.e., not local to the outer schedule) and keep track of
 * their domain and range.
 * Then update all dependence relations (which removes the non-local
 * constraints).
 * Finally, if any condition constraints turned out to be satisfied,
 * then turn all adjacent conditional validity constraints into
 * unconditional validity constraints.
 */
static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		int local;
		isl_union_set *uset;
		isl_union_map *umap;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;
		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			goto error;
		if (local)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
	}

	for (i = graph->n_edge - 1; i >= 0; --i) {
		if (update_edge(graph, &graph->edge[i]) < 0)
			goto error;
	}

	if (any)
		return unconditionalize_adjacent_validity(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return 0;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}

static void next_band(struct isl_sched_graph *graph)
{
	graph->band_start = graph->n_total_row;
}

/* Return the union of the universe domains of the nodes in "graph"
 * that satisfy "pred".
 */
static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*pred)(struct isl_sched_node *node, int data), int data)
{
	int i;
	isl_set *set;
	isl_union_set *dom;

	for (i = 0; i < graph->n; ++i)
		if (pred(&graph->node[i], data))
			break;

	if (i >= graph->n)
		isl_die(ctx, isl_error_internal,
			"empty component", return NULL);

	set = isl_set_universe(isl_space_copy(graph->node[i].space));
	dom = isl_union_set_from_set(set);

	for (i = i + 1; i < graph->n; ++i) {
		if (!pred(&graph->node[i], data))
			continue;
		set = isl_set_universe(isl_space_copy(graph->node[i].space));
		dom = isl_union_set_union(dom, isl_union_set_from_set(set));
	}

	return dom;
}

/* Return a list of unions of universe domains, where each element
 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
 */
static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, graph->scc);
	for (i = 0; i < graph->scc; ++i) {
		isl_union_set *dom;

		dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
		filters = isl_union_set_list_add(filters, dom);
	}

	return filters;
}

/* Return a list of two unions of universe domains, one for the SCCs up
 * to and including graph->src_scc and another for the other SCCs.
 */
static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	isl_union_set *dom;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, 2);
	dom = isl_sched_graph_domain(ctx, graph,
		&node_scc_at_most, graph->src_scc);
	filters = isl_union_set_list_add(filters, dom);
	dom = isl_sched_graph_domain(ctx, graph,
		&node_scc_at_least, graph->src_scc + 1);
	filters = isl_union_set_list_add(filters, dom);

	return filters;
}

/* Copy nodes that satisfy node_pred from the src dependence graph
 * to the dst dependence graph.
 */
static int copy_nodes(struct isl_sched_graph *dst, struct isl_sched_graph *src,
	int (*node_pred)(struct isl_sched_node *node, int data), int data)
{
	int i;

	dst->n = 0;
	for (i = 0; i < src->n; ++i) {
		int j;

		if (!node_pred(&src->node[i], data))
			continue;

		j = dst->n;
		dst->node[j].space = isl_space_copy(src->node[i].space);
		dst->node[j].compressed = src->node[i].compressed;
		dst->node[j].hull = isl_set_copy(src->node[i].hull);
		dst->node[j].compress =
			isl_multi_aff_copy(src->node[i].compress);
		dst->node[j].decompress =
			isl_multi_aff_copy(src->node[i].decompress);
		dst->node[j].nvar = src->node[i].nvar;
		dst->node[j].nparam = src->node[i].nparam;
		dst->node[j].sched = isl_mat_copy(src->node[i].sched);
		dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
		dst->node[j].coincident = src->node[i].coincident;
		dst->n++;

		if (!dst->node[j].space || !dst->node[j].sched)
			return -1;
		if (dst->node[j].compressed &&
		    (!dst->node[j].hull || !dst->node[j].compress ||
		     !dst->node[j].decompress))
			return -1;
	}

	return 0;
}

/* Copy non-empty edges that satisfy edge_pred from the src dependence graph
 * to the dst dependence graph.
 * If the source or destination node of the edge is not in the destination
 * graph, then it must be a backward proximity edge and it should simply
 * be ignored.
 */
static int copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
	struct isl_sched_graph *src,
	int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
{
	int i;
	enum isl_edge_type t;

	dst->n_edge = 0;
	for (i = 0; i < src->n_edge; ++i) {
		struct isl_sched_edge *edge = &src->edge[i];
		isl_map *map;
		isl_union_map *tagged_condition;
		isl_union_map *tagged_validity;
		struct isl_sched_node *dst_src, *dst_dst;

		if (!edge_pred(edge, data))
			continue;

		if (isl_map_plain_is_empty(edge->map))
			continue;

		dst_src = graph_find_node(ctx, dst, edge->src->space);
		dst_dst = graph_find_node(ctx, dst, edge->dst->space);
		if (!dst_src || !dst_dst) {
			if (is_validity(edge) || is_conditional_validity(edge))
				isl_die(ctx, isl_error_internal,
					"backward (conditional) validity edge",
					return -1);
			continue;
		}

		map = isl_map_copy(edge->map);
		tagged_condition = isl_union_map_copy(edge->tagged_condition);
		tagged_validity = isl_union_map_copy(edge->tagged_validity);

		dst->edge[dst->n_edge].src = dst_src;
		dst->edge[dst->n_edge].dst = dst_dst;
		dst->edge[dst->n_edge].map = map;
		dst->edge[dst->n_edge].tagged_condition = tagged_condition;
		dst->edge[dst->n_edge].tagged_validity = tagged_validity;
		dst->edge[dst->n_edge].types = edge->types;
		dst->n_edge++;

		if (edge->tagged_condition && !tagged_condition)
			return -1;
		if (edge->tagged_validity && !tagged_validity)
			return -1;

		for (t = isl_edge_first; t <= isl_edge_last; ++t) {
			if (edge !=
			    graph_find_edge(src, t, edge->src, edge->dst))
				continue;
			if (graph_edge_table_add(ctx, dst, t,
					    &dst->edge[dst->n_edge - 1]) < 0)
				return -1;
		}
	}

	return 0;
}

/* Compute the maximal number of variables over all nodes.
 * This is the maximal number of linearly independent schedule
 * rows that we need to compute.
 * Just in case we end up in a part of the dependence graph
 * with only lower-dimensional domains, we make sure we will
 * compute the required amount of extra linearly independent rows.
 */
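/* For illustration only (hypothetical numbers): if n_row = 1 rows have been
 * computed and a node has nvar = 3 but only rank = 1 of its variables are
 * covered by those rows, then nvar + n_row - rank = 3 + 1 - 1 = 3, so
 * maxvar is at least 3 and two more linearly independent rows will still be
 * requested for this node.
 */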
static int compute_maxvar(struct isl_sched_graph *graph)
{
	int i;

	graph->maxvar = 0;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int nvar;

		if (node_update_cmap(node) < 0)
			return -1;
		nvar = node->nvar + graph->n_row - node->rank;
		if (nvar > graph->maxvar)
			graph->maxvar = nvar;
	}

	return 0;
}

/* Extract the subgraph of "graph" that consists of the node satisfying
 * "node_pred" and the edges satisfying "edge_pred" and store
 * the result in "sub".
 */
static int extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, struct isl_sched_graph *sub)
{
	int i, n = 0, n_edge = 0;
	int t;

	for (i = 0; i < graph->n; ++i)
		if (node_pred(&graph->node[i], data))
			++n;
	for (i = 0; i < graph->n_edge; ++i)
		if (edge_pred(&graph->edge[i], data))
			++n_edge;
	if (graph_alloc(ctx, sub, n, n_edge) < 0)
		return -1;
	if (copy_nodes(sub, graph, node_pred, data) < 0)
		return -1;
	if (graph_init_table(ctx, sub) < 0)
		return -1;
	for (t = 0; t <= isl_edge_last; ++t)
		sub->max_edge[t] = graph->max_edge[t];
	if (graph_init_edge_tables(ctx, sub) < 0)
		return -1;
	if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
		return -1;
	sub->n_row = graph->n_row;
	sub->max_row = graph->max_row;
	sub->n_total_row = graph->n_total_row;
	sub->band_start = graph->band_start;

	return 0;
}

static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph);
static __isl_give isl_schedule_node *compute_schedule_wcc(
	isl_schedule_node *node, struct isl_sched_graph *graph);

/* Compute a schedule for a subgraph of "graph".  In particular, for
 * the graph composed of nodes that satisfy node_pred and edges
 * that satisfy edge_pred.
 * If the subgraph is known to consist of a single component, then wcc should
 * be set and then we call compute_schedule_wcc on the constructed subgraph.
 * Otherwise, we call compute_schedule, which will check whether the subgraph
 * is connected.
 *
 * The schedule is inserted at "node" and the updated schedule node
 * is returned.
 */
static __isl_give isl_schedule_node *compute_sub_schedule(
	__isl_take isl_schedule_node *node, isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, int wcc)
{
	struct isl_sched_graph split = { 0 };

	if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
				&split) < 0)
		goto error;

	if (wcc)
		node = compute_schedule_wcc(node, &split);
	else
		node = compute_schedule(node, &split);

	graph_free(ctx, &split);
	return node;
error:
	graph_free(ctx, &split);
	return isl_schedule_node_free(node);
}

static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc == scc && edge->dst->scc == scc;
}

static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
{
	return edge->dst->scc <= scc;
}

static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc >= scc;
}

/* Reset the current band by dropping all its schedule rows.
 */
static int reset_band(struct isl_sched_graph *graph)
{
	int i;
	int drop;

	drop = graph->n_total_row - graph->band_start;
	graph->n_total_row -= drop;
	graph->n_row -= drop;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		isl_map_free(node->sched_map);
		node->sched_map = NULL;

		node->sched = isl_mat_drop_rows(node->sched,
						graph->band_start, drop);

		if (!node->sched)
			return -1;
	}

	return 0;
}

/* Split the current graph into two parts and compute a schedule for each
 * part individually.  In particular, one part consists of all SCCs up
 * to and including graph->src_scc, while the other part contains the other
 * SCCs.  The split is enforced by a sequence node inserted at position "node"
 * in the schedule tree.  Return the updated schedule node.
 * If either of these two parts consists of a sequence, then it is spliced
 * into the sequence containing the two parts.
 *
 * The current band is reset.  It would be possible to reuse
 * the previously computed rows as the first rows in the next
 * band, but recomputing them may result in better rows as we are looking
 * at a smaller part of the dependence graph.
 */
static __isl_give isl_schedule_node *compute_split_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int is_seq;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	if (reset_band(graph) < 0)
		return isl_schedule_node_free(node);

	next_band(graph);

	ctx = isl_schedule_node_get_ctx(node);
	filters = extract_split(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);
	node = isl_schedule_node_child(node, 1);
	node = isl_schedule_node_child(node, 0);

	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_least, &edge_src_scc_at_least,
				graph->src_scc + 1, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 1);
	node = isl_schedule_node_child(node, 0);
	node = isl_schedule_node_child(node, 0);
	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_most, &edge_dst_scc_at_most,
				graph->src_scc, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 0);

	return node;
}

/* Insert a band node at position "node" in the schedule tree corresponding
 * to the current band in "graph".  Mark the band node permutable
 * if "permutable" is set.
 * The partial schedules and the coincidence property are extracted
 * from the graph nodes.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *insert_current_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int permutable)
{
	int i;
	int start, end, n;
	isl_multi_aff *ma;
	isl_multi_pw_aff *mpa;
	isl_multi_union_pw_aff *mupa;

	if (!node)
		return NULL;

	if (graph->n < 1)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	start = graph->band_start;
	end = graph->n_total_row;
	n = end - start;

	ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
	mpa = isl_multi_pw_aff_from_multi_aff(ma);
	mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);

	for (i = 1; i < graph->n; ++i) {
		isl_multi_union_pw_aff *mupa_i;

		ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
								start, n);
		mpa = isl_multi_pw_aff_from_multi_aff(ma);
		mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
		mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
	}
	node = isl_schedule_node_insert_partial_schedule(node, mupa);

	for (i = 0; i < n; ++i)
		node = isl_schedule_node_band_member_set_coincident(node, i,
					graph->node[0].coincident[start + i]);
	node = isl_schedule_node_band_set_permutable(node, permutable);

	return node;
}

/* Update the dependence relations based on the current schedule,
 * add the current band to "node" and then continue with the computation
 * of the next band.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *compute_next_band(
	__isl_take isl_schedule_node *node,
	struct isl_sched_graph *graph, int permutable)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, permutable);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_schedule(node, graph);
	node = isl_schedule_node_parent(node);

	return node;
}

/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * a node j to itself.  "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_j_0 + c_j_n n + c_j_x x)
 *	= c_j_x (y - x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (-e_i, 0, c_j_x),
 * with each coefficient in c_j_x represented as a pair of non-negative
 * coefficients.
 */
static int add_intra_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	unsigned total;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);
	if (!coef)
		return -1;

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, -1);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, 1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
}

/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * node j to node k.  "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for R and then plug in
 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, c_k_x - c_j_x)
 * with each coefficient (except e_i, c_k_0 and c_j_0)
 * represented as a pair of non-negative coefficients.
 */
static int add_inter_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	unsigned total;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, 1);
	isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -1);
	isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, 1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, -1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, 1);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -1);
	isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, 1);
	isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, 1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, -1);

	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
}

/* Add constraints to graph->lp that force all (conditional) validity
 * dependences to be respected and attempt to carry them.
 */
static int add_all_constraints(struct isl_sched_graph *graph)
{
	int i, j;
	int pos;

	pos = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_validity(edge) && !is_conditional_validity(edge))
			continue;

		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (edge->src == edge->dst &&
			    add_intra_constraints(graph, edge, map, pos) < 0)
				return -1;
			if (edge->src != edge->dst &&
			    add_inter_constraints(graph, edge, map, pos) < 0)
				return -1;
			pos++;
		}
	}

	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added to the carry_lp problem.
 * We count each edge exactly once.
 */
static int count_all_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq)
{
	int i, j;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (count_map_constraints(graph, edge, map,
						  n_eq, n_ineq, 1, 0) < 0)
				return -1;
		}
	}

	return 0;
}

/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many dependences as possible.
 * In particular, for each dependence i, we bound the dependence distance
 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
 * of all e_i's.  Dependences with e_i = 0 in the solution are simply
 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
 * Note that if the dependence relation is a union of basic maps,
 * then we have to consider each basic map individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * Below, we consider each of those basic maps as a separate "edge".
 *
 * All variables of the LP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of (1 - e_i) over all edges
 *	- sum of positive and negative parts of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- for each edge
 *		- e_i
 *	- for each node
 *		- c_i_0
 *		- positive and negative parts of c_i_n (if parametric)
 *		- positive and negative parts of c_i_x
 *
 * The constraints are those from the (validity) edges plus three equalities
 * to express the sums and n_edge inequalities to express e_i <= 1.
 */
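/* For illustration only (hypothetical sizes): with two "edges" (basic maps)
 * and a single node with nparam = 0 and nvar = 2, the LP variables would be
 *
 *	0: sum of (1 - e_i)	1: sum of all c_n parts
 *	2: sum of all c_x parts	3, 4: e_0, e_1
 *	5: c_i_0		6, 7, 8, 9: c_i_x pairs
 *
 * and lexicographically minimizing this vector maximizes the number of
 * carried dependences first, since variable 0 equals n_edge minus the sum
 * of the e_i.
 */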
static int setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i, j;
	int k;
	isl_space *dim;
	unsigned total;
	int n_eq, n_ineq;
	int n_edge;

	n_edge = 0;
	for (i = 0; i < graph->n_edge; ++i)
		n_edge += graph->edge[i].map->n;

	total = 3 + n_edge;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		node->start = total;
		total += 1 + 2 * (node->nparam + node->nvar);
	}

	if (count_all_constraints(graph, &n_eq, &n_ineq) < 0)
		return -1;

	dim = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 3;
	n_ineq += n_edge;
	graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
	graph->lp = isl_basic_set_set_rational(graph->lp);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return -1;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][0], -n_edge);
	isl_int_set_si(graph->lp->eq[k][1], 1);
	for (i = 0; i < n_edge; ++i)
		isl_int_set_si(graph->lp->eq[k][4 + i], 1);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return -1;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][2], -1);
	for (i = 0; i < graph->n; ++i) {
		int pos = 1 + graph->node[i].start + 1;

		for (j = 0; j < 2 * graph->node[i].nparam; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return -1;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][3], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node->start + 1 + 2 * node->nparam;

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	for (i = 0; i < n_edge; ++i) {
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return -1;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
		isl_int_set_si(graph->lp->ineq[k][0], 1);
	}

	if (add_all_constraints(graph) < 0)
		return -1;

	return 0;
}

static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc);

/* Comparison function for sorting the statements based on
 * the corresponding value in "r".
 */
static int smaller_value(const void *a, const void *b, void *data)
{
	isl_vec *r = data;
	const int *i1 = a;
	const int *i2 = b;

	return isl_int_cmp(r->el[*i1], r->el[*i2]);
}

/* If the schedule_split_scaled option is set and if the linear
 * parts of the scheduling rows for all nodes in the graph have
 * a non-trivial common divisor, then split off the remainder of the
 * constant term modulo this common divisor from the linear part.
 * Otherwise, insert a band node directly and continue with
 * the construction of the schedule.
 *
 * If a non-trivial common divisor is found, then
 * the linear part is reduced and the remainder is enforced
 * by a sequence node with the children placed in the order
 * of this remainder.
 * In particular, we assign an scc index based on the remainder and
 * then rely on compute_component_schedule to insert the sequence and
 * to continue the schedule construction on each part.
 */
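/* For illustration only (hypothetical rows): if the last schedule rows of
 * two nodes are 2i + 3 and 2j, then the linear parts have common divisor 2,
 * the remainders of the constant terms are 1 and 0, and the rows are
 * replaced by i + 1 and j.  The node with remainder 0 is then placed before
 * the node with remainder 1 in the enforced sequence.
 */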
static __isl_give isl_schedule_node *split_scaled(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int row;
	int scc;
	isl_ctx *ctx;
	isl_int gcd, gcd_i;
	isl_vec *r;
	int *order;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (!ctx->opt->schedule_split_scaled)
		return compute_next_band(node, graph, 0);
	if (graph->n <= 1)
		return compute_next_band(node, graph, 0);

	isl_int_init(gcd);
	isl_int_init(gcd_i);

	isl_int_set_si(gcd, 0);

	row = isl_mat_rows(graph->node[0].sched) - 1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int cols = isl_mat_cols(node->sched);

		isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
		isl_int_gcd(gcd, gcd, gcd_i);
	}

	isl_int_clear(gcd_i);

	if (isl_int_cmp_si(gcd, 1) <= 0) {
		isl_int_clear(gcd);
		return compute_next_band(node, graph, 0);
	}

	r = isl_vec_alloc(ctx, graph->n);
	order = isl_calloc_array(ctx, int, graph->n);
	if (!r || !order)
		goto error;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		order[i] = i;
		isl_int_fdiv_r(r->el[i], node->sched->row[row][0], gcd);
		isl_int_fdiv_q(node->sched->row[row][0],
			       node->sched->row[row][0], gcd);
		isl_int_mul(node->sched->row[row][0],
			    node->sched->row[row][0], gcd);
		node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
		if (!node->sched)
			goto error;
	}

	if (isl_sort(order, graph->n, sizeof(order[0]), &smaller_value, r) < 0)
		goto error;

	scc = 0;
	for (i = 0; i < graph->n; ++i) {
		if (i > 0 && isl_int_ne(r->el[order[i - 1]], r->el[order[i]]))
			++scc;
		graph->node[order[i]].scc = scc;
	}
	graph->scc = scc + 1;

	isl_vec_free(r);
	free(order);
	isl_int_clear(gcd);

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, 0);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_component_schedule(node, graph, 0);
	node = isl_schedule_node_parent(node);

	return node;
error:
	isl_vec_free(r);
	free(order);
	isl_int_clear(gcd);
	return isl_schedule_node_free(node);
}

/* Is the schedule row "sol" trivial on node "node"?
 * That is, is the solution zero on the dimensions orthogonal to
 * the previously found solutions?
 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
 *
 * Each coefficient is represented as the difference between
 * two non-negative values in "sol".  "sol" has been computed
 * in terms of the original iterators (i.e., without use of cmap).
 * We construct the schedule row s and write it as a linear
 * combination of (linear combinations of) previously computed schedule rows.
 * s = Q c or c = U s.
 * If the final entries of c are all zero, then the solution is trivial.
 */
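/* For illustration only (hypothetical numbers): with nvar = 2, rank = 1 and
 * cinv = U^T = [1 0; -1 1], a solution row s = (2, 2) gives c = U^T s = (2, 0),
 * whose entry beyond the first rank positions is zero, so the row is trivial.
 * For s = (1, 0), c = (1, -1) and the row is non-trivial.
 */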
static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
{
	int i;
	int pos;
	int trivial;
	isl_ctx *ctx;
	isl_vec *node_sol;

	if (!sol)
		return -1;
	if (node->nvar == node->rank)
		return 0;

	ctx = isl_vec_get_ctx(sol);
	node_sol = isl_vec_alloc(ctx, node->nvar);
	if (!node_sol)
		return -1;

	pos = 1 + node->start + 1 + 2 * node->nparam;

	for (i = 0; i < node->nvar; ++i)
		isl_int_sub(node_sol->el[i],
			    sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);

	node_sol = isl_mat_vec_product(isl_mat_copy(node->cinv), node_sol);
	if (!node_sol)
		return -1;

	trivial = isl_seq_first_non_zero(node_sol->el + node->rank,
					 node->nvar - node->rank) == -1;

	isl_vec_free(node_sol);

	return trivial;
}

/* Is the schedule row "sol" trivial on any node where it should
 * not be trivial?
 * "sol" has been computed in terms of the original iterators
 * (i.e., without use of cmap).
 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
 */
static int is_any_trivial(struct isl_sched_graph *graph,
	__isl_keep isl_vec *sol)
{
	int i;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int trivial;

		if (!needs_row(graph, node))
			continue;
		trivial = is_trivial(node, sol);
		if (trivial < 0 || trivial)
			return trivial;
	}

	return 0;
}

/* Construct a schedule row for each node such that as many dependences
 * as possible are carried and then continue with the next band.
 *
 * Note that despite the fact that the problem is solved using a rational
 * solver, the solution is guaranteed to be integral.
 * Specifically, the dependence distance lower bounds e_i (and therefore
 * also their sum) are integers.  See Lemma 5 of [1].
 *
 * If the computed schedule row turns out to be trivial on one or
 * more nodes where it should not be trivial, then we throw it away
 * and try again on each component separately.
 *
 * If there is only one component, then we accept the schedule row anyway,
 * but we do not consider it as a complete row and therefore do not
 * increment graph->n_row.  Note that the ranks of the nodes that
 * do get a non-trivial schedule part will get updated regardless and
 * graph->maxvar is computed based on these ranks.  The test for
 * whether more schedule rows are required in compute_schedule_wcc
 * is therefore not affected.
 *
 * Insert a band corresponding to the schedule row at position "node"
 * of the schedule tree and continue with the construction of the schedule.
 * This insertion and the continued construction is performed by split_scaled
 * after optionally checking for non-trivial common divisors.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 * Problem, Part II: Multi-Dimensional Time.
 * In Intl. Journal of Parallel Programming, 1992.
 */
4010 static __isl_give isl_schedule_node
*carry_dependences(
4011 __isl_take isl_schedule_node
*node
, struct isl_sched_graph
*graph
)
4024 for (i
= 0; i
< graph
->n_edge
; ++i
)
4025 n_edge
+= graph
->edge
[i
].map
->n
;
4027 ctx
= isl_schedule_node_get_ctx(node
);
4028 if (setup_carry_lp(ctx
, graph
) < 0)
4029 return isl_schedule_node_free(node
);
4031 lp
= isl_basic_set_copy(graph
->lp
);
4032 sol
= isl_tab_basic_set_non_neg_lexmin(lp
);
4034 return isl_schedule_node_free(node
);
4036 if (sol
->size
== 0) {
4038 isl_die(ctx
, isl_error_internal
,
4039 "error in schedule construction",
4040 return isl_schedule_node_free(node
));
4043 isl_int_divexact(sol
->el
[1], sol
->el
[1], sol
->el
[0]);
4044 if (isl_int_cmp_si(sol
->el
[1], n_edge
) >= 0) {
4046 isl_die(ctx
, isl_error_unknown
,
4047 "unable to carry dependences",
4048 return isl_schedule_node_free(node
));
4051 trivial
= is_any_trivial(graph
, sol
);
4053 sol
= isl_vec_free(sol
);
4054 } else if (trivial
&& graph
->scc
> 1) {
4056 return compute_component_schedule(node
, graph
, 1);
4059 if (update_schedule(graph
, sol
, 0, 0) < 0)
4060 return isl_schedule_node_free(node
);
4064 return split_scaled(node
, graph
);
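/* To illustrate the feasibility check above (illustrative numbers only):
 * if the graph has n_edge = 4 dependences, the LP minimizes the sum of the
 * slack variables e_i, where e_i = 0 means that dependence i is carried by
 * the new row.  A (normalized) objective value sol->el[1] equal to 4 would
 * mean that no dependence is carried at all, which is why a value greater
 * than or equal to n_edge is rejected as "unable to carry dependences".
 */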
/* Topologically sort statements mapped to the same schedule iteration
 * and insert a sequence node in front of "node"
 * corresponding to this order.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * If it turns out to be impossible to sort the statements apart,
 * because different dependences impose different orderings
 * on the statements, then we extend the schedule such that
 * it carries at least one more dependence.
 */
static __isl_give isl_schedule_node *sort_statements(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (graph->n < 1)
		isl_die(ctx, isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	if (graph->n == 1)
		return node;

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->n_edge == 0)
		return node;

	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->scc < graph->n) {
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	return node;
}
/* Are there any (non-empty) (conditional) validity edges in the graph?
 */
static int has_validity_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		int empty;

		empty = isl_map_plain_is_empty(graph->edge[i].map);
		if (empty < 0)
			return -1;
		if (empty)
			continue;
		if (is_validity(&graph->edge[i]) ||
		    is_conditional_validity(&graph->edge[i]))
			return 1;
	}

	return 0;
}
/* Should we apply a Feautrier step?
 * That is, did the user request the Feautrier algorithm and are
 * there any validity dependences (left)?
 */
static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
		return 0;

	return has_validity_edges(graph);
}
/* Compute a schedule for a connected dependence graph using Feautrier's
 * multi-dimensional scheduling algorithm and return the updated schedule node.
 *
 * The original algorithm is described in [1].
 * The main idea is to minimize the number of scheduling dimensions, by
 * trying to satisfy as many dependences as possible per scheduling dimension.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
	isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry_dependences(node, graph);
}
/* Turn off the "local" bit on all (condition) edges.
 */
static void clear_local_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_condition(&graph->edge[i]))
			clear_local(&graph->edge[i]);
}
/* Does "graph" have both condition and conditional validity edges?
 */
static int need_condition_check(struct isl_sched_graph *graph)
{
	int i;
	int any_condition = 0;
	int any_conditional_validity = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (is_condition(&graph->edge[i]))
			any_condition = 1;
		if (is_conditional_validity(&graph->edge[i]))
			any_conditional_validity = 1;
	}

	return any_condition && any_conditional_validity;
}
/* Does "graph" contain any coincidence edge?
 */
static int has_any_coincidence(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_coincidence(&graph->edge[i]))
			return 1;

	return 0;
}
/* Extract the final schedule row as a map with the iteration domain
 * of "node" as domain.
 */
static __isl_give isl_map *final_row(struct isl_sched_node *node)
{
	isl_local_space *ls;
	isl_aff *aff;
	int row;

	row = isl_mat_rows(node->sched) - 1;
	ls = isl_local_space_from_space(isl_space_copy(node->space));
	aff = extract_schedule_row(ls, node, row);
	return isl_map_from_aff(aff);
}
/* Is the conditional validity dependence in the edge with index "edge_index"
 * violated by the latest (i.e., final) row of the schedule?
 * That is, is i scheduled after j
 * for any conditional validity dependence i -> j?
 */
static int is_violated(struct isl_sched_graph *graph, int edge_index)
{
	isl_map *src_sched, *dst_sched, *map;
	struct isl_sched_edge *edge = &graph->edge[edge_index];
	int empty;

	src_sched = final_row(edge->src);
	dst_sched = final_row(edge->dst);
	map = isl_map_copy(edge->map);
	map = isl_map_apply_domain(map, src_sched);
	map = isl_map_apply_range(map, dst_sched);
	map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	if (empty < 0)
		return -1;

	return !empty;
}
/* Does "graph" have any satisfied condition edges that
 * are adjacent to the conditional validity constraint with
 * domain "conditional_source" and range "conditional_sink"?
 *
 * A satisfied condition is one that is not local.
 * If a condition was forced to be local already (i.e., marked as local)
 * then there is no need to check if it is in fact local.
 *
 * Additionally, mark all adjacent condition edges found as local.
 */
static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
	__isl_keep isl_union_set *conditional_source,
	__isl_keep isl_union_set *conditional_sink)
{
	int i;
	int any = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent, local;
		isl_union_map *condition;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;

		condition = graph->edge[i].tagged_condition;
		adjacent = domain_intersects(condition, conditional_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(condition,
						conditional_source);
		if (adjacent < 0)
			return -1;
		if (!adjacent)
			continue;

		set_local(&graph->edge[i]);

		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			return -1;
		if (!local)
			any = 1;
	}

	return any;
}
/* Are there any violated conditional validity dependences with
 * adjacent condition dependences that are not local with respect
 * to the current schedule?
 * That is, is the conditional validity constraint violated?
 *
 * Additionally, mark all those adjacent condition dependences as local.
 * We also mark those adjacent condition dependences that were not marked
 * as local before, but just happened to be local already.  This ensures
 * that they remain local if the schedule is recomputed.
 *
 * We first collect domain and range of all violated conditional validity
 * dependences and then check if there are any adjacent non-local
 * condition dependences.
 */
static int has_violated_conditional_constraint(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		isl_union_set *uset;
		isl_union_map *umap;
		int violated;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;

		violated = is_violated(graph, i);
		if (violated < 0)
			goto error;
		if (!violated)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);
		source = isl_union_set_coalesce(source);

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
		sink = isl_union_set_coalesce(sink);
	}

	if (any)
		any = has_adjacent_true_conditions(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return any;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}
/* Examine the current band (the rows between graph->band_start and
 * graph->n_total_row), deciding whether to drop it or add it to "node"
 * and then continue with the computation of the next band, if any.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * The caller keeps looking for a new row as long as
 * graph->n_row < graph->maxvar.  If the latest attempt to find
 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
 * then we either
 * - split between SCCs and start over (assuming we found an interesting
 *	pair of SCCs between which to split)
 * - continue with the next band (assuming the current band has at least
 *	one row)
 * - try to carry as many dependences as possible and continue with the next
 *	band
 * In each case, we first insert a band node in the schedule tree
 * if any rows have been computed.
 *
 * If the caller managed to complete the schedule, we insert a band node
 * (if any schedule rows were computed) and we finish off by topologically
 * sorting the statements based on the remaining dependences.
 */
static __isl_give isl_schedule_node *compute_schedule_finish_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	int insert;

	if (!node)
		return NULL;

	if (graph->n_row < graph->maxvar) {
		isl_ctx *ctx;
		int empty = graph->n_total_row == graph->band_start;

		ctx = isl_schedule_node_get_ctx(node);
		if (!ctx->opt->schedule_maximize_band_depth && !empty)
			return compute_next_band(node, graph, 1);
		if (graph->src_scc >= 0)
			return compute_split_schedule(node, graph);
		if (!empty)
			return compute_next_band(node, graph, 1);
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	insert = graph->n_total_row > graph->band_start;
	if (insert) {
		node = insert_current_band(node, graph, 1);
		node = isl_schedule_node_child(node, 0);
	}
	node = sort_statements(node, graph, initialized);
	if (insert)
		node = isl_schedule_node_parent(node);

	return node;
}
/* Construct a band of schedule rows for a connected dependence graph.
 * The caller is responsible for determining the strongly connected
 * components and calling compute_maxvar first.
 *
 * We try to find a sequence of as many schedule rows as possible that result
 * in non-negative dependence distances (independent of the previous rows
 * in the sequence, i.e., such that the sequence is tilable), with as
 * many of the initial rows as possible satisfying the coincidence constraints.
 * The computation stops if we can't find any more rows or if we have found
 * all the rows we wanted to find.
 *
 * If ctx->opt->schedule_outer_coincidence is set, then we force the
 * outermost dimension to satisfy the coincidence constraints.  If this
 * turns out to be impossible, we fall back on the general scheme above
 * and try to carry as many dependences as possible.
 *
 * If "graph" contains both condition and conditional validity dependences,
 * then we need to check that the conditional schedule constraint
 * is satisfied, i.e., there are no violated conditional validity dependences
 * that are adjacent to any non-local condition dependences.
 * If there are, then we mark all those adjacent condition dependences
 * as local and recompute the current band.  Those dependences that
 * are marked local will then be forced to be local.
 * The initial computation is performed with no dependences marked as local.
 * If we are lucky, then there will be no violated conditional validity
 * dependences adjacent to any non-local condition dependences.
 * Otherwise, we mark some additional condition dependences as local and
 * recompute.  We continue this process until there are no violations left or
 * until we are no longer able to compute a schedule.
 * Since there are only a finite number of dependences,
 * there will only be a finite number of iterations.
 */
static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int has_coincidence;
	int use_coincidence;
	int force_coincidence = 0;
	int check_conditional;

	if (sort_sccs(graph) < 0)
		return isl_stat_error;

	clear_local_edges(graph);
	check_conditional = need_condition_check(graph);
	has_coincidence = has_any_coincidence(graph);

	if (ctx->opt->schedule_outer_coincidence)
		force_coincidence = 1;

	use_coincidence = has_coincidence;
	while (graph->n_row < graph->maxvar) {
		isl_vec *sol;
		int violated;
		int coincident;

		graph->src_scc = -1;
		graph->dst_scc = -1;

		if (setup_lp(ctx, graph, use_coincidence) < 0)
			return isl_stat_error;
		sol = solve_lp(graph);
		if (!sol)
			return isl_stat_error;
		if (sol->size == 0) {
			int empty = graph->n_total_row == graph->band_start;

			isl_vec_free(sol);
			if (use_coincidence && (!force_coincidence || !empty)) {
				use_coincidence = 0;
				continue;
			}
			return isl_stat_ok;
		}
		coincident = !has_coincidence || use_coincidence;
		if (update_schedule(graph, sol, 1, coincident) < 0)
			return isl_stat_error;

		if (!check_conditional)
			continue;
		violated = has_violated_conditional_constraint(ctx, graph);
		if (violated < 0)
			return isl_stat_error;
		if (!violated)
			continue;
		if (reset_band(graph) < 0)
			return isl_stat_error;
		use_coincidence = has_coincidence;
	}

	return isl_stat_ok;
}
/* Compute a schedule for a connected dependence graph by considering
 * the graph as a whole and return the updated schedule node.
 *
 * The actual schedule rows of the current band are computed by
 * compute_schedule_wcc_band.  compute_schedule_finish_band takes
 * care of integrating the band into "node" and continuing
 * with the computation of the next band, if any.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (compute_schedule_wcc_band(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	return compute_schedule_finish_band(node, graph, 1);
}
/* Clustering information used by compute_schedule_wcc_clustering.
 *
 * "n" is the number of SCCs in the original dependence graph
 * "scc" is an array of "n" elements, each representing an SCC
 * of the original dependence graph.  All entries in the same cluster
 * have the same number of schedule rows.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
 * where each cluster is represented by the index of the first SCC
 * in the cluster.  Initially, each SCC belongs to a cluster containing
 * only that SCC.
 *
 * "scc_in_merge" is used by merge_clusters_along_edge to keep
 * track of which SCCs need to be merged.
 *
 * "cluster" contains the merged clusters of SCCs after the clustering
 * has completed.
 *
 * "scc_node" is a temporary data structure used inside copy_partial.
 * For each SCC, it keeps track of the number of nodes in the SCC
 * that have already been copied.
 */
struct isl_clustering {
	int n;
	struct isl_sched_graph *scc;
	struct isl_sched_graph *cluster;
	int *scc_cluster;
	int *scc_node;
	int *scc_in_merge;
};
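/* For example, with four SCCs where SCC 0 and SCC 1 have been merged into
 * one cluster and SCC 2 and SCC 3 into another, scc_cluster would contain
 * { 0, 0, 2, 2 }: each entry points to the first SCC of its cluster, and
 * the "cluster" array is only filled in at those representative positions.
 * (Illustrative configuration only.)
 */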
/* Initialize the clustering data structure "c" from "graph".
 *
 * In particular, allocate memory, extract the SCCs from "graph"
 * into c->scc, initialize scc_cluster and construct
 * a band of schedule rows for each SCC.
 * Within each extracted sub-graph, there is only one SCC by definition.
 * Each SCC initially belongs to a cluster containing only that SCC.
 */
static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *graph)
{
	int i;

	c->n = graph->scc;
	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
	c->scc_node = isl_calloc_array(ctx, int, c->n);
	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
	if (!c->scc || !c->cluster ||
	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
		return isl_stat_error;

	for (i = 0; i < c->n; ++i) {
		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
					&edge_scc_exactly, i, &c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_maxvar(&c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = i;
	}

	return isl_stat_ok;
}
/* Free all memory allocated for "c".
 */
static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
{
	int i;

	if (c->scc)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->scc[i]);
	free(c->scc);
	if (c->cluster)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->cluster[i]);
	free(c->cluster);
	free(c->scc_cluster);
	free(c->scc_node);
	free(c->scc_in_merge);
}
/* Should we refrain from merging the cluster in "graph" with
 * any other cluster?
 * In particular, is its current schedule band empty and incomplete?
 */
static int bad_cluster(struct isl_sched_graph *graph)
{
	return graph->n_row < graph->maxvar &&
		graph->n_total_row == graph->band_start;
}
/* Return the index of an edge in "graph" that can be used to merge
 * two clusters in "c".
 * Return graph->n_edge if no such edge can be found.
 * Return -1 on error.
 *
 * In particular, return a proximity edge between two clusters
 * that is not marked "no_merge" and such that neither of the
 * two clusters has an incomplete, empty band.
 *
 * If there are multiple such edges, then try and find the most
 * appropriate edge to use for merging.  In particular, pick the edge
 * with the greatest weight.  If there are multiple of those,
 * then pick one with the shortest distance between
 * the two cluster representatives.
 */
static int find_proximity(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i, best = graph->n_edge, best_dist, best_weight;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int dist, weight;

		if (!is_proximity(edge))
			continue;
		if (edge->no_merge)
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		dist = c->scc_cluster[edge->dst->scc] -
			c->scc_cluster[edge->src->scc];
		if (dist == 0)
			continue;
		weight = edge->weight;
		if (best < graph->n_edge) {
			if (best_weight > weight)
				continue;
			if (best_weight == weight && best_dist <= dist)
				continue;
		}
		best = i;
		best_dist = dist;
		best_weight = weight;
	}

	return best;
}
/* Internal data structure used in mark_merge_sccs.
 *
 * "graph" is the dependence graph in which a strongly connected
 * component is constructed.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
 * "src" and "dst" are the indices of the nodes that are being merged.
 */
struct isl_mark_merge_sccs_data {
	struct isl_sched_graph *graph;
	int *scc_cluster;
	int src;
	int dst;
};

/* Check whether the cluster containing node "i" depends on the cluster
 * containing node "j".  If "i" and "j" belong to the same cluster,
 * then they are taken to depend on each other to ensure that
 * the resulting strongly connected component consists of complete
 * clusters.  Furthermore, if "i" and "j" are the two nodes that
 * are being merged, then they are taken to depend on each other as well.
 * Otherwise, check if there is a (conditional) validity dependence
 * from node[j] to node[i], forcing node[i] to follow node[j].
 */
static isl_bool cluster_follows(int i, int j, void *user)
{
	struct isl_mark_merge_sccs_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	int *scc_cluster = data->scc_cluster;

	if (data->src == i && data->dst == j)
		return isl_bool_true;
	if (data->src == j && data->dst == i)
		return isl_bool_true;
	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
		return isl_bool_true;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Mark all SCCs that belong to either of the two clusters in "c"
 * connected by the edge in "graph" with index "edge", or to any
 * of the intermediate clusters.
 * The marking is recorded in c->scc_in_merge.
 *
 * The given edge has been selected for merging two clusters,
 * meaning that there is at least a proximity edge between the two nodes.
 * However, there may also be (indirect) validity dependences
 * between the two nodes.  When merging the two clusters, all clusters
 * containing one or more of the intermediate nodes along the
 * indirect validity dependences need to be merged in as well.
 *
 * First collect all such nodes by computing the strongly connected
 * component (SCC) containing the two nodes connected by the edge, where
 * the two nodes are considered to depend on each other to make
 * sure they end up in the same SCC.  Similarly, each node is considered
 * to depend on every other node in the same cluster to ensure
 * that the SCC consists of complete clusters.
 *
 * Then the original SCCs that contain any of these nodes are marked
 * in c->scc_in_merge.
 */
static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	int edge, struct isl_clustering *c)
{
	struct isl_mark_merge_sccs_data data;
	struct isl_tarjan_graph *g;
	int i;

	for (i = 0; i < c->n; ++i)
		c->scc_in_merge[i] = 0;

	data.graph = graph;
	data.scc_cluster = c->scc_cluster;
	data.src = graph->edge[edge].src - graph->node;
	data.dst = graph->edge[edge].dst - graph->node;

	g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
					&cluster_follows, &data);
	if (!g)
		return isl_stat_error;

	i = g->op;
	if (i < 3)
		isl_die(ctx, isl_error_internal,
			"expecting at least two nodes in component",
			goto error);
	if (g->order[--i] != -1)
		isl_die(ctx, isl_error_internal,
			"expecting end of component marker", goto error);

	for (--i; i >= 0 && g->order[i] != -1; --i) {
		int scc = graph->node[g->order[i]].scc;
		c->scc_in_merge[scc] = 1;
	}

	isl_tarjan_graph_free(g);
	return isl_stat_ok;
error:
	isl_tarjan_graph_free(g);
	return isl_stat_error;
}
/* Construct the identifier "cluster_i".
 */
static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
{
	char name[40];

	snprintf(name, sizeof(name), "cluster_%d", i);
	return isl_id_alloc(ctx, name, NULL);
}
/* Construct the space of the cluster with index "i" containing
 * the strongly connected component "scc".
 *
 * In particular, construct a space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of "scc".
 */
static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
{
	int nvar;
	isl_space *space;
	isl_id *id;

	nvar = scc->n_total_row - scc->band_start;
	space = isl_space_copy(scc->node[0].space);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, nvar);
	id = cluster_id(isl_space_get_ctx(space), i);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);

	return space;
}
/* Collect the domain of the graph for merging clusters.
 *
 * In particular, for each cluster with first SCC "i", construct
 * a set in the space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of the cluster.
 */
static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
{
	int i;
	isl_space *space;
	isl_union_set *domain;

	space = isl_space_params_alloc(ctx, 0);
	domain = isl_union_set_empty(space);

	for (i = 0; i < graph->scc; ++i) {
		if (!c->scc_in_merge[i])
			continue;
		if (c->scc_cluster[i] != i)
			continue;
		space = cluster_space(&c->scc[i], i);
		domain = isl_union_set_add_set(domain, isl_set_universe(space));
	}

	return domain;
}
/* Construct a map from the original instances to the corresponding
 * cluster instance in the current bands of the clusters in "c".
 */
static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
{
	int i, j;
	isl_space *space;
	isl_union_map *cluster_map;

	space = isl_space_params_alloc(ctx, 0);
	cluster_map = isl_union_map_empty(space);
	for (i = 0; i < graph->scc; ++i) {
		int start, n;
		isl_id *id;

		if (!c->scc_in_merge[i])
			continue;

		id = cluster_id(ctx, c->scc_cluster[i]);
		start = c->scc[i].band_start;
		n = c->scc[i].n_total_row - start;
		for (j = 0; j < c->scc[i].n; ++j) {
			isl_map *map;
			isl_multi_aff *ma;
			struct isl_sched_node *node = &c->scc[i].node[j];

			ma = node_extract_partial_schedule_multi_aff(node,
								start, n);
			ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
							isl_id_copy(id));
			map = isl_map_from_multi_aff(ma);
			cluster_map = isl_union_map_add_map(cluster_map, map);
		}
		isl_id_free(id);
	}

	return cluster_map;
}
/* Add "umap" to the schedule constraints "sc" of all types of "edge"
 * that are not isl_edge_condition or isl_edge_conditional_validity.
 */
static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type t;

	if (!sc)
		return NULL;

	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
		if (t == isl_edge_condition ||
		    t == isl_edge_conditional_validity)
			continue;
		if (!is_type(edge, t))
			continue;
		sc->constraint[t] = isl_union_map_union(sc->constraint[t],
					    isl_union_map_copy(umap));
		if (!sc->constraint[t])
			return isl_schedule_constraints_free(sc);
	}

	return sc;
}
/* Add schedule constraints of types isl_edge_condition and
 * isl_edge_conditional_validity to "sc" by applying "umap" to
 * the domains of the wrapped relations in domain and range
 * of the corresponding tagged constraints of "edge".
 */
static __isl_give isl_schedule_constraints *add_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type t;
	isl_union_map *tagged;

	for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
		if (!is_type(edge, t))
			continue;
		if (t == isl_edge_condition)
			tagged = isl_union_map_copy(edge->tagged_condition);
		else
			tagged = isl_union_map_copy(edge->tagged_validity);
		tagged = isl_union_map_zip(tagged);
		tagged = isl_union_map_apply_domain(tagged,
					isl_union_map_copy(umap));
		tagged = isl_union_map_zip(tagged);
		sc->constraint[t] = isl_union_map_union(sc->constraint[t],
							tagged);
		if (!sc->constraint[t])
			return isl_schedule_constraints_free(sc);
	}

	return sc;
}
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to the original constraints represented by "edge".
 *
 * For non-tagged dependence constraints, the cluster constraints
 * are obtained by applying "cluster_map" to the edge->map.
 *
 * For tagged dependence constraints, "cluster_map" needs to be applied
 * to the domains of the wrapped relations in domain and range
 * of the tagged dependence constraints.  Pick out the mappings
 * from these domains from "cluster_map" and construct their product.
 * This mapping can then be applied to the pair of domains.
 */
static __isl_give isl_schedule_constraints *collect_edge_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
{
	isl_union_map *umap;
	isl_space *space;
	isl_union_set *uset;
	isl_union_map *umap1, *umap2;

	if (!sc)
		return NULL;

	umap = isl_union_map_from_map(isl_map_copy(edge->map));
	umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(cluster_map));
	umap = isl_union_map_apply_range(umap,
				isl_union_map_copy(cluster_map));
	sc = add_non_conditional_constraints(edge, umap, sc);
	isl_union_map_free(umap);

	if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
		return sc;

	space = isl_space_domain(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap1 = isl_union_map_copy(cluster_map);
	umap1 = isl_union_map_intersect_domain(umap1, uset);
	space = isl_space_range(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap2 = isl_union_map_copy(cluster_map);
	umap2 = isl_union_map_intersect_domain(umap2, uset);
	umap = isl_union_map_product(umap1, umap2);

	sc = add_conditional_constraints(edge, umap, sc);

	isl_union_map_free(umap);
	return sc;
}
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to all edges in "graph" between nodes that
 * belong to SCCs that are marked for merging in "scc_in_merge".
 */
static __isl_give isl_schedule_constraints *collect_constraints(
	struct isl_sched_graph *graph, int *scc_in_merge,
	__isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
			continue;
		if (!scc_in_merge[edge->dst->scc])
			continue;
		sc = collect_edge_constraints(edge, cluster_map, sc);
	}

	return sc;
}
/* Construct a dependence graph for scheduling clusters with respect
 * to each other and store the result in "merge_graph".
 * In particular, the nodes of the graph correspond to the schedule
 * dimensions of the current bands of those clusters that have been
 * marked for merging in "c".
 *
 * First construct an isl_schedule_constraints object for this domain
 * by transforming the edges in "graph" to the domain.
 * Then initialize a dependence graph for scheduling from these
 * constraints.
 */
static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	isl_union_set *domain;
	isl_union_map *cluster_map;
	isl_schedule_constraints *sc;
	isl_stat r;

	domain = collect_domain(ctx, graph, c);
	sc = isl_schedule_constraints_on_domain(domain);
	if (!sc)
		return isl_stat_error;
	cluster_map = collect_cluster_map(ctx, graph, c);
	sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
	isl_union_map_free(cluster_map);

	r = graph_init(merge_graph, sc);

	isl_schedule_constraints_free(sc);

	return r;
}
/* Compute the maximal number of remaining schedule rows that still need
 * to be computed for the nodes that belong to clusters with the maximal
 * dimension for the current band (i.e., the band that is to be merged).
 * Only clusters that are about to be merged are considered.
 * "maxvar" is the maximal dimension for the current band.
 * "c" contains information about the clusters.
 *
 * Return the maximal number of remaining schedule rows or -1 on error.
 */
static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
{
	int i, j;
	int max_slack = 0;

	for (i = 0; i < c->n; ++i) {
		int nvar;
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
			continue;
		scc = &c->scc[i];
		nvar = scc->n_total_row - scc->band_start;
		if (nvar != maxvar)
			continue;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];
			int slack;

			if (node_update_cmap(node) < 0)
				return -1;
			slack = node->nvar - node->rank;
			if (slack > max_slack)
				max_slack = slack;
		}
	}

	return max_slack;
}
/* If there are any clusters where the dimension of the current band
 * (i.e., the band that is to be merged) is smaller than "maxvar" and
 * if there are any nodes in such a cluster where the number
 * of remaining schedule rows that still need to be computed
 * is greater than "max_slack", then return the smallest current band
 * dimension of all these clusters.  Otherwise return the original value
 * of "maxvar".  Return -1 in case of any error.
 * Only clusters that are about to be merged are considered.
 * "c" contains information about the clusters.
 */
static int limit_maxvar_to_slack(int maxvar, int max_slack,
	struct isl_clustering *c)
{
	int i, j;

	for (i = 0; i < c->n; ++i) {
		int nvar;
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
			continue;
		scc = &c->scc[i];
		nvar = scc->n_total_row - scc->band_start;
		if (nvar >= maxvar)
			continue;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];
			int slack;

			if (node_update_cmap(node) < 0)
				return -1;
			slack = node->nvar - node->rank;
			if (slack > max_slack) {
				maxvar = nvar;
				break;
			}
		}
	}

	return maxvar;
}
/* Adjust merge_graph->maxvar based on the number of remaining schedule rows
 * that still need to be computed.  In particular, if there is a node
 * in a cluster where the dimension of the current band is smaller
 * than merge_graph->maxvar, but the number of remaining schedule rows
 * is greater than that of any node in a cluster with the maximal
 * dimension for the current band (i.e., merge_graph->maxvar),
 * then adjust merge_graph->maxvar to the (smallest) current band dimension
 * of those clusters.  Without this adjustment, the total number of
 * schedule dimensions would be increased, resulting in a skewed view
 * of the number of coincident dimensions.
 * "c" contains information about the clusters.
 *
 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
 * then there is no point in attempting any merge since it will be rejected
 * anyway.  Set merge_graph->maxvar to zero in such cases.
 */
static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
	struct isl_sched_graph *merge_graph, struct isl_clustering *c)
{
	int max_slack, maxvar;

	max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
	if (max_slack < 0)
		return isl_stat_error;
	maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
	if (maxvar < 0)
		return isl_stat_error;

	if (maxvar < merge_graph->maxvar) {
		if (isl_options_get_schedule_maximize_band_depth(ctx))
			merge_graph->maxvar = 0;
		else
			merge_graph->maxvar = maxvar;
	}

	return isl_stat_ok;
}
/* Return the number of coincident dimensions in the current band of "graph",
 * where the nodes of "graph" are assumed to be scheduled by a single band.
 */
static int get_n_coincident(struct isl_sched_graph *graph)
{
	int i;

	for (i = graph->band_start; i < graph->n_total_row; ++i)
		if (!graph->node[0].coincident[i])
			break;

	return i - graph->band_start;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph", given that
 * coincidence should be maximized?
 *
 * If the number of coincident schedule dimensions in the merged band
 * would be less than the maximal number of coincident schedule dimensions
 * in any of the merged clusters, then the clusters should not be merged.
 */
static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;
	int n_coincident;
	int max_coincident = 0;

	for (i = 0; i < c->n; ++i) {
		if (!c->scc_in_merge[i])
			continue;
		n_coincident = get_n_coincident(&c->scc[i]);
		if (n_coincident > max_coincident)
			max_coincident = n_coincident;
	}

	n_coincident = get_n_coincident(merge_graph);

	return n_coincident >= max_coincident;
}
/* Return the transformation on "node" expressed by the current (and only)
 * band of "merge_graph" applied to the clusters in "c".
 *
 * First find the representation of "node" in its SCC in "c" and
 * extract the transformation expressed by the current band.
 * Then extract the transformation applied by "merge_graph"
 * to the cluster to which this SCC belongs.
 * Combine the two to obtain the complete transformation on the node.
 *
 * Note that the range of the first transformation is an anonymous space,
 * while the domain of the second is named "cluster_X".  The range
 * of the former therefore needs to be adjusted before the two
 * are combined.
 */
static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
	struct isl_sched_node *node, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	struct isl_sched_node *scc_node, *cluster_node;
	int start, n;
	isl_id *id;
	isl_space *space;
	isl_multi_aff *ma, *ma2;

	scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
	start = c->scc[node->scc].band_start;
	n = c->scc[node->scc].n_total_row - start;
	ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
	space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
	cluster_node = graph_find_node(ctx, merge_graph, space);
	if (space && !cluster_node)
		isl_die(ctx, isl_error_internal, "unable to find cluster",
			space = isl_space_free(space));
	id = isl_space_get_tuple_id(space, isl_dim_set);
	ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
	isl_space_free(space);
	n = merge_graph->n_total_row;
	ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
	ma = isl_multi_aff_pullback_multi_aff(ma2, ma);

	return isl_map_from_multi_aff(ma);
}
/* Given a set of distances "set", are they bounded by a small constant
 * in direction "pos"?
 * In practice, check if they are bounded by 2 by checking that there
 * are no elements with a value greater than or equal to 3 or
 * smaller than or equal to -3.
 */
static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
{
	isl_set *test;
	isl_bool bounded;

	if (!set)
		return isl_bool_error;

	test = isl_set_copy(set);
	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	if (bounded < 0 || !bounded)
		return bounded;

	test = isl_set_copy(set);
	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	return bounded;
}
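/* For example, the distance set { [d] : -2 <= d <= 2 } is considered
 * bounded in direction 0, while { [d] : d >= 0 } is not, since the latter
 * contains elements with value greater than or equal to 3.
 */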
/* Does the set "set" have a fixed (but possibly parametric) value
 * at dimension "pos"?
 */
static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
{
	int n;
	isl_bool single;

	if (!set)
		return isl_bool_error;
	set = isl_set_copy(set);
	n = isl_set_dim(set, isl_dim_set);
	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
	set = isl_set_project_out(set, isl_dim_set, 0, pos);
	single = isl_set_is_singleton(set);
	isl_set_free(set);

	return single;
}
/* Does "map" have a fixed (but possibly parametric) value
 * at dimension "pos" of either its domain or its range?
 */
static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
{
	isl_set *set;
	isl_bool single;

	set = isl_map_domain(isl_map_copy(map));
	single = has_single_value(set, pos);
	isl_set_free(set);

	if (single < 0 || single)
		return single;

	set = isl_map_range(isl_map_copy(map));
	single = has_single_value(set, pos);
	isl_set_free(set);

	return single;
}
/* Does the edge "edge" from "graph" have bounded dependence distances
 * in the merged graph "merge_graph" of a selection of clusters in "c"?
 *
 * Extract the complete transformations of the source and destination
 * nodes of the edge, apply them to the edge constraints and
 * compute the differences.  Finally, check if these differences are bounded
 * in each direction.
 *
 * If the dimension of the band is greater than the number of
 * dimensions that can be expected to be optimized by the edge
 * (based on its weight), then also allow the differences to be unbounded
 * in the remaining dimensions, but only if either the source or
 * the destination has a fixed value in that direction.
 * This allows a statement that produces values that are used by
 * several instances of another statement to be merged with that
 * other statement.
 * However, merging such clusters will introduce an inherently
 * large proximity distance inside the merged cluster, meaning
 * that proximity distances will no longer be optimized in
 * subsequent merges.  These merges are therefore only allowed
 * after all other possible merges have been tried.
 * The first time such a merge is encountered, the weight of the edge
 * is replaced by a negative weight.  The second time (i.e., after
 * all merges over edges with a non-negative weight have been tried),
 * the merge is allowed.
 */
static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i, n, n_slack;
	isl_bool bounded;
	isl_map *map, *t;
	isl_set *dist;

	map = isl_map_copy(edge->map);
	t = extract_node_transformation(ctx, edge->src, c, merge_graph);
	map = isl_map_apply_domain(map, t);
	t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
	map = isl_map_apply_range(map, t);
	dist = isl_map_deltas(isl_map_copy(map));

	bounded = isl_bool_true;
	n = isl_set_dim(dist, isl_dim_set);
	n_slack = n - edge->weight;
	if (edge->weight < 0)
		n_slack -= graph->max_weight + 1;
	for (i = 0; i < n; ++i) {
		isl_bool bounded_i, singular_i;

		bounded_i = distance_is_bounded(dist, i);
		if (bounded_i < 0)
			goto error;
		if (bounded_i)
			continue;
		if (edge->weight >= 0)
			bounded = isl_bool_false;
		n_slack--;
		if (n_slack < 0)
			break;
		singular_i = has_singular_src_or_dst(map, i);
		if (singular_i < 0)
			goto error;
		if (singular_i)
			continue;
		bounded = isl_bool_false;
		break;
	}
	if (!bounded && i >= n && edge->weight >= 0)
		edge->weight -= graph->max_weight + 1;

	isl_map_free(map);
	isl_set_free(dist);

	return bounded;
error:
	isl_map_free(map);
	isl_set_free(dist);
	return isl_bool_error;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * In particular, is there at least one proximity constraint
 * that is optimized by the merge?
 *
 * A proximity constraint is considered to be optimized
 * if the dependence distances are small.
 */
static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_bool bounded;

		if (!is_proximity(edge))
			continue;
		if (!c->scc_in_merge[edge->src->scc])
			continue;
		if (!c->scc_in_merge[edge->dst->scc])
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;
		bounded = has_bounded_distances(ctx, edge, graph, c,
						merge_graph);
		if (bounded < 0 || bounded)
			return bounded;
	}

	return isl_bool_false;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * If the current band is empty, then the clusters should not be merged.
 *
 * If the band depth should be maximized and the merge schedule
 * is incomplete (meaning that the dimension of some of the schedule
 * bands in the original schedule will be reduced), then the clusters
 * should not be merged.
 *
 * If the schedule_maximize_coincidence option is set, then check that
 * the number of coincident schedule dimensions is not reduced.
 *
 * Finally, only allow the merge if at least one proximity
 * constraint is optimized.
 */
static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	if (merge_graph->n_total_row == merge_graph->band_start)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
	    merge_graph->n_total_row < merge_graph->maxvar)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
		isl_bool ok;

		ok = ok_to_merge_coincident(c, merge_graph);
		if (ok < 0 || !ok)
			return ok;
	}

	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
}
/* Apply the schedule in "t_node" to the "n" rows starting at "first"
 * of the schedule in "node" and return the result.
 *
 * That is, essentially compute
 *
 *	T * N(first:first+n-1)
 *
 * taking into account the constant term and the parameter coefficients.
 */
static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
	struct isl_sched_node *t_node, struct isl_sched_node *node,
	int first, int n)
{
	int i, j;
	isl_mat *t;
	int n_row, n_col, n_param, n_var;

	n_param = node->nparam;
	n_var = node->nvar;
	n_row = isl_mat_rows(t_node->sched);
	n_col = isl_mat_cols(node->sched);
	t = isl_mat_alloc(ctx, n_row, n_col);
	if (!t)
		return NULL;
	for (i = 0; i < n_row; ++i) {
		isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
		isl_seq_clr(t->row[i] + 1 + n_param, n_var);
		for (j = 0; j < n; ++j)
			isl_seq_addmul(t->row[i],
					t_node->sched->row[i][1 + n_param + j],
					node->sched->row[first + j],
					1 + n_param + n_var);
	}

	return t;
}
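/* As a small worked example of the computation above (illustrative numbers
 * only): with n_param = 1, n_var = 2, a single cluster row
 * T = [ 1 0 | 2 3 ] (constant, parameter, then two band coefficients) and
 * band rows
 *
 *	N(first)     = [ 0 1 | 1 0 ]
 *	N(first + 1) = [ 4 0 | 0 1 ]
 *
 * the resulting row is [ 1 0 | 0 0 ] + 2 * [ 0 1 | 1 0 ] + 3 * [ 4 0 | 0 1 ]
 * = [ 13 2 | 2 3 ], i.e., the constant term and parameter coefficient of T
 * are added to the combination of the original band rows.
 */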
/* Apply the cluster schedule in "t_node" to the current band
 * schedule of the nodes in "graph".
 *
 * In particular, replace the rows starting at band_start
 * by the result of applying the cluster schedule in "t_node"
 * to the original rows.
 *
 * The coincidence of the schedule is determined by the coincidence
 * of the cluster schedule.
 */
static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_sched_node *t_node)
{
	int i, j;
	int n_new;
	int start, n;

	start = graph->band_start;
	n = graph->n_total_row - start;

	n_new = isl_mat_rows(t_node->sched);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		isl_mat *t;

		t = node_transformation(ctx, t_node, node, start, n);
		node->sched = isl_mat_drop_rows(node->sched, start, n);
		node->sched = isl_mat_concat(node->sched, t);
		node->sched_map = isl_map_free(node->sched_map);
		if (!node->sched)
			return isl_stat_error;
		for (j = 0; j < n_new; ++j)
			node->coincident[start + j] = t_node->coincident[j];
	}
	graph->n_total_row -= n;
	graph->n_row -= n;
	graph->n_total_row += n_new;
	graph->n_row += n_new;

	return isl_stat_ok;
}
/* Merge the clusters marked for merging in "c" into a single
 * cluster using the cluster schedule in the current band of "merge_graph".
 * The representative SCC for the new cluster is the SCC with
 * the smallest index.
 *
 * The current band schedule of each SCC in the new cluster is obtained
 * by applying the schedule of the corresponding original cluster
 * to the original band schedule.
 * All SCCs in the new cluster have the same number of schedule rows.
 */
static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;
	int cluster = -1;
	isl_space *space;

	for (i = 0; i < c->n; ++i) {
		struct isl_sched_node *node;

		if (!c->scc_in_merge[i])
			continue;
		if (cluster < 0)
			cluster = i;
		space = cluster_space(&c->scc[i], c->scc_cluster[i]);
		if (!space)
			return isl_stat_error;
		node = graph_find_node(ctx, merge_graph, space);
		isl_space_free(space);
		if (!node)
			isl_die(ctx, isl_error_internal,
				"unable to find cluster",
				return isl_stat_error);
		if (transform(ctx, &c->scc[i], node) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = cluster;
	}

	return isl_stat_ok;
}
/* Try and merge the clusters of SCCs marked in c->scc_in_merge
 * by scheduling the current cluster bands with respect to each other.
 *
 * Construct a dependence graph with a space for each cluster and
 * with the coordinates of each space corresponding to the schedule
 * dimensions of the current band of that cluster.
 * Construct a cluster schedule in this cluster dependence graph and
 * apply it to the current cluster bands if it is applicable
 * according to ok_to_merge.
 *
 * If the number of remaining schedule dimensions in a cluster
 * with a non-maximal current schedule dimension is greater than
 * the number of remaining schedule dimensions in clusters
 * with a maximal current schedule dimension, then restrict
 * the number of rows to be computed in the cluster schedule
 * to the minimal such non-maximal current schedule dimension.
 * Do this by adjusting merge_graph.maxvar.
 *
 * Return isl_bool_true if the clusters have effectively been merged
 * into a single cluster.
 *
 * Note that since the standard scheduling algorithm minimizes the maximal
 * distance over proximity constraints, the proximity constraints between
 * the merged clusters may not be optimized any further than what is
 * sufficient to bring the distances within the limits of the internal
 * proximity constraints inside the individual clusters.
 * It may therefore make sense to perform an additional translation step
 * to bring the clusters closer to each other, while maintaining
 * the linear part of the merging schedule found using the standard
 * scheduling algorithm.
 */
static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	struct isl_sched_graph merge_graph = { 0 };
	isl_bool merged;

	if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
		goto error;

	if (compute_maxvar(&merge_graph) < 0)
		goto error;
	if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
		goto error;
	if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
		goto error;
	merged = ok_to_merge(ctx, graph, c, &merge_graph);
	if (merged && merge(ctx, c, &merge_graph) < 0)
		goto error;

	graph_free(ctx, &merge_graph);
	return merged;
error:
	graph_free(ctx, &merge_graph);
	return isl_bool_error;
}
/* Is there any edge marked "no_merge" between two SCCs that are
 * about to be merged (i.e., that are set in "scc_in_merge")?
 * "merge_edge" is the proximity edge along which the clusters of SCCs
 * are going to be merged.
 *
 * If there is any edge between two SCCs with a negative weight,
 * while the weight of "merge_edge" is non-negative, then this
 * means that the edge was postponed.  "merge_edge" should then
 * also be postponed since merging along the edge with negative weight should
 * be postponed until all edges with non-negative weight have been tried.
 * Replace the weight of "merge_edge" by a negative weight as well and
 * tell the caller not to attempt a merge.
 */
static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
	struct isl_sched_edge *merge_edge)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
			continue;
		if (!scc_in_merge[edge->dst->scc])
			continue;
		if (edge->no_merge)
			return 1;
		if (merge_edge->weight >= 0 && edge->weight < 0) {
			merge_edge->weight -= graph->max_weight + 1;
			return 1;
		}
	}

	return 0;
}
/* Merge the two clusters in "c" connected by the edge in "graph"
 * with index "edge" into a single cluster.
 * If it turns out to be impossible to merge these two clusters,
 * then mark the edge as "no_merge" such that it will not be
 * considered again.
 *
 * First mark all SCCs that need to be merged.  This includes the SCCs
 * in the two clusters, but it may also include the SCCs
 * of intermediate clusters.
 * If there is already a no_merge edge between any pair of such SCCs,
 * then simply mark the current edge as no_merge as well.
 * Likewise, if any of those edges was postponed by has_bounded_distances,
 * then postpone the current edge as well.
 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
 * if the clusters did not end up getting merged, unless the non-merge
 * is due to the fact that the edge was postponed.  This postponement
 * can be recognized by a change in weight (from non-negative to negative).
 */
static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
	struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
{
	isl_bool merged;
	int edge_weight = graph->edge[edge].weight;

	if (mark_merge_sccs(ctx, graph, edge, c) < 0)
		return isl_stat_error;

	if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
		merged = isl_bool_false;
	else
		merged = try_merge(ctx, graph, c);
	if (merged < 0)
		return isl_stat_error;
	if (!merged && edge_weight == graph->edge[edge].weight)
		graph->edge[edge].no_merge = 1;

	return isl_stat_ok;
}
/* Does "node" belong to the cluster identified by "cluster"?
 */
static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
{
	return node->cluster == cluster;
}
/* Does "edge" connect two nodes belonging to the cluster
 * identified by "cluster"?
 */
static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
{
	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
}
/* Swap the schedule of "node1" and "node2".
 * Both nodes have been derived from the same node in a common parent graph.
 * Since the "coincident" field is shared with that node
 * in the parent graph, there is no need to also swap this field.
 */
static void swap_sched(struct isl_sched_node *node1,
	struct isl_sched_node *node2)
{
	isl_mat *sched;
	isl_map *sched_map;

	sched = node1->sched;
	node1->sched = node2->sched;
	node2->sched = sched;

	sched_map = node1->sched_map;
	node1->sched_map = node2->sched_map;
	node2->sched_map = sched_map;
}
/* Copy the current band schedule from the SCCs that form the cluster
 * with index "pos" to the actual cluster at position "pos".
 * By construction, the index of the first SCC that belongs to the cluster
 * is also "pos".
 *
 * The order of the nodes inside both the SCCs and the cluster
 * is assumed to be the same as the order in the original "graph".
 *
 * Since the SCC graphs will no longer be used after this function,
 * the schedules are actually swapped rather than copied.
 */
static isl_stat copy_partial(struct isl_sched_graph *graph,
	struct isl_clustering *c, int pos)
{
	int i, j;

	c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
	c->cluster[pos].n_row = c->scc[pos].n_row;
	c->cluster[pos].maxvar = c->scc[pos].maxvar;
	j = 0;
	for (i = 0; i < graph->n; ++i) {
		int s, k;

		if (graph->node[i].cluster != pos)
			continue;
		s = graph->node[i].scc;
		k = c->scc_node[s]++;
		swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
		if (c->scc[s].maxvar > c->cluster[pos].maxvar)
			c->cluster[pos].maxvar = c->scc[s].maxvar;
		++j;
	}

	return isl_stat_ok;
}
/* Is there a (conditional) validity dependence from node[j] to node[i],
 * forcing node[i] to follow node[j], or do the nodes belong to the same
 * cluster?
 */
static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	if (graph->node[i].cluster == graph->node[j].cluster)
		return isl_bool_true;
	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Extract the merged clusters of SCCs in "graph", sort them, and
 * store them in c->cluster.  Update c->scc_cluster accordingly.
 *
 * First keep track of the cluster containing the SCC to which a node
 * belongs in the node itself.
 * Then extract the clusters into c->cluster, copying the current
 * band schedule from the SCCs that belong to the cluster.
 * Do this only once per cluster.
 *
 * Finally, topologically sort the clusters and update c->scc_cluster
 * to match the new scc numbering.  While the SCCs were originally
 * sorted already, some SCCs that depend on some other SCCs may
 * have been merged with SCCs that appear before these other SCCs.
 * A reordering may therefore be required.
 */
static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	for (i = 0; i < graph->n; ++i)
		graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];

	for (i = 0; i < graph->scc; ++i) {
		if (c->scc_cluster[i] != i)
			continue;
		if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
				&edge_cluster_exactly, i, &c->cluster[i]) < 0)
			return isl_stat_error;
		c->cluster[i].src_scc = -1;
		c->cluster[i].dst_scc = -1;
		if (copy_partial(graph, c, i) < 0)
			return isl_stat_error;
	}

	if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
		return isl_stat_error;
	for (i = 0; i < graph->n; ++i)
		c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;

	return isl_stat_ok;
}
/* Compute weights on the proximity edges of "graph" that can
 * be used by find_proximity to find the most appropriate
 * proximity edge to use to merge two clusters in "c".
 * The weights are also used by has_bounded_distances to determine
 * whether the merge should be allowed.
 * Store the maximum of the computed weights in graph->max_weight.
 *
 * The computed weight is a measure of the number of remaining schedule
 * dimensions that can still be completely aligned.
 * In particular, compute the number of equalities between
 * input dimensions and output dimensions in the proximity constraints.
 * The directions that are already handled by outer schedule bands
 * are projected out prior to determining this number.
 *
 * Edges that will never be considered by find_proximity are ignored.
 */
static isl_stat compute_weights(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	graph->max_weight = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		struct isl_sched_node *src = edge->src;
		struct isl_sched_node *dst = edge->dst;
		isl_basic_map *hull;
		int n_in, n_out;

		if (!is_proximity(edge))
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;

		hull = isl_map_affine_hull(isl_map_copy(edge->map));
		hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
						    isl_mat_copy(src->ctrans));
		hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
						    isl_mat_copy(dst->ctrans));
		hull = isl_basic_map_project_out(hull,
						isl_dim_in, 0, src->rank);
		hull = isl_basic_map_project_out(hull,
						isl_dim_out, 0, dst->rank);
		hull = isl_basic_map_remove_divs(hull);
		n_in = isl_basic_map_dim(hull, isl_dim_in);
		n_out = isl_basic_map_dim(hull, isl_dim_out);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_in, 0, n_in);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_out, 0, n_out);
		if (!hull)
			return isl_stat_error;
		edge->weight = hull->n_eq;
		isl_basic_map_free(hull);

		if (edge->weight > graph->max_weight)
			graph->max_weight = edge->weight;
	}

	return isl_stat_ok;
}
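
/* As a hypothetical illustration of the weight computation above
 * (made-up example, not taken from any test case): for a proximity
 * edge with map { A[i,j] -> B[i,j] } and no outer schedule rows yet,
 * the affine hull contains the two equalities i' = i and j' = j
 * relating input and output dimensions, so the edge would get weight 2.
 * If an outer band had already aligned one of these directions, that
 * direction would be projected out by the transformations above and
 * the weight would drop to 1.
 */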
/* Call compute_schedule_finish_band on each of the clusters in "c"
 * in their topological order.  This order is determined by the scc
 * fields of the nodes in "graph".
 * Combine the results in a sequence expressing the topological order.
 *
 * If there is only one cluster left, then there is no need to introduce
 * a sequence node.  Also, in this case, the cluster necessarily contains
 * the SCC at position 0 in the original graph and is therefore also
 * stored in the first cluster of "c".
 */
static __isl_give isl_schedule_node *finish_bands_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (graph->scc == 1)
		return compute_schedule_finish_band(node, &c->cluster[0], 0);

	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	for (i = 0; i < graph->scc; ++i) {
		int j = c->scc_cluster[i];
		node = isl_schedule_node_child(node, i);
		node = isl_schedule_node_child(node, 0);
		node = compute_schedule_finish_band(node, &c->cluster[j], 0);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}
/* Compute a schedule for a connected dependence graph by first considering
 * each strongly connected component (SCC) in the graph separately and then
 * incrementally combining them into clusters.
 * Return the updated schedule node.
 *
 * Initially, each cluster consists of a single SCC, each with its
 * own band schedule.  The algorithm then tries to merge pairs
 * of clusters along a proximity edge until no more suitable
 * proximity edges can be found.  During this merging, the schedule
 * is maintained in the individual SCCs.
 * After the merging is completed, the full resulting clusters
 * are extracted and in finish_bands_clustering,
 * compute_schedule_finish_band is called on each of them to integrate
 * the band into "node" and to continue the computation.
 *
 * compute_weights initializes the weights that are used by find_proximity.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;
	struct isl_clustering c;
	int i;

	ctx = isl_schedule_node_get_ctx(node);

	if (clustering_init(ctx, &c, graph) < 0)
		goto error;

	if (compute_weights(graph, &c) < 0)
		goto error;

	while (1) {
		i = find_proximity(graph, &c);
		if (i < 0)
			goto error;
		if (i >= graph->n_edge)
			break;
		if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
			goto error;
	}

	if (extract_clusters(ctx, graph, &c) < 0)
		goto error;

	node = finish_bands_clustering(node, graph, &c);

	clustering_free(ctx, &c);
	return node;
error:
	clustering_free(ctx, &c);
	return isl_schedule_node_free(node);
}
/* Compute a schedule for a connected dependence graph and return
 * the updated schedule node.
 *
 * If Feautrier's algorithm is selected, we first recursively try to satisfy
 * as many validity dependences as possible.  When all validity dependences
 * are satisfied, we extend the schedule to a full-dimensional schedule.
 *
 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
 * depending on whether the user has selected the option to try and
 * compute a schedule for the entire (weakly connected) component first.
 * If there is only a single strongly connected component (SCC), then
 * there is no point in trying to combine SCCs
 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
 * is called instead.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (compute_maxvar(graph) < 0)
		return isl_schedule_node_free(node);

	if (need_feautrier_step(ctx, graph))
		return compute_schedule_wcc_feautrier(node, graph);

	if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
		return compute_schedule_wcc_whole(node, graph);

	return compute_schedule_wcc_clustering(node, graph);
}
/* Compute a schedule for each group of nodes identified by node->scc
 * separately and then combine them in a sequence node (or as set node
 * if graph->weak is set) inserted at position "node" of the schedule tree.
 * Return the updated schedule node.
 *
 * If "wcc" is set, then each of the groups belongs to a single
 * weakly connected component in the dependence graph so that
 * there is no need for compute_sub_schedule to look for weakly
 * connected components.
 */
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc)
{
	int component;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	if (graph->weak)
		node = isl_schedule_node_insert_set(node, filters);
	else
		node = isl_schedule_node_insert_sequence(node, filters);

	for (component = 0; component < graph->scc; ++component) {
		node = isl_schedule_node_child(node, component);
		node = isl_schedule_node_child(node, 0);
		node = compute_sub_schedule(node, ctx, graph,
					&node_scc_exactly,
					&edge_scc_exactly, component, wcc);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}
/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	} else {
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	}

	if (graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
}
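
/* The branches above are selected through user options.  A minimal sketch
 * of how a caller might choose between them, assuming the corresponding
 * isl option setters (error handling omitted; illustrative only):
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	// schedule each strongly connected component separately
 *	isl_options_set_schedule_serialize_sccs(ctx, 1);
 *	// or: first try to schedule a whole weakly connected component
 *	isl_options_set_schedule_whole_component(ctx, 1);
 */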
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	if (isl_union_set_n_set(domain) == 0) {
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);
	}

	if (graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);
	if (graph.n > 0)
		node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);

	return sched;
}
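
/* A minimal usage sketch for the entry point above.  The domain and
 * dependence strings are made-up examples and error handling is omitted;
 * this is illustrative only, not part of the library:
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ S[i] : 0 <= i < 100 }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"{ S[i] -> S[i + 1] : 0 <= i < 99 }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 *	isl_schedule *sched = isl_schedule_constraints_compute_schedule(sc);
 *	isl_schedule_free(sched);
 *	isl_ctx_free(ctx);
 */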
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
{
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}
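
/* For reference, the backward compatibility entry point above would be
 * called as follows (hypothetical variables, error handling omitted),
 * which is equivalent to building the constraints explicitly as in the
 * sketch after isl_schedule_constraints_compute_schedule:
 *
 *	isl_schedule *sched = isl_union_set_compute_schedule(domain,
 *		validity, proximity);
 */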