/*
 * Copyright 2011      INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015-2016 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <isl_ctx_private.h>
#include <isl_map_private.h>
#include <isl_space_private.h>
#include <isl_aff_private.h>
#include <isl/constraint.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl_mat_private.h>
#include <isl_vec_private.h>
#include <isl/union_set.h>
#include <isl_dim_map.h>
#include <isl/map_to_basic_set.h>
#include <isl_options_private.h>
#include <isl_tarjan.h>
#include <isl_morph.h>

/*
 * The scheduling algorithm implemented in this file was inspired by
 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
 * Parallelization and Locality Optimization in the Polyhedral Model".
 */
enum isl_edge_type {
	isl_edge_validity = 0,
	isl_edge_first = isl_edge_validity,
	isl_edge_coincidence,
	isl_edge_condition,
	isl_edge_conditional_validity,
	isl_edge_proximity,
	isl_edge_last = isl_edge_proximity,
	isl_edge_local
};
/* The constraints that need to be satisfied by a schedule on "domain".
 *
 * "context" specifies extra constraints on the parameters.
 *
 * "validity" constraints map domain elements i to domain elements
 * that should be scheduled after i.  (Hard constraint)
 * "proximity" constraints map domain elements i to domain elements
 * that should be scheduled as early as possible after i (or before i).
 *
 * "condition" and "conditional_validity" constraints map possibly "tagged"
 * domain elements i -> s to "tagged" domain elements j -> t.
 * The elements of the "conditional_validity" constraints, but without the
 * tags (i.e., the elements i -> j), are treated as validity constraints,
 * except that during the construction of a tilable band,
 * the elements of the "conditional_validity" constraints may be violated
 * provided that all adjacent elements of the "condition" constraints
 * are local within the band.
 * A dependence is local within a band if domain and range are mapped
 * to the same schedule point by the band.
 */
struct isl_schedule_constraints {
	isl_union_set *domain;
	isl_set *context;

	isl_union_map *constraint[isl_edge_last + 1];
};
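/* For illustration, a hypothetical caller could build such an object
 * through the public API wrapped around this structure (the domain and
 * dependence strings below are made-up examples):
 *
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"{ S[i] -> T[i] }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 *	isl_schedule *schedule =
 *		isl_schedule_constraints_compute_schedule(sc);
 *
 * Each setter below simply replaces the corresponding entry
 * of the "constraint" array.
 */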
__isl_give isl_schedule_constraints *isl_schedule_constraints_copy(
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_schedule_constraints *sc_copy;
	enum isl_edge_type i;

	ctx = isl_union_set_get_ctx(sc->domain);
	sc_copy = isl_calloc_type(ctx, struct isl_schedule_constraints);
	if (!sc_copy)
		return NULL;

	sc_copy->domain = isl_union_set_copy(sc->domain);
	sc_copy->context = isl_set_copy(sc->context);
	if (!sc_copy->domain || !sc_copy->context)
		return isl_schedule_constraints_free(sc_copy);

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc_copy->constraint[i] = isl_union_map_copy(sc->constraint[i]);
		if (!sc_copy->constraint[i])
			return isl_schedule_constraints_free(sc_copy);
	}

	return sc_copy;
}
/* Construct an isl_schedule_constraints object for computing a schedule
 * on "domain".  The initial object does not impose any constraints.
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_on_domain(
	__isl_take isl_union_set *domain)
{
	isl_ctx *ctx;
	isl_space *space;
	isl_schedule_constraints *sc;
	isl_union_map *empty;
	enum isl_edge_type i;

	if (!domain)
		return NULL;

	ctx = isl_union_set_get_ctx(domain);
	sc = isl_calloc_type(ctx, struct isl_schedule_constraints);
	if (!sc)
		goto error;

	space = isl_union_set_get_space(domain);
	sc->domain = domain;
	sc->context = isl_set_universe(isl_space_copy(space));
	empty = isl_union_map_empty(space);
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc->constraint[i] = isl_union_map_copy(empty);
		if (!sc->constraint[i])
			sc->domain = isl_union_set_free(sc->domain);
	}
	isl_union_map_free(empty);

	if (!sc->domain || !sc->context)
		return isl_schedule_constraints_free(sc);

	return sc;
error:
	isl_union_set_free(domain);
	return NULL;
}
/* Replace the context of "sc" by "context".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_context(
	__isl_take isl_schedule_constraints *sc, __isl_take isl_set *context)
{
	if (!sc || !context)
		goto error;

	isl_set_free(sc->context);
	sc->context = context;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_set_free(context);
	return NULL;
}

/* Replace the validity constraints of "sc" by "validity".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_validity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *validity)
{
	if (!sc || !validity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_validity]);
	sc->constraint[isl_edge_validity] = validity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(validity);
	return NULL;
}

/* Replace the coincidence constraints of "sc" by "coincidence".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_coincidence(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *coincidence)
{
	if (!sc || !coincidence)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_coincidence]);
	sc->constraint[isl_edge_coincidence] = coincidence;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(coincidence);
	return NULL;
}

/* Replace the proximity constraints of "sc" by "proximity".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_proximity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *proximity)
{
	if (!sc || !proximity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_proximity]);
	sc->constraint[isl_edge_proximity] = proximity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(proximity);
	return NULL;
}

/* Replace the conditional validity constraints of "sc" by "condition"
 * and "validity".
 */
__isl_give isl_schedule_constraints *
isl_schedule_constraints_set_conditional_validity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *condition,
	__isl_take isl_union_map *validity)
{
	if (!sc || !condition || !validity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_condition]);
	sc->constraint[isl_edge_condition] = condition;
	isl_union_map_free(sc->constraint[isl_edge_conditional_validity]);
	sc->constraint[isl_edge_conditional_validity] = validity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(condition);
	isl_union_map_free(validity);
	return NULL;
}
__isl_null isl_schedule_constraints *isl_schedule_constraints_free(
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	isl_union_set_free(sc->domain);
	isl_set_free(sc->context);
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		isl_union_map_free(sc->constraint[i]);

	free(sc);

	return NULL;
}

isl_ctx *isl_schedule_constraints_get_ctx(
	__isl_keep isl_schedule_constraints *sc)
{
	return sc ? isl_union_set_get_ctx(sc->domain) : NULL;
}
/* Return the domain of "sc".
 */
__isl_give isl_union_set *isl_schedule_constraints_get_domain(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_set_copy(sc->domain);
}

/* Return the validity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_validity(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_validity]);
}

/* Return the coincidence constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_coincidence(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_coincidence]);
}

/* Return the proximity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_proximity(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_proximity]);
}

/* Return the conditional validity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_conditional_validity(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_conditional_validity]);
}

/* Return the conditions for the conditional validity constraints of "sc".
 */
__isl_give isl_union_map *
isl_schedule_constraints_get_conditional_validity_condition(
	__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return NULL;

	return isl_union_map_copy(sc->constraint[isl_edge_condition]);
}
/* Can a schedule constraint of type "type" be tagged?
 */
static int may_be_tagged(enum isl_edge_type type)
{
	if (type == isl_edge_condition || type == isl_edge_conditional_validity)
		return 1;
	return 0;
}
348 /* Apply "umap" to the domains of the wrapped relations
349 * inside the domain and range of "c".
351 * That is, for each map of the form
353 * [D -> S] -> [E -> T]
355 * in "c", apply "umap" to D and E.
357 * D is exposed by currying the relation to
359 * D -> [S -> [E -> T]]
361 * E is exposed by doing the same to the inverse of "c".
363 static __isl_give isl_union_map
*apply_factor_domain(
364 __isl_take isl_union_map
*c
, __isl_keep isl_union_map
*umap
)
366 c
= isl_union_map_curry(c
);
367 c
= isl_union_map_apply_domain(c
, isl_union_map_copy(umap
));
368 c
= isl_union_map_uncurry(c
);
370 c
= isl_union_map_reverse(c
);
371 c
= isl_union_map_curry(c
);
372 c
= isl_union_map_apply_domain(c
, isl_union_map_copy(umap
));
373 c
= isl_union_map_uncurry(c
);
374 c
= isl_union_map_reverse(c
);
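/* For illustration (with made-up statement and tag names), applying
 *
 *	umap = { S[i] -> S'[i]; T[i] -> T'[i] }
 *
 * to the tagged relation { [S[i] -> ref1[]] -> [T[i] -> ref2[]] }
 * proceeds as follows: the first curry/apply/uncurry sequence above
 * rewrites the domain factor S, and the second sequence, performed on
 * the reversed relation, rewrites the range factor T, resulting in
 *
 *	{ [S'[i] -> ref1[]] -> [T'[i] -> ref2[]] }
 */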
379 /* Apply "umap" to domain and range of "c".
380 * If "tag" is set, then "c" may contain tags and then "umap"
381 * needs to be applied to the domains of the wrapped relations
382 * inside the domain and range of "c".
384 static __isl_give isl_union_map
*apply(__isl_take isl_union_map
*c
,
385 __isl_keep isl_union_map
*umap
, int tag
)
390 t
= isl_union_map_copy(c
);
391 c
= isl_union_map_apply_domain(c
, isl_union_map_copy(umap
));
392 c
= isl_union_map_apply_range(c
, isl_union_map_copy(umap
));
395 t
= apply_factor_domain(t
, umap
);
396 c
= isl_union_map_union(c
, t
);
400 /* Apply "umap" to the domain of the schedule constraints "sc".
402 * The two sides of the various schedule constraints are adjusted
405 __isl_give isl_schedule_constraints
*isl_schedule_constraints_apply(
406 __isl_take isl_schedule_constraints
*sc
,
407 __isl_take isl_union_map
*umap
)
409 enum isl_edge_type i
;
414 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
415 int tag
= may_be_tagged(i
);
417 sc
->constraint
[i
] = apply(sc
->constraint
[i
], umap
, tag
);
418 if (!sc
->constraint
[i
])
421 sc
->domain
= isl_union_set_apply(sc
->domain
, umap
);
423 return isl_schedule_constraints_free(sc
);
427 isl_schedule_constraints_free(sc
);
428 isl_union_map_free(umap
);
void isl_schedule_constraints_dump(__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return;

	fprintf(stderr, "domain: ");
	isl_union_set_dump(sc->domain);
	fprintf(stderr, "context: ");
	isl_set_dump(sc->context);
	fprintf(stderr, "validity: ");
	isl_union_map_dump(sc->constraint[isl_edge_validity]);
	fprintf(stderr, "proximity: ");
	isl_union_map_dump(sc->constraint[isl_edge_proximity]);
	fprintf(stderr, "coincidence: ");
	isl_union_map_dump(sc->constraint[isl_edge_coincidence]);
	fprintf(stderr, "condition: ");
	isl_union_map_dump(sc->constraint[isl_edge_condition]);
	fprintf(stderr, "conditional_validity: ");
	isl_union_map_dump(sc->constraint[isl_edge_conditional_validity]);
}
/* Align the parameters of the fields of "sc".
 */
static __isl_give isl_schedule_constraints *
isl_schedule_constraints_align_params(__isl_take isl_schedule_constraints *sc)
{
	isl_space *space;
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	space = isl_union_set_get_space(sc->domain);
	space = isl_space_align_params(space, isl_set_get_space(sc->context));
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		space = isl_space_align_params(space,
				isl_union_map_get_space(sc->constraint[i]));

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc->constraint[i] = isl_union_map_align_params(
				sc->constraint[i], isl_space_copy(space));
		if (!sc->constraint[i])
			space = isl_space_free(space);
	}
	sc->context = isl_set_align_params(sc->context, isl_space_copy(space));
	sc->domain = isl_union_set_align_params(sc->domain, space);
	if (!sc->context || !sc->domain)
		return isl_schedule_constraints_free(sc);

	return sc;
}
/* Return the total number of isl_maps in the constraints of "sc".
 */
static int isl_schedule_constraints_n_map(
	__isl_keep isl_schedule_constraints *sc)
{
	enum isl_edge_type i;
	int n = 0;

	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		n += isl_union_map_n_map(sc->constraint[i]);

	return n;
}
/* Internal information about a node that is used during the construction
 * of a schedule.
 *
 * space represents the space in which the domain lives
 * sched is a matrix representation of the schedule being constructed
 *	for this node; if compressed is set, then this schedule is
 *	defined over the compressed domain space
 * sched_map is an isl_map representation of the same (partial) schedule
 *	sched_map may be NULL; if compressed is set, then this map
 *	is defined over the uncompressed domain space
 * rank is the number of linearly independent rows in the linear part
 *	of sched
 * the columns of cmap represent a change of basis for the schedule
 *	coefficients; the first rank columns span the linear part of
 *	the schedule rows
 * cinv is the inverse of cmap.
 * ctrans is the transpose of cmap.
 * start is the first variable in the LP problem in the sequences that
 *	represents the schedule coefficients of this node
 * nvar is the dimension of the domain
 * nparam is the number of parameters or 0 if we are not constructing
 *	a parametric schedule
 *
 * If compressed is set, then hull represents the constraints
 * that were used to derive the compression, while compress and
 * decompress map the original space to the compressed space and
 * vice versa.
 *
 * scc is the index of SCC (or WCC) this node belongs to
 *
 * "cluster" is only used inside extract_clusters and identifies
 * the cluster of SCCs that the node belongs to.
 *
 * coincident contains a boolean for each of the rows of the schedule,
 * indicating whether the corresponding scheduling dimension satisfies
 * the coincidence constraints in the sense that the corresponding
 * dependence distances are zero.
 */
struct isl_sched_node {
	isl_space *space;
	int compressed;
	isl_set *hull;
	isl_multi_aff *compress;
	isl_multi_aff *decompress;
	isl_mat *sched;
	isl_map *sched_map;

	int rank;
	isl_mat *cmap;
	isl_mat *cinv;
	isl_mat *ctrans;
	int start;
	int nvar;
	int nparam;

	int scc;
	int cluster;

	int *coincident;
};
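/* As an illustration of the sched field: each row of the matrix has
 * 1 + nparam + nvar entries (matching the columns allocated in add_node
 * below) and represents one affine schedule function
 *
 *	f(x) = c_0 + c_n n + c_x x
 *
 * with c_0 the constant term, c_n the parameter coefficients and
 * c_x the coefficients of the (possibly compressed) domain variables.
 */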
static int node_has_space(const void *entry, const void *val)
{
	struct isl_sched_node *node = (struct isl_sched_node *)entry;
	isl_space *dim = (isl_space *)val;

	return isl_space_is_equal(node->space, dim);
}

static int node_scc_exactly(struct isl_sched_node *node, int scc)
{
	return node->scc == scc;
}

static int node_scc_at_most(struct isl_sched_node *node, int scc)
{
	return node->scc <= scc;
}

static int node_scc_at_least(struct isl_sched_node *node, int scc)
{
	return node->scc >= scc;
}
/* An edge in the dependence graph.  An edge may be used to
 * ensure validity of the generated schedule, to minimize the dependence
 * distance or to serve several of these purposes.
 *
 * map is the dependence relation, with i -> j in the map if j depends on i
 * tagged_condition and tagged_validity contain the union of all tagged
 *	condition or conditional validity dependence relations that
 *	specialize the dependence relation "map"; that is,
 *	if (i -> a) -> (j -> b) is an element of "tagged_condition"
 *	or "tagged_validity", then i -> j is an element of "map".
 *	If these fields are NULL, then they represent the empty relation.
 * src is the source node
 * dst is the sink node
 *
 * types is a bit vector containing the types of this edge.
 * validity is set if the edge is used to ensure correctness
 * coincidence is used to enforce zero dependence distances
 * proximity is set if the edge is used to minimize dependence distances
 * condition is set if the edge represents a condition
 *	for a conditional validity schedule constraint
 * local can only be set for condition edges and indicates that
 *	the dependence distance over the edge should be zero
 * conditional_validity is set if the edge is used to conditionally
 *	ensure correctness
 *
 * For validity edges, start and end mark the sequence of inequality
 * constraints in the LP problem that encode the validity constraint
 * corresponding to this edge.
 *
 * During clustering, an edge may be marked "no_merge" if it should
 * not be used to merge clusters.
 * The weight is also only used during clustering and it is
 * an indication of how many schedule dimensions on either side
 * of the schedule constraints can be aligned.
 * If the weight is negative, then this means that this edge was postponed
 * by has_bounded_distances or any_no_merge.  The original weight can
 * be retrieved by adding 1 + graph->max_weight, with "graph"
 * the graph containing this edge.
 */
struct isl_sched_edge {
	isl_map *map;
	isl_union_map *tagged_condition;
	isl_union_map *tagged_validity;

	struct isl_sched_node *src;
	struct isl_sched_node *dst;

	unsigned types;

	int start;
	int end;

	int no_merge;
	int weight;
};
636 /* Is "edge" marked as being of type "type"?
638 static int is_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
640 return ISL_FL_ISSET(edge
->types
, 1 << type
);
643 /* Mark "edge" as being of type "type".
645 static void set_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
647 ISL_FL_SET(edge
->types
, 1 << type
);
650 /* No longer mark "edge" as being of type "type"?
652 static void clear_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
654 ISL_FL_CLR(edge
->types
, 1 << type
);
657 /* Is "edge" marked as a validity edge?
659 static int is_validity(struct isl_sched_edge
*edge
)
661 return is_type(edge
, isl_edge_validity
);
664 /* Mark "edge" as a validity edge.
666 static void set_validity(struct isl_sched_edge
*edge
)
668 set_type(edge
, isl_edge_validity
);
671 /* Is "edge" marked as a proximity edge?
673 static int is_proximity(struct isl_sched_edge
*edge
)
675 return is_type(edge
, isl_edge_proximity
);
678 /* Is "edge" marked as a local edge?
680 static int is_local(struct isl_sched_edge
*edge
)
682 return is_type(edge
, isl_edge_local
);
685 /* Mark "edge" as a local edge.
687 static void set_local(struct isl_sched_edge
*edge
)
689 set_type(edge
, isl_edge_local
);
692 /* No longer mark "edge" as a local edge.
694 static void clear_local(struct isl_sched_edge
*edge
)
696 clear_type(edge
, isl_edge_local
);
699 /* Is "edge" marked as a coincidence edge?
701 static int is_coincidence(struct isl_sched_edge
*edge
)
703 return is_type(edge
, isl_edge_coincidence
);
706 /* Is "edge" marked as a condition edge?
708 static int is_condition(struct isl_sched_edge
*edge
)
710 return is_type(edge
, isl_edge_condition
);
713 /* Is "edge" marked as a conditional validity edge?
715 static int is_conditional_validity(struct isl_sched_edge
*edge
)
717 return is_type(edge
, isl_edge_conditional_validity
);
/* Internal information about the dependence graph used during
 * the construction of the schedule.
 *
 * intra_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself
 * inter_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences between distinct nodes
 * if compression is involved then the key for these maps
 * is the original, uncompressed dependence relation, while
 * the value is the dual of the compressed dependence relation.
 *
 * n is the number of nodes
 * node is the list of nodes
 * maxvar is the maximal number of variables over all nodes
 * max_row is the allocated number of rows in the schedule
 * n_row is the current (maximal) number of linearly independent
 *	rows in the node schedules
 * n_total_row is the current number of rows in the node schedules
 * band_start is the starting row in the node schedules of the current band
 * root is set if this graph is the original dependence graph,
 *	without any splitting
 *
 * sorted contains a list of node indices sorted according to the
 *	SCC to which a node belongs
 *
 * n_edge is the number of edges
 * edge is the list of edges
 * max_edge contains the maximal number of edges of each type;
 *	in particular, it contains the number of edges in the initial graph.
 * edge_table contains pointers into the edge array, hashed on the source
 *	and sink spaces; there is one such table for each type;
 *	a given edge may be referenced from more than one table
 *	if the corresponding relation appears in more than one of the
 *	sets of dependences; however, for each type there is only
 *	a single edge between a given pair of source and sink space
 *	in the entire graph
 *
 * node_table contains pointers into the node array, hashed on the space
 *
 * region contains a list of variable sequences that should be non-trivial
 *
 * lp contains the (I)LP problem used to obtain new schedule rows
 *
 * src_scc and dst_scc are the source and sink SCCs of an edge with
 *	conflicting constraints
 *
 * scc represents the number of components
 * weak is set if the components are weakly connected
 *
 * max_weight is used during clustering and represents the maximal
 * weight of the relevant proximity edges.
 */
struct isl_sched_graph {
	isl_map_to_basic_set *intra_hmap;
	isl_map_to_basic_set *inter_hmap;

	struct isl_sched_node *node;
	int n;
	int maxvar;
	int max_row;
	int n_row;

	int *sorted;

	int n_total_row;
	int band_start;

	int root;

	struct isl_sched_edge *edge;
	int n_edge;
	int max_edge[isl_edge_last + 1];
	struct isl_hash_table *edge_table[isl_edge_last + 1];

	struct isl_hash_table *node_table;
	struct isl_region *region;

	isl_basic_set *lp;

	int src_scc;
	int dst_scc;

	int scc;
	int weak;

	int max_weight;
};
/* Initialize node_table based on the list of nodes.
 */
static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	graph->node_table = isl_hash_table_alloc(ctx, graph->n);
	if (!graph->node_table)
		return -1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_hash_table_entry *entry;
		uint32_t hash;

		hash = isl_space_get_hash(graph->node[i].space);
		entry = isl_hash_table_find(ctx, graph->node_table, hash,
					    &node_has_space,
					    graph->node[i].space, 1);
		if (!entry)
			return -1;
		entry->data = &graph->node[i];
	}

	return 0;
}
/* Return a pointer to the node that lives within the given space,
 * or NULL if there is no such node.
 */
static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_space *dim)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_space_get_hash(dim);
	entry = isl_hash_table_find(ctx, graph->node_table, hash,
				    &node_has_space, dim, 0);

	return entry ? entry->data : NULL;
}

static int edge_has_src_and_dst(const void *entry, const void *val)
{
	const struct isl_sched_edge *edge = entry;
	const struct isl_sched_edge *temp = val;

	return edge->src == temp->src && edge->dst == temp->dst;
}
/* Add the given edge to graph->edge_table[type].
 */
static isl_stat graph_edge_table_add(isl_ctx *ctx,
	struct isl_sched_graph *graph, enum isl_edge_type type,
	struct isl_sched_edge *edge)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, edge->src);
	hash = isl_hash_builtin(hash, edge->dst);
	entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, edge, 1);
	if (!entry)
		return isl_stat_error;
	entry->data = edge;

	return isl_stat_ok;
}
/* Allocate the edge_tables based on the maximal number of edges of
 * each type.
 */
static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i <= isl_edge_last; ++i) {
		graph->edge_table[i] = isl_hash_table_alloc(ctx,
							    graph->max_edge[i]);
		if (!graph->edge_table[i])
			return -1;
	}

	return 0;
}
896 /* If graph->edge_table[type] contains an edge from the given source
897 * to the given destination, then return the hash table entry of this edge.
898 * Otherwise, return NULL.
900 static struct isl_hash_table_entry
*graph_find_edge_entry(
901 struct isl_sched_graph
*graph
,
902 enum isl_edge_type type
,
903 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
905 isl_ctx
*ctx
= isl_space_get_ctx(src
->space
);
907 struct isl_sched_edge temp
= { .src
= src
, .dst
= dst
};
909 hash
= isl_hash_init();
910 hash
= isl_hash_builtin(hash
, temp
.src
);
911 hash
= isl_hash_builtin(hash
, temp
.dst
);
912 return isl_hash_table_find(ctx
, graph
->edge_table
[type
], hash
,
913 &edge_has_src_and_dst
, &temp
, 0);
917 /* If graph->edge_table[type] contains an edge from the given source
918 * to the given destination, then return this edge.
919 * Otherwise, return NULL.
921 static struct isl_sched_edge
*graph_find_edge(struct isl_sched_graph
*graph
,
922 enum isl_edge_type type
,
923 struct isl_sched_node
*src
, struct isl_sched_node
*dst
)
925 struct isl_hash_table_entry
*entry
;
927 entry
= graph_find_edge_entry(graph
, type
, src
, dst
);
/* Check whether the dependence graph has an edge of the given type
 * between the given two nodes.
 */
static isl_bool graph_has_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	struct isl_sched_edge *edge;
	isl_bool empty;

	edge = graph_find_edge(graph, type, src, dst);
	if (!edge)
		return isl_bool_false;

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		return isl_bool_error;

	return !empty;
}
/* Look for any edge with the same src, dst and map fields as "model".
 *
 * Return the matching edge if one can be found.
 * Return "model" if no matching edge is found.
 * Return NULL on error.
 */
static struct isl_sched_edge *graph_find_matching_edge(
	struct isl_sched_graph *graph, struct isl_sched_edge *model)
{
	enum isl_edge_type i;
	struct isl_sched_edge *edge;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		int is_equal;

		edge = graph_find_edge(graph, i, model->src, model->dst);
		if (!edge)
			continue;
		is_equal = isl_map_plain_is_equal(model->map, edge->map);
		if (is_equal < 0)
			return NULL;
		if (is_equal)
			return edge;
	}

	return model;
}
/* Remove the given edge from all the edge_tables that refer to it.
 */
static void graph_remove_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	isl_ctx *ctx = isl_map_get_ctx(edge->map);
	enum isl_edge_type i;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		struct isl_hash_table_entry *entry;

		entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
		if (!entry)
			continue;
		if (entry->data != edge)
			continue;
		isl_hash_table_remove(ctx, graph->edge_table[i], entry);
	}
}
/* Check whether the dependence graph has any edge
 * between the given two nodes.
 */
static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	enum isl_edge_type i;
	isl_bool r;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		r = graph_has_edge(graph, i, src, dst);
		if (r < 0 || r)
			return r;
	}

	return r;
}
/* Check whether the dependence graph has a validity edge
 * between the given two nodes.
 *
 * Conditional validity edges are essentially validity edges that
 * can be ignored if the corresponding condition edges are iteration private.
 * Here, we are only checking for the presence of validity
 * edges, so we need to consider the conditional validity edges too.
 * In particular, this function is used during the detection
 * of strongly connected components and we cannot ignore
 * conditional validity edges during this detection.
 */
static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_bool r;

	r = graph_has_edge(graph, isl_edge_validity, src, dst);
	if (r < 0 || r)
		return r;

	return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
}
static int graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
	int n_node, int n_edge)
{
	int i;

	graph->n = n_node;
	graph->n_edge = n_edge;
	graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
	graph->sorted = isl_calloc_array(ctx, int, graph->n);
	graph->region = isl_alloc_array(ctx, struct isl_region, graph->n);
	graph->edge = isl_calloc_array(ctx,
					struct isl_sched_edge, graph->n_edge);

	graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
	graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);

	if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
	    !graph->sorted)
		return -1;

	for (i = 0; i < graph->n; ++i)
		graph->sorted[i] = i;

	return 0;
}
static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	isl_map_to_basic_set_free(graph->intra_hmap);
	isl_map_to_basic_set_free(graph->inter_hmap);

	for (i = 0; i < graph->n; ++i) {
		isl_space_free(graph->node[i].space);
		isl_set_free(graph->node[i].hull);
		isl_multi_aff_free(graph->node[i].compress);
		isl_multi_aff_free(graph->node[i].decompress);
		isl_mat_free(graph->node[i].sched);
		isl_map_free(graph->node[i].sched_map);
		isl_mat_free(graph->node[i].cmap);
		isl_mat_free(graph->node[i].cinv);
		isl_mat_free(graph->node[i].ctrans);
		free(graph->node[i].coincident);
	}
	free(graph->node);
	free(graph->sorted);

	for (i = 0; i < graph->n_edge; ++i) {
		isl_map_free(graph->edge[i].map);
		isl_union_map_free(graph->edge[i].tagged_condition);
		isl_union_map_free(graph->edge[i].tagged_validity);
	}
	free(graph->edge);

	free(graph->region);
	for (i = 0; i <= isl_edge_last; ++i)
		isl_hash_table_free(ctx, graph->edge_table[i]);
	isl_hash_table_free(ctx, graph->node_table);
	isl_basic_set_free(graph->lp);
}
1107 /* For each "set" on which this function is called, increment
1108 * graph->n by one and update graph->maxvar.
1110 static isl_stat
init_n_maxvar(__isl_take isl_set
*set
, void *user
)
1112 struct isl_sched_graph
*graph
= user
;
1113 int nvar
= isl_set_dim(set
, isl_dim_set
);
1116 if (nvar
> graph
->maxvar
)
1117 graph
->maxvar
= nvar
;
/* Add the number of basic maps in "map" to *n.
 */
static isl_stat add_n_basic_map(__isl_take isl_map *map, void *user)
{
	int *n = user;

	*n += isl_map_n_basic_map(map);
	isl_map_free(map);

	return isl_stat_ok;
}
/* Compute the number of rows that should be allocated for the schedule.
 * In particular, we need one row for each variable or one row
 * for each basic map in the dependences.
 * Note that it is practically impossible to exhaust both
 * the number of dependences and the number of variables.
 */
static int compute_max_row(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	enum isl_edge_type i;
	int n_edge;

	graph->n = 0;
	graph->maxvar = 0;
	if (isl_union_set_foreach_set(sc->domain, &init_n_maxvar, graph) < 0)
		return -1;
	n_edge = 0;
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		if (isl_union_map_foreach_map(sc->constraint[i],
						&add_n_basic_map, &n_edge) < 0)
			return -1;
	graph->max_row = n_edge + graph->maxvar;

	return 0;
}
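/* For example, on a hypothetical input with two statements of dimension
 * 2 and 3 (graph->maxvar = 3) whose dependence relations consist of a
 * total of 4 basic maps (n_edge = 4), this allocates
 * max_row = 4 + 3 = 7 schedule rows.
 */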
1162 /* Does "bset" have any defining equalities for its set variables?
1164 static int has_any_defining_equality(__isl_keep isl_basic_set
*bset
)
1171 n
= isl_basic_set_dim(bset
, isl_dim_set
);
1172 for (i
= 0; i
< n
; ++i
) {
1175 has
= isl_basic_set_has_defining_equality(bset
, isl_dim_set
, i
,
/* Add a new node to the graph representing the given space.
 * "nvar" is the (possibly compressed) number of variables and
 * may be smaller than the number of set variables in "space"
 * if "compressed" is set.
 * If "compressed" is set, then "hull" represents the constraints
 * that were used to derive the compression, while "compress" and
 * "decompress" map the original space to the compressed space and
 * vice versa.
 * If "compressed" is not set, then "hull", "compress" and "decompress"
 * should be NULL.
 */
static isl_stat add_node(struct isl_sched_graph *graph,
	__isl_take isl_space *space, int nvar, int compressed,
	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
	__isl_take isl_multi_aff *decompress)
{
	int nparam;
	isl_ctx *ctx;
	isl_mat *sched;
	int *coincident;

	if (!space)
		return isl_stat_error;

	ctx = isl_space_get_ctx(space);
	nparam = isl_space_dim(space, isl_dim_param);
	if (!ctx->opt->schedule_parametric)
		nparam = 0;
	sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
	graph->node[graph->n].space = space;
	graph->node[graph->n].nvar = nvar;
	graph->node[graph->n].nparam = nparam;
	graph->node[graph->n].sched = sched;
	graph->node[graph->n].sched_map = NULL;
	coincident = isl_calloc_array(ctx, int, graph->max_row);
	graph->node[graph->n].coincident = coincident;
	graph->node[graph->n].compressed = compressed;
	graph->node[graph->n].hull = hull;
	graph->node[graph->n].compress = compress;
	graph->node[graph->n].decompress = decompress;
	graph->n++;

	if (!space || !sched || (graph->max_row && !coincident))
		return isl_stat_error;
	if (compressed && (!hull || !compress || !decompress))
		return isl_stat_error;

	return isl_stat_ok;
}
/* Add a new node to the graph representing the given set.
 *
 * If any of the set variables is defined by an equality, then
 * we perform variable compression such that we can perform
 * the scheduling on the compressed domain.
 */
static isl_stat extract_node(__isl_take isl_set *set, void *user)
{
	int nvar;
	int has_equality;
	isl_space *space;
	isl_basic_set *hull;
	isl_set *hull_set;
	isl_morph *morph;
	isl_multi_aff *compress, *decompress;
	struct isl_sched_graph *graph = user;

	space = isl_set_get_space(set);
	hull = isl_set_affine_hull(set);
	hull = isl_basic_set_remove_divs(hull);
	nvar = isl_space_dim(space, isl_dim_set);
	has_equality = has_any_defining_equality(hull);

	if (has_equality < 0)
		goto error;
	if (!has_equality) {
		isl_basic_set_free(hull);
		return add_node(graph, space, nvar, 0, NULL, NULL, NULL);
	}

	morph = isl_basic_set_variable_compression(hull, isl_dim_set);
	nvar = isl_morph_ran_dim(morph, isl_dim_set);
	compress = isl_morph_get_var_multi_aff(morph);
	morph = isl_morph_inverse(morph);
	decompress = isl_morph_get_var_multi_aff(morph);
	isl_morph_free(morph);

	hull_set = isl_set_from_basic_set(hull);
	return add_node(graph, space, nvar, 1, hull_set, compress, decompress);
error:
	isl_basic_set_free(hull);
	isl_space_free(space);
	return isl_stat_error;
}
struct isl_extract_edge_data {
	enum isl_edge_type type;
	struct isl_sched_graph *graph;
};

/* Merge edge2 into edge1, freeing the contents of edge2.
 * Return 0 on success and -1 on failure.
 *
 * edge1 and edge2 are assumed to have the same value for the map field.
 */
static int merge_edge(struct isl_sched_edge *edge1,
	struct isl_sched_edge *edge2)
{
	edge1->types |= edge2->types;
	isl_map_free(edge2->map);

	if (is_condition(edge2)) {
		if (!edge1->tagged_condition)
			edge1->tagged_condition = edge2->tagged_condition;
		else
			edge1->tagged_condition =
				isl_union_map_union(edge1->tagged_condition,
						    edge2->tagged_condition);
	}

	if (is_conditional_validity(edge2)) {
		if (!edge1->tagged_validity)
			edge1->tagged_validity = edge2->tagged_validity;
		else
			edge1->tagged_validity =
				isl_union_map_union(edge1->tagged_validity,
						    edge2->tagged_validity);
	}

	if (is_condition(edge2) && !edge1->tagged_condition)
		return -1;
	if (is_conditional_validity(edge2) && !edge1->tagged_validity)
		return -1;

	return 0;
}
/* Insert dummy tags in domain and range of "map".
 *
 * In particular, if "map" is of the form
 *
 *	A -> B
 *
 * then the result is of the form
 *
 *	[A -> dummy_tag] -> [B -> dummy_tag]
 *
 * where the dummy_tags are identical and equal to any dummy tags
 * introduced by any other call to this function.
 */
static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
{
	static char dummy;
	isl_ctx *ctx;
	isl_id *id;
	isl_space *space;
	isl_set *domain, *range;

	ctx = isl_map_get_ctx(map);

	id = isl_id_alloc(ctx, NULL, &dummy);
	space = isl_space_params(isl_map_get_space(map));
	space = isl_space_set_from_params(space);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	space = isl_space_map_from_set(space);

	domain = isl_map_wrap(map);
	range = isl_map_wrap(isl_map_universe(space));
	map = isl_map_from_domain_and_range(domain, range);
	map = isl_map_zip(map);

	return map;
}
/* Given that at least one of "src" or "dst" is compressed, return
 * a map between the spaces of these nodes restricted to the affine
 * hull that was used in the compression.
 */
static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_set *dom, *ran;

	if (src->compressed)
		dom = isl_set_copy(src->hull);
	else
		dom = isl_set_universe(isl_space_copy(src->space));
	if (dst->compressed)
		ran = isl_set_copy(dst->hull);
	else
		ran = isl_set_universe(isl_space_copy(dst->space));

	return isl_map_from_domain_and_range(dom, ran);
}
/* Intersect the domains of the nested relations in domain and range
 * of "tagged" with "map".
 */
static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
	__isl_keep isl_map *map)
{
	isl_set *set;

	tagged = isl_map_zip(tagged);
	set = isl_map_wrap(isl_map_copy(map));
	tagged = isl_map_intersect_domain(tagged, set);
	tagged = isl_map_zip(tagged);

	return tagged;
}
/* Return a pointer to the node that lives in the domain space of "map"
 * or NULL if there is no such node.
 */
static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_domain(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}

/* Return a pointer to the node that lives in the range space of "map"
 * or NULL if there is no such node.
 */
static struct isl_sched_node *find_range_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_range(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}
/* Add a new edge to the graph based on the given map
 * and add it to data->graph->edge_table[data->type].
 * If a dependence relation of a given type happens to be identical
 * to one of the dependence relations of a type that was added before,
 * then we don't create a new edge, but instead mark the original edge
 * as also representing a dependence of the current type.
 *
 * Edges of type isl_edge_condition or isl_edge_conditional_validity
 * may be specified as "tagged" dependence relations.  That is, "map"
 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
 * the dependence on iterations and a and b are tags.
 * edge->map is set to the relation containing the elements i -> j,
 * while edge->tagged_condition and edge->tagged_validity contain
 * the union of all the "map" relations
 * for which extract_edge is called that result in the same edge->map.
 *
 * If the source or the destination node is compressed, then
 * intersect both "map" and "tagged" with the constraints that
 * were used to construct the compression.
 * This ensures that there are no schedule constraints defined
 * outside of these domains, while the scheduler no longer has
 * any control over those outside parts.
 */
static isl_stat extract_edge(__isl_take isl_map *map, void *user)
{
	isl_ctx *ctx = isl_map_get_ctx(map);
	struct isl_extract_edge_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	struct isl_sched_node *src, *dst;
	struct isl_sched_edge *edge;
	isl_map *tagged = NULL;

	if (data->type == isl_edge_condition ||
	    data->type == isl_edge_conditional_validity) {
		if (isl_map_can_zip(map)) {
			tagged = isl_map_copy(map);
			map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
		} else {
			tagged = insert_dummy_tags(isl_map_copy(map));
		}
	}

	src = find_domain_node(ctx, graph, map);
	dst = find_range_node(ctx, graph, map);

	if (!src || !dst) {
		isl_map_free(map);
		isl_map_free(tagged);
		return isl_stat_ok;
	}

	if (src->compressed || dst->compressed) {
		isl_map *hull;
		hull = extract_hull(src, dst);
		if (tagged)
			tagged = map_intersect_domains(tagged, hull);
		map = isl_map_intersect(map, hull);
	}

	graph->edge[graph->n_edge].src = src;
	graph->edge[graph->n_edge].dst = dst;
	graph->edge[graph->n_edge].map = map;
	graph->edge[graph->n_edge].types = 0;
	graph->edge[graph->n_edge].tagged_condition = NULL;
	graph->edge[graph->n_edge].tagged_validity = NULL;
	set_type(&graph->edge[graph->n_edge], data->type);
	if (data->type == isl_edge_condition)
		graph->edge[graph->n_edge].tagged_condition =
					isl_union_map_from_map(tagged);
	if (data->type == isl_edge_conditional_validity)
		graph->edge[graph->n_edge].tagged_validity =
					isl_union_map_from_map(tagged);

	edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
	if (!edge) {
		graph->n_edge++;
		return isl_stat_error;
	}
	if (edge == &graph->edge[graph->n_edge])
		return graph_edge_table_add(ctx, graph, data->type,
				    &graph->edge[graph->n_edge++]);

	if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
		return isl_stat_error;

	return graph_edge_table_add(ctx, graph, data->type, edge);
}
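/* For illustration (with made-up names), a conditional validity
 * constraint may be passed in tagged form such as
 *
 *	{ [S[i] -> ref1[]] -> [T[i] -> ref2[]] }
 *
 * extract_edge then stores { S[i] -> T[i] } in edge->map and keeps the
 * tagged relation itself in edge->tagged_validity (or in
 * edge->tagged_condition for condition edges), so that the tags remain
 * available to later phases.
 */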
/* Initialize the schedule graph "graph" from the schedule constraints "sc".
 *
 * The context is included in the domain before the nodes of
 * the graphs are extracted in order to be able to exploit
 * any possible additional equalities.
 * Note that this intersection is only performed locally here.
 */
static isl_stat graph_init(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_union_set *domain;
	struct isl_extract_edge_data data;
	enum isl_edge_type i;
	int r;

	if (!sc)
		return isl_stat_error;

	ctx = isl_schedule_constraints_get_ctx(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	graph->n = isl_union_set_n_set(domain);
	isl_union_set_free(domain);

	if (graph_alloc(ctx, graph, graph->n,
	    isl_schedule_constraints_n_map(sc)) < 0)
		return isl_stat_error;

	if (compute_max_row(graph, sc) < 0)
		return isl_stat_error;
	graph->root = 1;
	graph->n = 0;
	domain = isl_schedule_constraints_get_domain(sc);
	domain = isl_union_set_intersect_params(domain,
						isl_set_copy(sc->context));
	r = isl_union_set_foreach_set(domain, &extract_node, graph);
	isl_union_set_free(domain);
	if (r < 0)
		return isl_stat_error;
	if (graph_init_table(ctx, graph) < 0)
		return isl_stat_error;
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		graph->max_edge[i] = isl_union_map_n_map(sc->constraint[i]);
	if (graph_init_edge_tables(ctx, graph) < 0)
		return isl_stat_error;
	graph->n_edge = 0;
	data.graph = graph;
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		data.type = i;
		if (isl_union_map_foreach_map(sc->constraint[i],
						&extract_edge, &data) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}
/* Check whether there is any dependence from node[j] to node[i]
 * or from node[i] to node[j].
 */
static isl_bool node_follows_weak(int i, int j, void *user)
{
	isl_bool f;
	struct isl_sched_graph *graph = user;

	f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
	if (f < 0 || f)
		return f;
	return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
}

/* Check whether there is a (conditional) validity dependence from node[j]
 * to node[i], forcing node[i] to follow node[j].
 */
static isl_bool node_follows_strong(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Use Tarjan's algorithm for computing the strongly connected components
 * in the dependence graph only considering those edges defined by "follows".
 */
static int detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	isl_bool (*follows)(int i, int j, void *user))
{
	int i, n;
	struct isl_tarjan_graph *g = NULL;

	g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
	if (!g)
		return -1;

	graph->scc = 0;
	i = 0;
	n = graph->n;
	while (n) {
		while (g->order[i] != -1) {
			graph->node[g->order[i]].scc = graph->scc;
			--n;
			++i;
		}
		++i;
		graph->scc++;
	}

	isl_tarjan_graph_free(g);

	return 0;
}
/* Apply Tarjan's algorithm to detect the strongly connected components
 * in the dependence graph.
 * Only consider the (conditional) validity dependences and clear "weak".
 */
static int detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 0;
	return detect_ccs(ctx, graph, &node_follows_strong);
}

/* Apply Tarjan's algorithm to detect the (weakly) connected components
 * in the dependence graph.
 * Consider all dependences and set "weak".
 */
static int detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 1;
	return detect_ccs(ctx, graph, &node_follows_weak);
}

static int cmp_scc(const void *a, const void *b, void *data)
{
	struct isl_sched_graph *graph = data;
	const int *i1 = a;
	const int *i2 = b;

	return graph->node[*i1].scc - graph->node[*i2].scc;
}

/* Sort the elements of graph->sorted according to the corresponding SCCs.
 */
static int sort_sccs(struct isl_sched_graph *graph)
{
	return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
}
/* Given a dependence relation R from "node" to itself,
 * construct the set of coefficients of valid constraints for elements
 * in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x such that
 *
 *	c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
 *
 * or, equivalently,
 *
 *	c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
 *
 * We choose here to compute the dual of delta R.
 * Alternatively, we could have computed the dual of R, resulting
 * in a set of tuples c_0, c_n, c_x, c_y, and then
 * plugged in (c_0, c_n, c_x, -c_x).
 *
 * If "node" has been compressed, then the dependence relation
 * is also compressed before the set of coefficients is computed.
 */
static __isl_give isl_basic_set *intra_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_node *node,
	__isl_take isl_map *map)
{
	isl_set *delta;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;

	m = isl_map_to_basic_set_try_get(graph->intra_hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	if (node->compressed) {
		map = isl_map_preimage_domain_multi_aff(map,
				    isl_multi_aff_copy(node->decompress));
		map = isl_map_preimage_range_multi_aff(map,
				    isl_multi_aff_copy(node->decompress));
	}
	delta = isl_set_remove_divs(isl_map_deltas(map));
	coef = isl_set_coefficients(delta);
	graph->intra_hmap = isl_map_to_basic_set_set(graph->intra_hmap, key,
					isl_basic_set_copy(coef));

	return coef;
}
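/* As a small worked example (with a made-up dependence), for the
 * self-dependence
 *
 *	R = { S[i] -> S[i + 1] }
 *
 * we have delta R = { [1] }, so the coefficients computed above
 * describe all tuples (c_0, c_n, c_x) with
 *
 *	c_0 + c_n n + c_x >= 0
 *
 * i.e., exactly the affine forms that are non-negative on the
 * dependence distance 1.
 */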
/* Given a dependence relation R, construct the set of coefficients
 * of valid constraints for elements in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x, c_y such that
 *
 *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
 *
 * If the source or destination nodes of "edge" have been compressed,
 * then the dependence relation is also compressed before
 * the set of coefficients is computed.
 */
static __isl_give isl_basic_set *inter_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_edge *edge,
	__isl_take isl_map *map)
{
	isl_set *set;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;

	m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	if (edge->src->compressed)
		map = isl_map_preimage_domain_multi_aff(map,
				    isl_multi_aff_copy(edge->src->decompress));
	if (edge->dst->compressed)
		map = isl_map_preimage_range_multi_aff(map,
				    isl_multi_aff_copy(edge->dst->decompress));
	set = isl_map_wrap(isl_map_remove_divs(map));
	coef = isl_set_coefficients(set);
	graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
					isl_basic_set_copy(coef));

	return coef;
}
/* Add constraints to graph->lp that force validity for the given
 * dependence from a node i to itself.
 * That is, add constraints that enforce
 *
 *	(c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *	= c_i_x (y - x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (0, 0, c_i_x^+ - c_i_x^-),
 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
 *
 * Actually, we do not construct constraints for the c_i_x themselves,
 * but for the coefficients of c_i_x written as a linear combination
 * of the columns in node->cmap.
 */
static int add_intra_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	unsigned total;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(node->cmap));
	if (!coef)
		goto error;

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, -1);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, 1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
/* Add constraints to graph->lp that force validity for the given
 * dependence from node i to node j.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (c_j_0 - c_i_0, c_j_n^+ - c_j_n^- - (c_i_n^+ - c_i_n^-),
 *  c_j_x^+ - c_j_x^- - (c_i_x^+ - c_i_x^-)),
 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
 *
 * Actually, we do not construct constraints for the c_*_x themselves,
 * but for the coefficients of c_*_x written as a linear combination
 * of the columns in node->cmap.
 */
static int add_inter_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	unsigned total;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(src->cmap));
	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set) + src->nvar,
		    isl_mat_copy(dst->cmap));
	if (!coef)
		goto error;

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, 1);
	isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -1);
	isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, 1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, -1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, 1);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -1);
	isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, 1);
	isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, 1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, -1);

	edge->start = graph->lp->n_ineq;
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	if (!graph->lp)
		goto error;
	isl_space_free(dim);
	edge->end = graph->lp->n_ineq;

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from a node i to itself.
 * If s = 1, we add the constraint
 *
 *	c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	-c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
 * with each coefficient (except m_0) represented as a pair of non-negative
 * coefficients.
 *
 * Actually, we do not construct constraints for the c_i_x themselves,
 * but for the coefficients of c_i_x written as a linear combination
 * of the columns in node->cmap.
 *
 * If "local" is set, then we add constraints
 *
 *	c_i_x (y - x) <= 0
 *
 * or
 *
 *	-c_i_x (y - x) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in (0, 0, -s * c_i_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static int add_intra_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	unsigned total;
	unsigned nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(node->cmap));
	if (!coef)
		goto error;

	nparam = isl_space_dim(node->space, isl_dim_param);
	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, s);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, -s);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
error:
	isl_space_free(dim);
	return -1;
}
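/* Continuing with a made-up self-dependence R = { S[i] -> S[i + 1] },
 * for which delta R = { [1] }, the s = 1 case plugs (m_0, m_n, -c_i_x)
 * into the constraint c_0 + c_n n + c_x d >= 0 with d = 1, giving
 *
 *	-c_i_x + m_0 + m_n n >= 0
 *
 * i.e., the dependence distance c_i_x (y - x) = c_i_x is bounded from
 * above by m_0 + m_n n, as described above.
 */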
/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from node i to node j.
 * If s = 1, we add the constraint
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *		<= m_0 + m_n n
 *
 * or
 *
 *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
 *		<= m_0 + m_n n
 *
 * or
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
 *  s*c_i_x, -s*c_j_x)
 * with each coefficient (except m_0, c_j_0 and c_i_0)
 * represented as a pair of non-negative coefficients.
 *
 * Actually, we do not construct constraints for the c_*_x themselves,
 * but for the coefficients of c_*_x written as a linear combination
 * of the columns in node->cmap.
 *
 * If "local" is set, then we add constraints
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
 *
 * or
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in
 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, -s*c_j_x+s*c_i_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static int add_inter_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	unsigned total;
	unsigned nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set), isl_mat_copy(src->cmap));
	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
		    isl_space_dim(dim, isl_dim_set) + src->nvar,
		    isl_mat_copy(dst->cmap));
	if (!coef)
		goto error;

	nparam = isl_space_dim(src->space, isl_dim_param);
	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, -s);
	isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, s);
	isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, -s);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, s);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, -s);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, s);
	isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, -s);
	isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, s);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, -s);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, s);

	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
error:
	isl_space_free(dim);
	return -1;
}

/* Add all validity constraints to graph->lp.
 *
 * An edge that is forced to be local needs to have its dependence
 * distances equal to zero.  We take care of bounding them by 0 from below
 * here.  add_all_proximity_constraints takes care of bounding them by 0
 * from above.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_validity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src != edge->dst)
			continue;
		if (add_intra_validity_constraints(graph, edge) < 0)
			return -1;
	}

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src == edge->dst)
			continue;
		if (add_inter_validity_constraints(graph, edge) < 0)
			return -1;
	}

	return 0;
}

/* Add constraints to graph->lp that bound the dependence distance
 * for all dependence relations.
 * If a given proximity dependence is identical to a validity
 * dependence, then the dependence distance is already bounded
 * from below (by zero), so we only need to bound the distance
 * from above.  (This includes the case of "local" dependences
 * which are treated as validity dependence by add_all_validity_constraints.)
 * Otherwise, we need to bound the distance both from above and from below.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_proximity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_proximity(edge) && !local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (is_validity(edge) || local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
	}

	return 0;
}

/* Compute a basis for the rows in the linear part of the schedule
 * and extend this basis to a full basis.  The remaining rows
 * can then be used to force linear independence from the rows
 * in the schedule.
 *
 * In particular, given the schedule rows S, we compute
 *
 *	S = H Q
 *	S U = H
 *
 * with H the Hermite normal form of S.  That is, all but the
 * first rank columns of H are zero and so each row in S is
 * a linear combination of the first rank rows of Q.
 * The matrix Q is then transposed because we will write the
 * coefficients of the next schedule row as a column vector s
 * and express this s as a linear combination s = Q c of the
 * rows of Q.
 * Similarly, the matrix U is transposed such that we can
 * compute the coefficients c = U s from a schedule row s.
 */
static int node_update_cmap(struct isl_sched_node *node)
{
	isl_mat *H, *U, *Q;
	int n_row = isl_mat_rows(node->sched);

	H = isl_mat_sub_alloc(node->sched, 0, n_row,
			      1 + node->nparam, node->nvar);

	H = isl_mat_left_hermite(H, 0, &U, &Q);
	isl_mat_free(node->cmap);
	isl_mat_free(node->cinv);
	isl_mat_free(node->ctrans);
	node->ctrans = isl_mat_copy(Q);
	node->cmap = isl_mat_transpose(Q);
	node->cinv = isl_mat_transpose(U);
	node->rank = isl_mat_initial_non_zero_cols(H);
	isl_mat_free(H);

	if (!node->cmap || !node->cinv || !node->ctrans || node->rank < 0)
		return -1;
	return 0;
}

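/* Worked example (added comment; a sketch of the decomposition described
 * above, not part of the original text).
 * If the schedule computed so far has the single row S = [ 2 2 ]
 * (iterator columns only), then one possible decomposition S = H Q is
 *
 *	H = [ 2 0 ]	Q = [ 1 1 ]
 *			    [ 0 1 ]
 *
 * with Q unimodular, so rank = 1.  A new row is then linearly independent
 * of S exactly when its coefficient vector has a non-zero component along
 * the remaining (second) row of Q, i.e., when the final entries of
 * c = cinv s are not all zero.
 */
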
/* How many times should we count the constraints in "edge"?
 *
 * If carry is set, then we are counting the number of
 * (validity or conditional validity) constraints that will be added
 * in setup_carry_lp and we count each edge exactly once.
 *
 * Otherwise, we count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If an edge is only marked conditional_validity then it counts
 * as zero since it is only checked afterwards.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int edge_multiplicity(struct isl_sched_edge *edge, int carry,
	int use_coincidence)
{
	if (carry && !is_validity(edge) && !is_conditional_validity(edge))
		return 0;
	if (carry)
		return 1;
	if (is_proximity(edge) || is_local(edge))
		return 2;
	if (use_coincidence && is_coincidence(edge))
		return 2;
	if (is_validity(edge))
		return 1;
	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added for the given map.
 *
 * "use_coincidence" is set if we should take into account coincidence edges.
 */
static int count_map_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map,
	int *n_eq, int *n_ineq, int carry, int use_coincidence)
{
	isl_basic_set *coef;
	int f = edge_multiplicity(edge, carry, use_coincidence);

	if (f == 0) {
		isl_map_free(map);
		return 0;
	}

	if (edge->src == edge->dst)
		coef = intra_coefficients(graph, edge->src, map);
	else
		coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;
	*n_eq += f * coef->n_eq;
	*n_ineq += f * coef->n_ineq;
	isl_basic_set_free(coef);

	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added to the main lp problem.
 * We count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int count_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq, int use_coincidence)
{
	int i;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_map *map = isl_map_copy(edge->map);

		if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
					  0, use_coincidence) < 0)
			return -1;
	}

	return 0;
}

/* Count the number of constraints that will be added by
 * add_bound_constant_constraints to bound the values of the constant terms
 * and increment *n_eq and *n_ineq accordingly.
 *
 * In practice, add_bound_constant_constraints only adds inequalities.
 */
static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	if (isl_options_get_schedule_max_constant_term(ctx) == -1)
		return isl_stat_ok;

	*n_ineq += graph->n;

	return isl_stat_ok;
}

/* Add constraints to bound the values of the constant terms in the schedule,
 * if requested by the user.
 *
 * The maximal value of the constant terms is defined by the option
 * "schedule_max_constant_term".
 *
 * Within each node, the coefficients have the following order:
 *	- c_i_0
 *	- positive and negative parts of c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 */
static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i, k;
	int max;
	int total;

	max = isl_options_get_schedule_max_constant_term(ctx);
	if (max == -1)
		return isl_stat_ok;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][1 + node->start], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	return isl_stat_ok;
}

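/* Example (added comment): with schedule_max_constant_term set to 10,
 * the loop above adds, for each node i, the single inequality
 *
 *	-c_i_0 + 10 >= 0
 *
 * i.e., c_i_0 <= 10.  No lower bound is needed here since all variables
 * of the LP are already non-negative.
 */
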
/* Count the number of constraints that will be added by
 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
 * accordingly.
 *
 * In practice, add_bound_coefficient_constraints only adds inequalities.
 */
static int count_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	int i;

	if (ctx->opt->schedule_max_coefficient == -1)
		return 0;

	for (i = 0; i < graph->n; ++i)
		*n_ineq += 2 * graph->node[i].nparam + 2 * graph->node[i].nvar;

	return 0;
}

/* Add constraints that bound the values of the variable and parameter
 * coefficients of the schedule.
 *
 * The maximal value of the coefficients is defined by the option
 * 'schedule_max_coefficient'.
 */
static int add_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i, j, k;
	int max_coefficient;
	int total;

	max_coefficient = ctx->opt->schedule_max_coefficient;

	if (max_coefficient == -1)
		return 0;

	total = isl_basic_set_total_dim(graph->lp);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		for (j = 0; j < 2 * node->nparam + 2 * node->nvar; ++j) {
			int dim;
			k = isl_basic_set_alloc_inequality(graph->lp);
			if (k < 0)
				return -1;
			dim = 1 + node->start + 1 + j;
			isl_seq_clr(graph->lp->ineq[k], 1 + total);
			isl_int_set_si(graph->lp->ineq[k][dim], -1);
			isl_int_set_si(graph->lp->ineq[k][0], max_coefficient);
		}
	}

	return 0;
}

/* Construct an ILP problem for finding schedule coefficients
 * that result in non-negative, but small dependence distances
 * over all dependences.
 * In particular, the dependence distances over proximity edges
 * are bounded by m_0 + m_n n and we compute schedule coefficients
 * with small values (preferably zero) of m_n and m_0.
 *
 * All variables of the ILP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of positive and negative parts of m_n coefficients
 *	- m_0
 *	- sum of positive and negative parts of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- positive and negative parts of m_n coefficients
 * and for each node
 *	- c_i_0
 *	- positive and negative parts of c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 *
 * The c_i_x are not represented directly, but through the columns of
 * node->cmap.  That is, the computed values are for variable t_i_x
 * such that c_i_x = Q t_i_x with Q equal to node->cmap.
 *
 * The constraints are those from the edges plus two or three equalities
 * to express the sums.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i, j;
	int k;
	unsigned nparam;
	unsigned total;
	isl_space *space;
	int parametric;
	int param_pos;
	int n_eq, n_ineq;

	parametric = ctx->opt->schedule_parametric;
	nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
	param_pos = 4;
	total = param_pos + 2 * nparam;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		if (node_update_cmap(node) < 0)
			return isl_stat_error;
		node->start = total;
		total += 1 + 2 * (node->nparam + node->nvar);
	}

	if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
		return isl_stat_error;
	if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;
	if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	space = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 2 + parametric;

	graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1], -1);
	for (i = 0; i < 2 * nparam; ++i)
		isl_int_set_si(graph->lp->eq[k][1 + param_pos + i], 1);

	if (parametric) {
		k = isl_basic_set_alloc_equality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->eq[k], 1 + total);
		isl_int_set_si(graph->lp->eq[k][3], -1);
		for (i = 0; i < graph->n; ++i) {
			int pos = 1 + graph->node[i].start + 1;

			for (j = 0; j < 2 * graph->node[i].nparam; ++j)
				isl_int_set_si(graph->lp->eq[k][pos + j], 1);
		}
	}

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][4], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node->start + 1 + 2 * node->nparam;

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	if (add_bound_constant_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_bound_coefficient_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_all_validity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;
	if (add_all_proximity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}

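/* Example of the variable layout (added comment; it follows the ordering
 * described above setup_lp and is not part of the original text).
 * With one parameter (nparam = 1) and a single node with nparam = 1 and
 * nvar = 2, the LP variables are
 *
 *	0: sum of m_n parts		4,5: m_n^-, m_n^+
 *	1: m_0				6: c_i_0	(node->start = 6)
 *	2: sum of c_n parts		7,8: c_i_n^-, c_i_n^+
 *	3: sum of c_x parts		9..12: c_i_x parts (2 per variable)
 *
 * so total = 4 + 2 * nparam + 1 + 2 * (1 + 2) = 13, and the lexmin solver
 * minimizes the sums (and m_0) before the per-node coefficients.
 */
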
/* Analyze the conflicting constraint found by
 * isl_tab_basic_set_non_trivial_lexmin.  If it corresponds to the validity
 * constraint of one of the edges between distinct nodes, living, moreover
 * in distinct SCCs, then record the source and sink SCC as this may
 * be a good place to cut between SCCs.
 */
static int check_conflict(int con, void *user)
{
	int i;
	struct isl_sched_graph *graph = user;

	if (graph->src_scc >= 0)
		return 0;

	con -= graph->lp->n_eq;

	if (con >= graph->lp->n_ineq)
		return 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (!is_validity(&graph->edge[i]))
			continue;
		if (graph->edge[i].src == graph->edge[i].dst)
			continue;
		if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
			continue;
		if (graph->edge[i].start > con)
			continue;
		if (graph->edge[i].end <= con)
			continue;
		graph->src_scc = graph->edge[i].src->scc;
		graph->dst_scc = graph->edge[i].dst->scc;
	}

	return 0;
}

/* Check whether the next schedule row of the given node needs to be
 * non-trivial.  Lower-dimensional domains may have some trivial rows,
 * but as soon as the number of remaining required non-trivial rows
 * is as large as the number of remaining rows to be computed,
 * all remaining rows need to be non-trivial.
 */
static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
{
	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
}

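/* Example (added comment): with maxvar = 3 and n_row = 1, a node with
 * nvar = 3 and rank = 1 gives 3 - 1 >= 3 - 1, so its next row must be
 * non-trivial, while a node with nvar = 1 and rank = 1 (0 >= 2 is false)
 * may still receive trivial rows.
 */
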
/* Solve the ILP problem constructed in setup_lp.
 * For each node such that all the remaining rows of its schedule
 * need to be non-trivial, we construct a non-triviality region.
 * This region imposes that the next row is independent of previous rows.
 * In particular, the coefficients c_i_x are represented by t_i_x
 * variables with c_i_x = Q t_i_x and Q a unimodular matrix such that
 * its first columns span the rows of the previously computed part
 * of the schedule.  The non-triviality region enforces that at least
 * one of the remaining components of t_i_x is non-zero, i.e.,
 * that the new schedule row depends on at least one of the remaining
 * rows of Q.
 */
static __isl_give isl_vec *solve_lp(struct isl_sched_graph *graph)
{
	int i;
	isl_vec *sol;
	isl_basic_set *lp;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int skip = node->rank;
		graph->region[i].pos = node->start + 1 + 2*(node->nparam+skip);
		if (needs_row(graph, node))
			graph->region[i].len = 2 * (node->nvar - skip);
		else
			graph->region[i].len = 0;
	}
	lp = isl_basic_set_copy(graph->lp);
	sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
				       graph->region, &check_conflict, graph);
	return sol;
}

/* Update the schedules of all nodes based on the given solution
 * of the LP problem.
 * The new row is added to the current band.
 * All possibly negative coefficients are encoded as a difference
 * of two non-negative variables, so we need to perform the subtraction
 * here.  Moreover, if use_cmap is set, then the solution does
 * not refer to the actual coefficients c_i_x, but instead to variables
 * t_i_x such that c_i_x = Q t_i_x and Q is equal to node->cmap.
 * In this case, we then also need to perform this multiplication
 * to obtain the values of c_i_x.
 *
 * If coincident is set, then the caller guarantees that the new
 * row satisfies the coincidence constraints.
 */
static int update_schedule(struct isl_sched_graph *graph,
	__isl_take isl_vec *sol, int use_cmap, int coincident)
{
	int i, j;
	isl_vec *csol = NULL;

	if (!sol)
		goto error;
	if (sol->size == 0)
		isl_die(sol->ctx, isl_error_internal,
			"no solution found", goto error);
	if (graph->n_total_row >= graph->max_row)
		isl_die(sol->ctx, isl_error_internal,
			"too many schedule rows", goto error);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = node->start;
		int row = isl_mat_rows(node->sched);

		isl_vec_free(csol);
		csol = isl_vec_alloc(sol->ctx, node->nvar);
		if (!csol)
			goto error;

		isl_map_free(node->sched_map);
		node->sched_map = NULL;
		node->sched = isl_mat_add_rows(node->sched, 1);
		if (!node->sched)
			goto error;
		node->sched = isl_mat_set_element(node->sched, row, 0,
						  sol->el[1 + pos]);
		for (j = 0; j < node->nparam + node->nvar; ++j)
			isl_int_sub(sol->el[1 + pos + 1 + 2 * j + 1],
				    sol->el[1 + pos + 1 + 2 * j + 1],
				    sol->el[1 + pos + 1 + 2 * j]);
		for (j = 0; j < node->nparam; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, 1 + j, sol->el[1+pos+1+2*j+1]);
		for (j = 0; j < node->nvar; ++j)
			isl_int_set(csol->el[j],
				    sol->el[1+pos+1+2*(node->nparam+j)+1]);
		if (use_cmap)
			csol = isl_mat_vec_product(isl_mat_copy(node->cmap),
						   csol);
		if (!csol)
			goto error;
		for (j = 0; j < node->nvar; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, 1 + node->nparam + j, csol->el[j]);
		node->coincident[graph->n_total_row] = coincident;
	}
	isl_vec_free(sol);
	isl_vec_free(csol);

	graph->n_row++;
	graph->n_total_row++;

	return 0;
error:
	isl_vec_free(sol);
	isl_vec_free(csol);
	return -1;
}

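/* Example (added comment): a coefficient stored as the pair
 * (negative part, positive part) = (1, 3) in "sol" decodes to 3 - 1 = 2;
 * the subtraction loop above stores this difference in place of the
 * positive part before it is copied into node->sched.  With use_cmap set,
 * the nvar entries are additionally multiplied by node->cmap to turn
 * t_i_x back into c_i_x.
 */
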
/* Convert row "row" of node->sched into an isl_aff living in "ls"
 * and return this isl_aff.
 */
static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
	struct isl_sched_node *node, int row)
{
	int j;
	isl_int v;
	isl_aff *aff;

	isl_int_init(v);

	aff = isl_aff_zero_on_domain(ls);
	isl_mat_get_element(node->sched, row, 0, &v);
	aff = isl_aff_set_constant(aff, v);
	for (j = 0; j < node->nparam; ++j) {
		isl_mat_get_element(node->sched, row, 1 + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
	}
	for (j = 0; j < node->nvar; ++j) {
		isl_mat_get_element(node->sched, row, 1 + node->nparam + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
	}

	isl_int_clear(v);

	return aff;
}

/* Convert the "n" rows starting at "first" of node->sched into a multi_aff
 * and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
	struct isl_sched_node *node, int first, int n)
{
	int i;
	isl_space *space;
	isl_local_space *ls;
	isl_aff *aff;
	isl_multi_aff *ma;
	int nrow;

	if (!node)
		return NULL;
	nrow = isl_mat_rows(node->sched);
	if (node->compressed)
		space = isl_multi_aff_get_domain_space(node->decompress);
	else
		space = isl_space_copy(node->space);
	ls = isl_local_space_from_space(isl_space_copy(space));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, n);
	ma = isl_multi_aff_zero(space);

	for (i = first; i < first + n; ++i) {
		aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
		ma = isl_multi_aff_set_aff(ma, i - first, aff);
	}

	isl_local_space_free(ls);

	if (node->compressed)
		ma = isl_multi_aff_pullback_multi_aff(ma,
					isl_multi_aff_copy(node->compress));

	return ma;
}

/* Convert node->sched into a multi_aff and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
	struct isl_sched_node *node)
{
	int nrow;

	nrow = isl_mat_rows(node->sched);
	return node_extract_partial_schedule_multi_aff(node, 0, nrow);
}

/* Convert node->sched into a map and return this map.
 *
 * The result is cached in node->sched_map, which needs to be released
 * whenever node->sched is updated.
 * It is defined over the uncompressed node domain.
 */
static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
{
	if (!node->sched_map) {
		isl_multi_aff *ma;

		ma = node_extract_schedule_multi_aff(node);
		node->sched_map = isl_map_from_multi_aff(ma);
	}

	return isl_map_copy(node->sched_map);
}

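/* Example (added comment): for a node with space { S[i, j] }, one
 * parameter and schedule rows (0, 0 | 1, 0) and (1, 0 | 0, 1), i.e.,
 * constant and parameter columns followed by the iterator columns,
 * the extracted map is
 *
 *	{ S[i, j] -> [i, 1 + j] }
 */
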
/* Construct a map that can be used to update a dependence relation
 * based on the current schedule.
 * That is, construct a map expressing that source and sink
 * are executed within the same iteration of the current schedule.
 * This map can then be intersected with the dependence relation.
 * This is not the most efficient way, but this shouldn't be a critical
 * operation.
 */
static __isl_give isl_map *specializer(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_map *src_sched, *dst_sched;

	src_sched = node_extract_schedule(src);
	dst_sched = node_extract_schedule(dst);
	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
}

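/* Example (added comment): if the current schedules are { A[i] -> [i] }
 * and { B[j] -> [j + 1] }, then specializer(A, B) is
 * { A[i] -> B[j] : i = j + 1 }, i.e., the pairs of instances that the
 * current (outer) schedule maps to the same point.
 */
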
/* Intersect the domains of the nested relations in domain and range
 * of "umap" with "map".
 */
static __isl_give isl_union_map *intersect_domains(
	__isl_take isl_union_map *umap, __isl_keep isl_map *map)
{
	isl_union_set *uset;

	umap = isl_union_map_zip(umap);
	uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
	umap = isl_union_map_intersect_domain(umap, uset);
	umap = isl_union_map_zip(umap);

	return umap;
}

/* Update the dependence relation of the given edge based
 * on the current schedule.
 * If the dependence is carried completely by the current schedule, then
 * it is removed from the edge_tables.  It is kept in the list of edges
 * as otherwise all edge_tables would have to be recomputed.
 */
static int update_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int empty;
	isl_map *id;

	id = specializer(edge->src, edge->dst);
	edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
	if (!edge->map)
		goto error;

	if (edge->tagged_condition) {
		edge->tagged_condition =
			intersect_domains(edge->tagged_condition, id);
		if (!edge->tagged_condition)
			goto error;
	}
	if (edge->tagged_validity) {
		edge->tagged_validity =
			intersect_domains(edge->tagged_validity, id);
		if (!edge->tagged_validity)
			goto error;
	}

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		goto error;
	if (empty)
		graph_remove_edge(graph, edge);

	isl_map_free(id);
	return 0;
error:
	isl_map_free(id);
	return -1;
}

/* Does the domain of "umap" intersect "uset"?
 */
static int domain_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Does the range of "umap" intersect "uset"?
 */
static int range_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Are the condition dependences of "edge" local with respect to
 * the current schedule?
 *
 * That is, are domain and range of the condition dependences mapped
 * to the same point?
 *
 * In other words, is the condition false?
 */
static int is_condition_false(struct isl_sched_edge *edge)
{
	isl_union_map *umap;
	isl_map *map, *sched, *test;
	int empty, local;

	empty = isl_union_map_is_empty(edge->tagged_condition);
	if (empty < 0 || empty)
		return empty;

	umap = isl_union_map_copy(edge->tagged_condition);
	umap = isl_union_map_zip(umap);
	umap = isl_union_set_unwrap(isl_union_map_domain(umap));
	map = isl_map_from_union_map(umap);

	sched = node_extract_schedule(edge->src);
	map = isl_map_apply_domain(map, sched);
	sched = node_extract_schedule(edge->dst);
	map = isl_map_apply_range(map, sched);

	test = isl_map_identity(isl_map_get_space(map));
	local = isl_map_is_subset(map, test);

	isl_map_free(map);
	isl_map_free(test);

	return local;
}

/* For each conditional validity constraint that is adjacent
 * to a condition with domain in condition_source or range in condition_sink,
 * turn it into an unconditional validity constraint.
 */
static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
	__isl_take isl_union_set *condition_source,
	__isl_take isl_union_set *condition_sink)
{
	int i;

	condition_source = isl_union_set_coalesce(condition_source);
	condition_sink = isl_union_set_coalesce(condition_sink);

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent;
		isl_union_map *validity;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;
		if (is_validity(&graph->edge[i]))
			continue;

		validity = graph->edge[i].tagged_validity;
		adjacent = domain_intersects(validity, condition_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(validity, condition_source);
		if (adjacent < 0)
			goto error;
		if (!adjacent)
			continue;

		set_validity(&graph->edge[i]);
	}

	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return 0;
error:
	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return -1;
}

/* Update the dependence relations of all edges based on the current schedule
 * and enforce conditional validity constraints that are adjacent
 * to satisfied condition constraints.
 *
 * First check if any of the condition constraints are satisfied
 * (i.e., not local to the outer schedule) and keep track of
 * their domain and range.
 * Then update all dependence relations (which removes the non-local
 * constraints).
 * Finally, if any condition constraints turned out to be satisfied,
 * then turn all adjacent conditional validity constraints into
 * unconditional validity constraints.
 */
static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		int local;
		isl_union_set *uset;
		isl_union_map *umap;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;
		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			goto error;
		if (local)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
	}

	for (i = graph->n_edge - 1; i >= 0; --i) {
		if (update_edge(graph, &graph->edge[i]) < 0)
			goto error;
	}

	if (any)
		return unconditionalize_adjacent_validity(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return 0;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}

static void next_band(struct isl_sched_graph *graph)
{
	graph->band_start = graph->n_total_row;
}

/* Return the union of the universe domains of the nodes in "graph"
 * that satisfy "pred".
 */
static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*pred)(struct isl_sched_node *node, int data), int data)
{
	int i;
	isl_set *set;
	isl_union_set *dom;

	for (i = 0; i < graph->n; ++i)
		if (pred(&graph->node[i], data))
			break;

	if (i >= graph->n)
		isl_die(ctx, isl_error_internal,
			"empty component", return NULL);

	set = isl_set_universe(isl_space_copy(graph->node[i].space));
	dom = isl_union_set_from_set(set);

	for (i = i + 1; i < graph->n; ++i) {
		if (!pred(&graph->node[i], data))
			continue;
		set = isl_set_universe(isl_space_copy(graph->node[i].space));
		dom = isl_union_set_union(dom, isl_union_set_from_set(set));
	}

	return dom;
}

/* Return a list of unions of universe domains, where each element
 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
 */
static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, graph->scc);
	for (i = 0; i < graph->scc; ++i) {
		isl_union_set *dom;

		dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
		filters = isl_union_set_list_add(filters, dom);
	}

	return filters;
}

/* Return a list of two unions of universe domains, one for the SCCs up
 * to and including graph->src_scc and another for the other SCCs.
 */
static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	isl_union_set *dom;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, 2);
	dom = isl_sched_graph_domain(ctx, graph,
					&node_scc_at_most, graph->src_scc);
	filters = isl_union_set_list_add(filters, dom);
	dom = isl_sched_graph_domain(ctx, graph,
					&node_scc_at_least, graph->src_scc + 1);
	filters = isl_union_set_list_add(filters, dom);

	return filters;
}

/* Copy nodes that satisfy node_pred from the src dependence graph
 * to the dst dependence graph.
 */
static int copy_nodes(struct isl_sched_graph *dst, struct isl_sched_graph *src,
	int (*node_pred)(struct isl_sched_node *node, int data), int data)
{
	int i;

	dst->n = 0;
	for (i = 0; i < src->n; ++i) {
		int j;

		if (!node_pred(&src->node[i], data))
			continue;

		j = dst->n;
		dst->node[j].space = isl_space_copy(src->node[i].space);
		dst->node[j].compressed = src->node[i].compressed;
		dst->node[j].hull = isl_set_copy(src->node[i].hull);
		dst->node[j].compress =
			isl_multi_aff_copy(src->node[i].compress);
		dst->node[j].decompress =
			isl_multi_aff_copy(src->node[i].decompress);
		dst->node[j].nvar = src->node[i].nvar;
		dst->node[j].nparam = src->node[i].nparam;
		dst->node[j].sched = isl_mat_copy(src->node[i].sched);
		dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
		dst->node[j].coincident = src->node[i].coincident;
		dst->n++;

		if (!dst->node[j].space || !dst->node[j].sched)
			return -1;
		if (dst->node[j].compressed &&
		    (!dst->node[j].hull || !dst->node[j].compress ||
		     !dst->node[j].decompress))
			return -1;
	}

	return 0;
}

/* Copy non-empty edges that satisfy edge_pred from the src dependence graph
 * to the dst dependence graph.
 * If the source or destination node of the edge is not in the destination
 * graph, then it must be a backward proximity edge and it should simply
 * be ignored.
 */
static int copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
	struct isl_sched_graph *src,
	int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
{
	int i;
	enum isl_edge_type t;

	dst->n_edge = 0;
	for (i = 0; i < src->n_edge; ++i) {
		struct isl_sched_edge *edge = &src->edge[i];
		isl_map *map;
		isl_union_map *tagged_condition;
		isl_union_map *tagged_validity;
		struct isl_sched_node *dst_src, *dst_dst;

		if (!edge_pred(edge, data))
			continue;

		if (isl_map_plain_is_empty(edge->map))
			continue;

		dst_src = graph_find_node(ctx, dst, edge->src->space);
		dst_dst = graph_find_node(ctx, dst, edge->dst->space);
		if (!dst_src || !dst_dst) {
			if (is_validity(edge) || is_conditional_validity(edge))
				isl_die(ctx, isl_error_internal,
					"backward (conditional) validity edge",
					return -1);
			continue;
		}

		map = isl_map_copy(edge->map);
		tagged_condition = isl_union_map_copy(edge->tagged_condition);
		tagged_validity = isl_union_map_copy(edge->tagged_validity);

		dst->edge[dst->n_edge].src = dst_src;
		dst->edge[dst->n_edge].dst = dst_dst;
		dst->edge[dst->n_edge].map = map;
		dst->edge[dst->n_edge].tagged_condition = tagged_condition;
		dst->edge[dst->n_edge].tagged_validity = tagged_validity;
		dst->edge[dst->n_edge].types = edge->types;
		dst->n_edge++;

		if (edge->tagged_condition && !tagged_condition)
			return -1;
		if (edge->tagged_validity && !tagged_validity)
			return -1;

		for (t = isl_edge_first; t <= isl_edge_last; ++t) {
			if (edge !=
			    graph_find_edge(src, t, edge->src, edge->dst))
				continue;
			if (graph_edge_table_add(ctx, dst, t,
					    &dst->edge[dst->n_edge - 1]) < 0)
				return -1;
		}
	}

	return 0;
}

/* Compute the maximal number of variables over all nodes.
 * This is the maximal number of linearly independent schedule
 * rows that we need to compute.
 * Just in case we end up in a part of the dependence graph
 * with only lower-dimensional domains, we make sure we will
 * compute the required amount of extra linearly independent rows.
 */
static int compute_maxvar(struct isl_sched_graph *graph)
{
	int i;

	graph->maxvar = 0;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int nvar;

		if (node_update_cmap(node) < 0)
			return -1;
		nvar = node->nvar + graph->n_row - node->rank;
		if (nvar > graph->maxvar)
			graph->maxvar = nvar;
	}

	return 0;
}

/* Extract the subgraph of "graph" that consists of the nodes satisfying
 * "node_pred" and the edges satisfying "edge_pred" and store
 * the result in "sub".
 */
static int extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, struct isl_sched_graph *sub)
{
	int i, n = 0, n_edge = 0;
	int t;

	for (i = 0; i < graph->n; ++i)
		if (node_pred(&graph->node[i], data))
			++n;
	for (i = 0; i < graph->n_edge; ++i)
		if (edge_pred(&graph->edge[i], data))
			++n_edge;
	if (graph_alloc(ctx, sub, n, n_edge) < 0)
		return -1;
	if (copy_nodes(sub, graph, node_pred, data) < 0)
		return -1;
	if (graph_init_table(ctx, sub) < 0)
		return -1;
	for (t = 0; t <= isl_edge_last; ++t)
		sub->max_edge[t] = graph->max_edge[t];
	if (graph_init_edge_tables(ctx, sub) < 0)
		return -1;
	if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
		return -1;
	sub->n_row = graph->n_row;
	sub->max_row = graph->max_row;
	sub->n_total_row = graph->n_total_row;
	sub->band_start = graph->band_start;

	return 0;
}

static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph);
static __isl_give isl_schedule_node *compute_schedule_wcc(
	isl_schedule_node *node, struct isl_sched_graph *graph);

/* Compute a schedule for a subgraph of "graph".  In particular, for
 * the graph composed of nodes that satisfy node_pred and edges that
 * satisfy edge_pred.
 * If the subgraph is known to consist of a single component, then wcc should
 * be set and then we call compute_schedule_wcc on the constructed subgraph.
 * Otherwise, we call compute_schedule, which will check whether the subgraph
 * is connected.
 *
 * The schedule is inserted at "node" and the updated schedule node
 * is returned.
 */
static __isl_give isl_schedule_node *compute_sub_schedule(
	__isl_take isl_schedule_node *node, isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, int wcc)
{
	struct isl_sched_graph split = { 0 };

	if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
				&split) < 0)
		goto error;

	if (wcc)
		node = compute_schedule_wcc(node, &split);
	else
		node = compute_schedule(node, &split);

	graph_free(ctx, &split);
	return node;
error:
	graph_free(ctx, &split);
	return isl_schedule_node_free(node);
}

static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc == scc && edge->dst->scc == scc;
}

static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
{
	return edge->dst->scc <= scc;
}

static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc >= scc;
}

/* Reset the current band by dropping all its schedule rows.
 */
static int reset_band(struct isl_sched_graph *graph)
{
	int i;
	int drop;

	drop = graph->n_total_row - graph->band_start;
	graph->n_total_row -= drop;
	graph->n_row -= drop;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		isl_map_free(node->sched_map);
		node->sched_map = NULL;

		node->sched = isl_mat_drop_rows(node->sched,
						graph->band_start, drop);

		if (!node->sched)
			return -1;
	}

	return 0;
}

/* Split the current graph into two parts and compute a schedule for each
 * part individually.  In particular, one part consists of all SCCs up
 * to and including graph->src_scc, while the other part contains the other
 * SCCs.  The split is enforced by a sequence node inserted at position "node"
 * in the schedule tree.  Return the updated schedule node.
 * If either of these two parts consists of a sequence, then it is spliced
 * into the sequence containing the two parts.
 *
 * The current band is reset.  It would be possible to reuse
 * the previously computed rows as the first rows in the next
 * band, but recomputing them may result in better rows as we are looking
 * at a smaller part of the dependence graph.
 */
static __isl_give isl_schedule_node *compute_split_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int is_seq;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	if (reset_band(graph) < 0)
		return isl_schedule_node_free(node);

	next_band(graph);

	ctx = isl_schedule_node_get_ctx(node);
	filters = extract_split(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);
	node = isl_schedule_node_child(node, 1);
	node = isl_schedule_node_child(node, 0);

	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_least, &edge_src_scc_at_least,
				graph->src_scc + 1, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 1);
	node = isl_schedule_node_child(node, 0);
	node = isl_schedule_node_child(node, 0);
	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_most, &edge_dst_scc_at_most,
				graph->src_scc, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 0);

	return node;
}

/* Insert a band node at position "node" in the schedule tree corresponding
 * to the current band in "graph".  Mark the band node permutable
 * if "permutable" is set.
 * The partial schedules and the coincidence property are extracted
 * from the graph nodes.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *insert_current_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int permutable)
{
	int i;
	int start, end, n;
	isl_multi_aff *ma;
	isl_multi_pw_aff *mpa;
	isl_multi_union_pw_aff *mupa;

	if (!node)
		return NULL;

	if (graph->n < 1)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	start = graph->band_start;
	end = graph->n_total_row;
	n = end - start;

	ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
	mpa = isl_multi_pw_aff_from_multi_aff(ma);
	mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);

	for (i = 1; i < graph->n; ++i) {
		isl_multi_union_pw_aff *mupa_i;

		ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
								start, n);
		mpa = isl_multi_pw_aff_from_multi_aff(ma);
		mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
		mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
	}
	node = isl_schedule_node_insert_partial_schedule(node, mupa);

	for (i = 0; i < n; ++i)
		node = isl_schedule_node_band_member_set_coincident(node, i,
					graph->node[0].coincident[start + i]);
	node = isl_schedule_node_band_set_permutable(node, permutable);

	return node;
}

/* Update the dependence relations based on the current schedule,
 * add the current band to "node" and then continue with the computation
 * of the next band.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *compute_next_band(
	__isl_take isl_schedule_node *node,
	struct isl_sched_graph *graph, int permutable)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, permutable);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_schedule(node, graph);
	node = isl_schedule_node_parent(node);

	return node;
}

/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * a node j to itself.  "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_j_0 + c_j_n n + c_j_x x)
 *	= c_j_x (y - x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (-e_i, 0, c_j_x),
 * with each coefficient in c_j_x represented as a pair of non-negative
 * coefficients.
 */
static int add_intra_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	unsigned total;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);
	if (!coef)
		return -1;

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, -1);
	isl_dim_map_range(dim_map, node->start + 2 * node->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  node->nvar, 1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
}

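/* Example (added comment): for a self-dependence R = { x -> x + 1 } on
 * node j, the constraint added above is c_j_x ((x + 1) - x) = c_j_x >= e_i,
 * encoded as the valid constraint -e_i + c_j_x >= 0 by mapping the e_i
 * variable (at position 3 + pos) with coefficient -1.
 */
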
/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * node j to node k.  "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for R and then plug in
 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, c_k_x - c_j_x)
 * with each coefficient (except e_i, c_k_0 and c_j_0)
 * represented as a pair of non-negative coefficients.
 */
static int add_inter_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	unsigned total;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_space *dim;
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;

	dim = isl_space_domain(isl_space_unwrap(isl_basic_set_get_space(coef)));

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, 1);
	isl_dim_map_range(dim_map, dst->start + 1, 2, 1, 1, dst->nparam, -1);
	isl_dim_map_range(dim_map, dst->start + 2, 2, 1, 1, dst->nparam, 1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, -1);
	isl_dim_map_range(dim_map, dst->start + 2 * dst->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set) + src->nvar, 1,
			  dst->nvar, 1);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -1);
	isl_dim_map_range(dim_map, src->start + 1, 2, 1, 1, src->nparam, 1);
	isl_dim_map_range(dim_map, src->start + 2, 2, 1, 1, src->nparam, -1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 1, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, 1);
	isl_dim_map_range(dim_map, src->start + 2 * src->nparam + 2, 2,
			  isl_space_dim(dim, isl_dim_set), 1,
			  src->nvar, -1);

	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							   coef, dim_map);
	isl_space_free(dim);

	return 0;
}

/* Add constraints to graph->lp that force all (conditional) validity
 * dependences to be respected and attempt to carry them.
 */
static int add_all_constraints(struct isl_sched_graph *graph)
{
	int i, j;
	int pos;

	pos = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_validity(edge) && !is_conditional_validity(edge))
			continue;

		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (edge->src == edge->dst &&
			    add_intra_constraints(graph, edge, map, pos) < 0)
				return -1;
			if (edge->src != edge->dst &&
			    add_inter_constraints(graph, edge, map, pos) < 0)
				return -1;
			++pos;
		}
	}

	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added to the carry_lp problem.
 * We count each edge exactly once.
 */
static int count_all_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq)
{
	int i, j;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (count_map_constraints(graph, edge, map,
						  n_eq, n_ineq, 1, 0) < 0)
				return -1;
		}
	}

	return 0;
}

/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many dependences as possible.
 * In particular, for each dependence i, we bound the dependence distance
 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
 * of all e_i's.  Dependences with e_i = 0 in the solution are simply
 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
 * Note that if the dependence relation is a union of basic maps,
 * then we have to consider each basic map individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * Below, we consider each of those basic maps as a separate "edge".
 *
 * All variables of the LP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of (1 - e_i) over all edges
 *	- sum of positive and negative parts of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- for each edge
 *		- e_i
 *	- for each node
 *		- c_i_0
 *		- positive and negative parts of c_i_n (if parametric)
 *		- positive and negative parts of c_i_x
 *
 * The constraints are those from the (validity) edges plus three equalities
 * to express the sums and n_edge inequalities to express e_i <= 1.
 */
static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i, j;
	int k;
	isl_space *dim;
	unsigned total;
	int n_edge;
	int n_eq, n_ineq;

	n_edge = 0;
	for (i = 0; i < graph->n_edge; ++i)
		n_edge += graph->edge[i].map->n;

	total = 3 + n_edge;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		node->start = total;
		total += 1 + 2 * (node->nparam + node->nvar);
	}

	if (count_all_constraints(graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	dim = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 3;
	n_ineq += n_edge;
	graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
	graph->lp = isl_basic_set_set_rational(graph->lp);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][0], -n_edge);
	isl_int_set_si(graph->lp->eq[k][1], 1);
	for (i = 0; i < n_edge; ++i)
		isl_int_set_si(graph->lp->eq[k][4 + i], 1);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][2], -1);
	for (i = 0; i < graph->n; ++i) {
		int pos = 1 + graph->node[i].start + 1;

		for (j = 0; j < 2 * graph->node[i].nparam; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][3], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node->start + 1 + 2 * node->nparam;

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	for (i = 0; i < n_edge; ++i) {
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
		isl_int_set_si(graph->lp->ineq[k][0], 1);
	}

	if (add_all_constraints(graph) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}

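/* Note (added comment): the first equality above defines variable 0 as
 * n_edge - sum_i e_i, so lexicographically minimizing the variables in
 * order maximizes the number of carried dependences first, then the sum
 * of the parameter coefficients, then the sum of the iterator
 * coefficients.  E.g., with 3 (basic-map) edges and e = (1, 1, 0), the
 * leading objective value is 3 - 2 = 1.
 */
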
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc);

/* Comparison function for sorting the statements based on
 * the corresponding value in "r".
 */
static int smaller_value(const void *a, const void *b, void *data)
{
	isl_vec *r = data;
	const int *i1 = a;
	const int *i2 = b;

	return isl_int_cmp(r->el[*i1], r->el[*i2]);
}

/* If the schedule_split_scaled option is set and if the linear
 * parts of the scheduling rows for all nodes in the graphs have
 * a non-trivial common divisor, then split off the remainder of the
 * constant term modulo this common divisor from the linear part.
 * Otherwise, insert a band node directly and continue with
 * the construction of the schedule.
 *
 * If a non-trivial common divisor is found, then
 * the linear part is reduced and the remainder is enforced
 * by a sequence node with the children placed in the order
 * of this remainder.
 * In particular, we assign an scc index based on the remainder and
 * then rely on compute_component_schedule to insert the sequence and
 * to continue the schedule construction on each part.
 */
static __isl_give isl_schedule_node *split_scaled(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int row;
	int scc;
	isl_ctx *ctx;
	isl_int gcd, gcd_i;
	isl_vec *r;
	int *order;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (!ctx->opt->schedule_split_scaled)
		return compute_next_band(node, graph, 0);
	if (graph->n <= 1)
		return compute_next_band(node, graph, 0);

	isl_int_init(gcd);
	isl_int_init(gcd_i);

	isl_int_set_si(gcd, 0);

	row = isl_mat_rows(graph->node[0].sched) - 1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int cols = isl_mat_cols(node->sched);

		isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
		isl_int_gcd(gcd, gcd, gcd_i);
	}

	isl_int_clear(gcd_i);

	if (isl_int_cmp_si(gcd, 1) <= 0) {
		isl_int_clear(gcd);
		return compute_next_band(node, graph, 0);
	}

	r = isl_vec_alloc(ctx, graph->n);
	order = isl_calloc_array(ctx, int, graph->n);
	if (!r || !order)
		goto error;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		order[i] = i;
		isl_int_fdiv_r(r->el[i], node->sched->row[row][0], gcd);
		isl_int_fdiv_q(node->sched->row[row][0],
			       node->sched->row[row][0], gcd);
		isl_int_mul(node->sched->row[row][0],
			    node->sched->row[row][0], gcd);
		node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
		if (!node->sched)
			goto error;
	}

	if (isl_sort(order, graph->n, sizeof(order[0]), &smaller_value, r) < 0)
		goto error;

	scc = 0;
	for (i = 0; i < graph->n; ++i) {
		if (i > 0 && isl_int_ne(r->el[order[i - 1]], r->el[order[i]]))
			++scc;
		graph->node[order[i]].scc = scc;
	}
	graph->scc = ++scc;
	graph->weak = 0;

	isl_int_clear(gcd);
	isl_vec_free(r);
	free(order);

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, 0);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_component_schedule(node, graph, 0);
	node = isl_schedule_node_parent(node);

	return node;
error:
	isl_vec_free(r);
	free(order);
	isl_int_clear(gcd);
	return isl_schedule_node_free(node);
}

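/* Example (added comment): if the last schedule row is 2i + 4 for one node
 * and 2j + 5 for another, the common divisor of the linear parts is 2, the
 * remainders of the constant terms are 0 and 1, and the rows are reduced
 * to i + 2 and j + 2.  The two remainder classes become separate "SCCs",
 * so the inserted sequence orders the first node's instances before the
 * second's within each iteration of the reduced band.
 */
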
/* Is the schedule row "sol" trivial on node "node"?
 * That is, is the solution zero on the dimensions orthogonal to
 * the previously found solutions?
 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
 *
 * Each coefficient is represented as the difference between
 * two non-negative values in "sol".  "sol" has been computed
 * in terms of the original iterators (i.e., without use of cmap).
 * We construct the schedule row s and write it as a linear
 * combination of (linear combinations of) previously computed schedule rows.
 * s = Q c or c = U s.
 * If the final entries of c are all zero, then the solution is trivial.
 */
static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
{
	int i;
	int pos;
	int trivial;
	isl_ctx *ctx;
	isl_vec *node_sol;

	if (!sol)
		return -1;
	if (node->nvar == node->rank)
		return 0;

	ctx = isl_vec_get_ctx(sol);
	node_sol = isl_vec_alloc(ctx, node->nvar);
	if (!node_sol)
		return -1;

	pos = 1 + node->start + 1 + 2 * node->nparam;

	for (i = 0; i < node->nvar; ++i)
		isl_int_sub(node_sol->el[i],
			    sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);

	node_sol = isl_mat_vec_product(isl_mat_copy(node->cinv), node_sol);
	if (!node_sol)
		return -1;

	trivial = isl_seq_first_non_zero(node_sol->el + node->rank,
					 node->nvar - node->rank) == -1;

	isl_vec_free(node_sol);

	return trivial;
}
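
/* For illustration (hypothetical numbers): if a node has nvar = 2 and
 * rank = 1, with the previously computed rows spanning the direction (1, 1),
 * then a candidate row s = (2, 2) lies in that span, so the final
 * (nvar - rank) entries of c = U s are zero and the row is trivial;
 * a row s = (1, 0) has a non-zero final entry and is not trivial.
 */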
/* Is the schedule row "sol" trivial on any node where it should
 * not be trivial?
 * "sol" has been computed in terms of the original iterators
 * (i.e., without use of cmap).
 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
 */
static int is_any_trivial(struct isl_sched_graph *graph,
	__isl_keep isl_vec *sol)
{
	int i;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int trivial;

		if (!needs_row(graph, node))
			continue;
		trivial = is_trivial(node, sol);
		if (trivial < 0 || trivial)
			return trivial;
	}

	return 0;
}
/* Construct a schedule row for each node such that as many dependences
 * as possible are carried and then continue with the next band.
 *
 * Note that despite the fact that the problem is solved using a rational
 * solver, the solution is guaranteed to be integral.
 * Specifically, the dependence distance lower bounds e_i (and therefore
 * also their sum) are integers.  See Lemma 5 of [1].
 *
 * If the computed schedule row turns out to be trivial on one or
 * more nodes where it should not be trivial, then we throw it away
 * and try again on each component separately.
 *
 * If there is only one component, then we accept the schedule row anyway,
 * but we do not consider it as a complete row and therefore do not
 * increment graph->n_row.  Note that the ranks of the nodes that
 * do get a non-trivial schedule part will get updated regardless and
 * graph->maxvar is computed based on these ranks.  The test for
 * whether more schedule rows are required in compute_schedule_wcc
 * is therefore not affected.
 *
 * Insert a band corresponding to the schedule row at position "node"
 * of the schedule tree and continue with the construction of the schedule.
 * This insertion and the continued construction is performed by split_scaled
 * after optionally checking for non-trivial common divisors.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *carry_dependences(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int n_edge;
	int trivial;
	isl_ctx *ctx;
	isl_vec *sol;
	isl_basic_set *lp;

	if (!node)
		return NULL;

	n_edge = 0;
	for (i = 0; i < graph->n_edge; ++i)
		n_edge += graph->edge[i].map->n;

	ctx = isl_schedule_node_get_ctx(node);
	if (setup_carry_lp(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	lp = isl_basic_set_copy(graph->lp);
	sol = isl_tab_basic_set_non_neg_lexmin(lp);
	if (!sol)
		return isl_schedule_node_free(node);

	if (sol->size == 0) {
		isl_vec_free(sol);
		isl_die(ctx, isl_error_internal,
			"error in schedule construction",
			return isl_schedule_node_free(node));
	}

	isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
	if (isl_int_cmp_si(sol->el[1], n_edge) >= 0) {
		isl_vec_free(sol);
		isl_die(ctx, isl_error_unknown,
			"unable to carry dependences",
			return isl_schedule_node_free(node));
	}

	trivial = is_any_trivial(graph, sol);
	if (trivial < 0) {
		sol = isl_vec_free(sol);
	} else if (trivial && graph->scc > 1) {
		isl_vec_free(sol);
		return compute_component_schedule(node, graph, 1);
	}

	if (update_schedule(graph, sol, 0, 0) < 0)
		return isl_schedule_node_free(node);

	return split_scaled(node, graph);
}
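
/* For illustration (hypothetical count): with n_edge = 3 dependences,
 * the LP minimizes the sum of the slacks e_i, where e_i is 0 if the
 * corresponding dependence is carried and 1 if it need not be.
 * A minimal sum equal to 3 would mean that no dependence is carried,
 * which is reported as an error above; any smaller sum means that
 * at least one dependence is carried by the computed row.
 */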
/* Topologically sort statements mapped to the same schedule iteration
 * and insert a sequence node in front of "node"
 * corresponding to this order.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * If it turns out to be impossible to sort the statements apart,
 * because different dependences impose different orderings
 * on the statements, then we extend the schedule such that
 * it carries at least one more dependence.
 */
static __isl_give isl_schedule_node *sort_statements(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (graph->n < 1)
		isl_die(ctx, isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	if (graph->n == 1)
		return node;

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->n_edge == 0)
		return node;

	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->scc < graph->n) {
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	return node;
}
/* Are there any (non-empty) (conditional) validity edges in the graph?
 */
static int has_validity_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		int empty;

		empty = isl_map_plain_is_empty(graph->edge[i].map);
		if (empty < 0)
			return -1;
		if (empty)
			continue;
		if (is_validity(&graph->edge[i]) ||
		    is_conditional_validity(&graph->edge[i]))
			return 1;
	}

	return 0;
}
/* Should we apply a Feautrier step?
 * That is, did the user request the Feautrier algorithm and are
 * there any validity dependences (left)?
 */
static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
		return 0;

	return has_validity_edges(graph);
}
/* Compute a schedule for a connected dependence graph using Feautrier's
 * multi-dimensional scheduling algorithm and return the updated schedule node.
 *
 * The original algorithm is described in [1].
 * The main idea is to minimize the number of scheduling dimensions, by
 * trying to satisfy as many dependences as possible per scheduling dimension.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
	isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry_dependences(node, graph);
}
/* Turn off the "local" bit on all (condition) edges.
 */
static void clear_local_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_condition(&graph->edge[i]))
			clear_local(&graph->edge[i]);
}
/* Does "graph" have both condition and conditional validity edges?
 */
static int need_condition_check(struct isl_sched_graph *graph)
{
	int i;
	int any_condition = 0;
	int any_conditional_validity = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (is_condition(&graph->edge[i]))
			any_condition = 1;
		if (is_conditional_validity(&graph->edge[i]))
			any_conditional_validity = 1;
	}

	return any_condition && any_conditional_validity;
}
/* Does "graph" contain any coincidence edge?
 */
static int has_any_coincidence(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_coincidence(&graph->edge[i]))
			return 1;

	return 0;
}
/* Extract the final schedule row as a map with the iteration domain
 * of "node" as domain.
 */
static __isl_give isl_map *final_row(struct isl_sched_node *node)
{
	isl_local_space *ls;
	isl_aff *aff;
	int row;

	row = isl_mat_rows(node->sched) - 1;
	ls = isl_local_space_from_space(isl_space_copy(node->space));
	aff = extract_schedule_row(ls, node, row);
	return isl_map_from_aff(aff);
}
/* Is the conditional validity dependence in the edge with index "edge_index"
 * violated by the latest (i.e., final) row of the schedule?
 * That is, is i scheduled after j
 * for any conditional validity dependence i -> j?
 */
static int is_violated(struct isl_sched_graph *graph, int edge_index)
{
	isl_map *src_sched, *dst_sched, *map;
	struct isl_sched_edge *edge = &graph->edge[edge_index];
	int empty;

	src_sched = final_row(edge->src);
	dst_sched = final_row(edge->dst);
	map = isl_map_copy(edge->map);
	map = isl_map_apply_domain(map, src_sched);
	map = isl_map_apply_range(map, dst_sched);
	map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
	empty = isl_map_is_empty(map);
	isl_map_free(map);
	if (empty < 0)
		return -1;

	return !empty;
}
/* Does "graph" have any satisfied condition edges that
 * are adjacent to the conditional validity constraint with
 * domain "conditional_source" and range "conditional_sink"?
 *
 * A satisfied condition is one that is not local.
 * If a condition was forced to be local already (i.e., marked as local)
 * then there is no need to check if it is in fact local.
 *
 * Additionally, mark all adjacent condition edges found as local.
 */
static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
	__isl_keep isl_union_set *conditional_source,
	__isl_keep isl_union_set *conditional_sink)
{
	int i;
	int any = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent, local;
		isl_union_map *condition;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;

		condition = graph->edge[i].tagged_condition;
		adjacent = domain_intersects(condition, conditional_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(condition,
							conditional_source);
		if (adjacent < 0)
			return -1;
		if (!adjacent)
			continue;

		set_local(&graph->edge[i]);

		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			return -1;
		if (!local)
			any = 1;
	}

	return any;
}
/* Are there any violated conditional validity dependences with
 * adjacent condition dependences that are not local with respect
 * to the current schedule?
 * That is, is the conditional validity constraint violated?
 *
 * Additionally, mark all those adjacent condition dependences as local.
 * We also mark those adjacent condition dependences that were not marked
 * as local before, but just happened to be local already.  This ensures
 * that they remain local if the schedule is recomputed.
 *
 * We first collect domain and range of all violated conditional validity
 * dependences and then check if there are any adjacent non-local
 * condition dependences.
 */
static int has_violated_conditional_constraint(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		isl_union_set *uset;
		isl_union_map *umap;
		int violated;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;

		violated = is_violated(graph, i);
		if (violated < 0)
			goto error;
		if (!violated)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);
		source = isl_union_set_coalesce(source);

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
		sink = isl_union_set_coalesce(sink);
	}

	if (any)
		any = has_adjacent_true_conditions(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return any;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}
/* Examine the current band (the rows between graph->band_start and
 * graph->n_total_row), deciding whether to drop it or add it to "node"
 * and then continue with the computation of the next band, if any.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * The caller keeps looking for a new row as long as
 * graph->n_row < graph->maxvar.  If the latest attempt to find
 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
 * then
 * - split between SCCs and start over (assuming we found an interesting
 *	pair of SCCs between which to split)
 * - continue with the next band (assuming the current band has at least
 *	one row)
 * - try to carry as many dependences as possible and continue with the next
 *	band
 * In each case, we first insert a band node in the schedule tree
 * if any rows have been computed.
 *
 * If the caller managed to complete the schedule, we insert a band node
 * (if any schedule rows were computed) and we finish off by topologically
 * sorting the statements based on the remaining dependences.
 */
static __isl_give isl_schedule_node *compute_schedule_finish_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	int insert;

	if (!node)
		return NULL;

	if (graph->n_row < graph->maxvar) {
		isl_ctx *ctx;
		int empty = graph->n_total_row == graph->band_start;

		ctx = isl_schedule_node_get_ctx(node);
		if (!ctx->opt->schedule_maximize_band_depth && !empty)
			return compute_next_band(node, graph, 1);
		if (graph->src_scc >= 0)
			return compute_split_schedule(node, graph);
		if (!empty)
			return compute_next_band(node, graph, 1);
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	insert = graph->n_total_row > graph->band_start;
	if (insert) {
		node = insert_current_band(node, graph, 1);
		node = isl_schedule_node_child(node, 0);
	}
	node = sort_statements(node, graph, initialized);
	if (insert)
		node = isl_schedule_node_parent(node);

	return node;
}
/* Construct a band of schedule rows for a connected dependence graph.
 * The caller is responsible for determining the strongly connected
 * components and calling compute_maxvar first.
 *
 * We try to find a sequence of as many schedule rows as possible that result
 * in non-negative dependence distances (independent of the previous rows
 * in the sequence, i.e., such that the sequence is tilable), with as
 * many of the initial rows as possible satisfying the coincidence constraints.
 * The computation stops if we can't find any more rows or if we have found
 * all the rows we wanted to find.
 *
 * If ctx->opt->schedule_outer_coincidence is set, then we force the
 * outermost dimension to satisfy the coincidence constraints.  If this
 * turns out to be impossible, we fall back on the general scheme above
 * and try to carry as many dependences as possible.
 *
 * If "graph" contains both condition and conditional validity dependences,
 * then we need to check that the conditional schedule constraint
 * is satisfied, i.e., there are no violated conditional validity dependences
 * that are adjacent to any non-local condition dependences.
 * If there are, then we mark all those adjacent condition dependences
 * as local and recompute the current band.  Those dependences that
 * are marked local will then be forced to be local.
 * The initial computation is performed with no dependences marked as local.
 * If we are lucky, then there will be no violated conditional validity
 * dependences adjacent to any non-local condition dependences.
 * Otherwise, we mark some additional condition dependences as local and
 * recompute.  We continue this process until there are no violations left or
 * until we are no longer able to compute a schedule.
 * Since there are only a finite number of dependences,
 * there will only be a finite number of iterations.
 */
static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int has_coincidence;
	int use_coincidence;
	int force_coincidence = 0;
	int check_conditional;

	if (sort_sccs(graph) < 0)
		return isl_stat_error;

	clear_local_edges(graph);
	check_conditional = need_condition_check(graph);
	has_coincidence = has_any_coincidence(graph);

	if (ctx->opt->schedule_outer_coincidence)
		force_coincidence = 1;

	use_coincidence = has_coincidence;
	while (graph->n_row < graph->maxvar) {
		isl_vec *sol;
		int violated;
		int coincident;

		graph->src_scc = -1;
		graph->dst_scc = -1;

		if (setup_lp(ctx, graph, use_coincidence) < 0)
			return isl_stat_error;
		sol = solve_lp(graph);
		if (!sol)
			return isl_stat_error;
		if (sol->size == 0) {
			int empty = graph->n_total_row == graph->band_start;

			isl_vec_free(sol);
			if (use_coincidence && (!force_coincidence || !empty)) {
				use_coincidence = 0;
				continue;
			}
			return isl_stat_ok;
		}
		coincident = !has_coincidence || use_coincidence;
		if (update_schedule(graph, sol, 1, coincident) < 0)
			return isl_stat_error;

		if (!check_conditional)
			continue;
		violated = has_violated_conditional_constraint(ctx, graph);
		if (violated < 0)
			return isl_stat_error;
		if (!violated)
			continue;
		if (reset_band(graph) < 0)
			return isl_stat_error;
		use_coincidence = has_coincidence;
	}

	return isl_stat_ok;
}
/* Compute a schedule for a connected dependence graph by considering
 * the graph as a whole and return the updated schedule node.
 *
 * The actual schedule rows of the current band are computed by
 * compute_schedule_wcc_band.  compute_schedule_finish_band takes
 * care of integrating the band into "node" and continuing
 * the construction.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (compute_schedule_wcc_band(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	return compute_schedule_finish_band(node, graph, 1);
}
/* Clustering information used by compute_schedule_wcc_clustering.
 *
 * "n" is the number of SCCs in the original dependence graph
 * "scc" is an array of "n" elements, each representing an SCC
 * of the original dependence graph.  All entries in the same cluster
 * have the same number of schedule rows.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
 * where each cluster is represented by the index of the first SCC
 * in the cluster.  Initially, each SCC belongs to a cluster containing
 * only that SCC.
 *
 * "scc_in_merge" is used by merge_clusters_along_edge to keep
 * track of which SCCs need to be merged.
 *
 * "cluster" contains the merged clusters of SCCs after the clustering
 * has completed.
 *
 * "scc_node" is a temporary data structure used inside copy_partial.
 * For each SCC, it keeps track of the number of nodes in the SCC
 * that have already been copied.
 */
struct isl_clustering {
	int n;
	struct isl_sched_graph *scc;
	struct isl_sched_graph *cluster;
	int *scc_cluster;
	int *scc_node;
	int *scc_in_merge;
};
/* Initialize the clustering data structure "c" from "graph".
 *
 * In particular, allocate memory, extract the SCCs from "graph"
 * into c->scc, initialize scc_cluster and construct
 * a band of schedule rows for each SCC.
 * Within each SCC, there is only one SCC by definition.
 * Each SCC initially belongs to a cluster containing only that SCC.
 */
static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *graph)
{
	int i;

	c->n = graph->scc;
	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
	c->scc_node = isl_calloc_array(ctx, int, c->n);
	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
	if (!c->scc || !c->cluster ||
	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
		return isl_stat_error;

	for (i = 0; i < c->n; ++i) {
		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
					&edge_scc_exactly, i, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc[i].scc = 1;
		if (compute_maxvar(&c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = i;
	}

	return isl_stat_ok;
}
/* Free all memory allocated for "c".
 */
static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
{
	int i;

	if (c->scc)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->scc[i]);
	free(c->scc);
	if (c->cluster)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->cluster[i]);
	free(c->cluster);
	free(c->scc_cluster);
	free(c->scc_node);
	free(c->scc_in_merge);
}
/* Should we refrain from merging the cluster in "graph" with
 * any other cluster?
 * In particular, is its current schedule band empty and incomplete.
 */
static int bad_cluster(struct isl_sched_graph *graph)
{
	return graph->n_row < graph->maxvar &&
		graph->n_total_row == graph->band_start;
}
/* Return the index of an edge in "graph" that can be used to merge
 * two clusters in "c".
 * Return graph->n_edge if no such edge can be found.
 * Return -1 on error.
 *
 * In particular, return a proximity edge between two clusters
 * that is not marked "no_merge" and such that neither of the
 * two clusters has an incomplete, empty band.
 *
 * If there are multiple such edges, then try and find the most
 * appropriate edge to use for merging.  In particular, pick the edge
 * with the greatest weight.  If there are multiple of those,
 * then pick one with the shortest distance between
 * the two cluster representatives.
 */
static int find_proximity(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i, best = graph->n_edge, best_dist, best_weight;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int dist, weight;

		if (!is_proximity(edge))
			continue;
		if (edge->no_merge)
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		dist = c->scc_cluster[edge->dst->scc] -
			c->scc_cluster[edge->src->scc];
		if (dist == 0)
			continue;
		weight = edge->weight;
		if (best < graph->n_edge) {
			if (best_weight > weight)
				continue;
			if (best_weight == weight && best_dist <= dist)
				continue;
		}
		best = i;
		best_dist = dist;
		best_weight = weight;
	}

	return best;
}
/* Internal data structure used in mark_merge_sccs.
 *
 * "graph" is the dependence graph in which a strongly connected
 * component is constructed.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
 * "src" and "dst" are the indices of the nodes that are being merged.
 */
struct isl_mark_merge_sccs_data {
	struct isl_sched_graph *graph;
	int *scc_cluster;
	int src;
	int dst;
};
/* Check whether the cluster containing node "i" depends on the cluster
 * containing node "j".  If "i" and "j" belong to the same cluster,
 * then they are taken to depend on each other to ensure that
 * the resulting strongly connected component consists of complete
 * clusters.  Furthermore, if "i" and "j" are the two nodes that
 * are being merged, then they are taken to depend on each other as well.
 * Otherwise, check if there is a (conditional) validity dependence
 * from node[j] to node[i], forcing node[i] to follow node[j].
 */
static isl_bool cluster_follows(int i, int j, void *user)
{
	struct isl_mark_merge_sccs_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	int *scc_cluster = data->scc_cluster;

	if (data->src == i && data->dst == j)
		return isl_bool_true;
	if (data->src == j && data->dst == i)
		return isl_bool_true;
	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
		return isl_bool_true;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Mark all SCCs that belong to either of the two clusters in "c"
 * connected by the edge in "graph" with index "edge", or to any
 * of the intermediate clusters.
 * The marking is recorded in c->scc_in_merge.
 *
 * The given edge has been selected for merging two clusters,
 * meaning that there is at least a proximity edge between the two nodes.
 * However, there may also be (indirect) validity dependences
 * between the two nodes.  When merging the two clusters, all clusters
 * containing one or more of the intermediate nodes along the
 * indirect validity dependences need to be merged in as well.
 *
 * First collect all such nodes by computing the strongly connected
 * component (SCC) containing the two nodes connected by the edge, where
 * the two nodes are considered to depend on each other to make
 * sure they end up in the same SCC.  Similarly, each node is considered
 * to depend on every other node in the same cluster to ensure
 * that the SCC consists of complete clusters.
 *
 * Then the original SCCs that contain any of these nodes are marked
 * in c->scc_in_merge.
 */
static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	int edge, struct isl_clustering *c)
{
	struct isl_mark_merge_sccs_data data;
	struct isl_tarjan_graph *g;
	int i;

	for (i = 0; i < c->n; ++i)
		c->scc_in_merge[i] = 0;

	data.graph = graph;
	data.scc_cluster = c->scc_cluster;
	data.src = graph->edge[edge].src - graph->node;
	data.dst = graph->edge[edge].dst - graph->node;

	g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
					&cluster_follows, &data);
	if (!g)
		return isl_stat_error;

	i = g->op;
	if (i < 3)
		isl_die(ctx, isl_error_internal,
			"expecting at least two nodes in component",
			goto error);
	if (g->order[--i] != -1)
		isl_die(ctx, isl_error_internal,
			"expecting end of component marker", goto error);

	for (--i; i >= 0 && g->order[i] != -1; --i) {
		int scc = graph->node[g->order[i]].scc;
		c->scc_in_merge[scc] = 1;
	}

	isl_tarjan_graph_free(g);
	return isl_stat_ok;
error:
	isl_tarjan_graph_free(g);
	return isl_stat_error;
}
/* Construct the identifier "cluster_i".
 */
static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
{
	char name[40];

	snprintf(name, sizeof(name), "cluster_%d", i);
	return isl_id_alloc(ctx, name, NULL);
}
/* Construct the space of the cluster with index "i" containing
 * the strongly connected component "scc".
 *
 * In particular, construct a space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of "scc".
 */
static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
{
	int nvar;
	isl_space *space;
	isl_id *id;

	nvar = scc->n_total_row - scc->band_start;
	space = isl_space_copy(scc->node[0].space);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, nvar);
	id = cluster_id(isl_space_get_ctx(space), i);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);

	return space;
}
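
/* For example, an SCC whose current band consists of 2 schedule rows and
 * that represents cluster 3 gives rise to a set space of the form
 * cluster_3[c0, c1] (the dimension names are chosen here for
 * illustration only).
 */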
/* Collect the domain of the graph for merging clusters.
 *
 * In particular, for each cluster with first SCC "i", construct
 * a set in the space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of the cluster.
 */
static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
{
	int i;
	isl_space *space;
	isl_union_set *domain;

	space = isl_space_params_alloc(ctx, 0);
	domain = isl_union_set_empty(space);

	for (i = 0; i < graph->scc; ++i) {
		isl_space *space;

		if (!c->scc_in_merge[i])
			continue;
		if (c->scc_cluster[i] != i)
			continue;
		space = cluster_space(&c->scc[i], i);
		domain = isl_union_set_add_set(domain, isl_set_universe(space));
	}

	return domain;
}
/* Construct a map from the original instances to the corresponding
 * cluster instance in the current bands of the clusters in "c".
 */
static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
{
	int i, j;
	isl_space *space;
	isl_union_map *cluster_map;

	space = isl_space_params_alloc(ctx, 0);
	cluster_map = isl_union_map_empty(space);
	for (i = 0; i < graph->scc; ++i) {
		int start, n;
		isl_id *id;

		if (!c->scc_in_merge[i])
			continue;

		id = cluster_id(ctx, c->scc_cluster[i]);
		start = c->scc[i].band_start;
		n = c->scc[i].n_total_row - start;
		for (j = 0; j < c->scc[i].n; ++j) {
			isl_multi_aff *ma;
			isl_map *map;
			struct isl_sched_node *node = &c->scc[i].node[j];

			ma = node_extract_partial_schedule_multi_aff(node,
								start, n);
			ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
							isl_id_copy(id));
			map = isl_map_from_multi_aff(ma);
			cluster_map = isl_union_map_add_map(cluster_map, map);
		}
		isl_id_free(id);
	}

	return cluster_map;
}
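
/* For example (hypothetical statement and schedule), a node A[i,j] whose
 * current band schedule is (i, i + j) and whose SCC belongs to cluster 2
 * would contribute the map { A[i,j] -> cluster_2[i, i + j] }.
 */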
/* Add "umap" to the schedule constraints "sc" of all types of "edge"
 * that are not isl_edge_condition or isl_edge_conditional_validity.
 */
static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type t;

	if (!sc)
		return NULL;

	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
		if (t == isl_edge_condition ||
		    t == isl_edge_conditional_validity)
			continue;
		if (!is_type(edge, t))
			continue;
		sc->constraint[t] = isl_union_map_union(sc->constraint[t],
						    isl_union_map_copy(umap));
		if (!sc->constraint[t])
			return isl_schedule_constraints_free(sc);
	}

	return sc;
}
/* Add schedule constraints of types isl_edge_condition and
 * isl_edge_conditional_validity to "sc" by applying "umap" to
 * the domains of the wrapped relations in domain and range
 * of the corresponding tagged constraints of "edge".
 */
static __isl_give isl_schedule_constraints *add_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type t;
	isl_union_map *tagged;

	for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
		if (!is_type(edge, t))
			continue;
		if (t == isl_edge_condition)
			tagged = isl_union_map_copy(edge->tagged_condition);
		else
			tagged = isl_union_map_copy(edge->tagged_validity);
		tagged = isl_union_map_zip(tagged);
		tagged = isl_union_map_apply_domain(tagged,
					isl_union_map_copy(umap));
		tagged = isl_union_map_zip(tagged);
		sc->constraint[t] = isl_union_map_union(sc->constraint[t],
							tagged);
		if (!sc->constraint[t])
			return isl_schedule_constraints_free(sc);
	}

	return sc;
}
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to the original constraints represented by "edge".
 *
 * For non-tagged dependence constraints, the cluster constraints
 * are obtained by applying "cluster_map" to the edge->map.
 *
 * For tagged dependence constraints, "cluster_map" needs to be applied
 * to the domains of the wrapped relations in domain and range
 * of the tagged dependence constraints.  Pick out the mappings
 * from these domains from "cluster_map" and construct their product.
 * This mapping can then be applied to the pair of domains.
 */
static __isl_give isl_schedule_constraints *collect_edge_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
{
	isl_union_map *umap;
	isl_space *space;
	isl_union_set *uset;
	isl_union_map *umap1, *umap2;

	if (!sc)
		return NULL;

	umap = isl_union_map_from_map(isl_map_copy(edge->map));
	umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(cluster_map));
	umap = isl_union_map_apply_range(umap,
				isl_union_map_copy(cluster_map));
	sc = add_non_conditional_constraints(edge, umap, sc);
	isl_union_map_free(umap);

	if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
		return sc;

	space = isl_space_domain(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap1 = isl_union_map_copy(cluster_map);
	umap1 = isl_union_map_intersect_domain(umap1, uset);
	space = isl_space_range(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap2 = isl_union_map_copy(cluster_map);
	umap2 = isl_union_map_intersect_domain(umap2, uset);
	umap = isl_union_map_product(umap1, umap2);

	sc = add_conditional_constraints(edge, umap, sc);

	isl_union_map_free(umap);
	return sc;
}
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to all edges in "graph" between nodes that
 * belong to SCCs that are marked for merging in "scc_in_merge".
 */
static __isl_give isl_schedule_constraints *collect_constraints(
	struct isl_sched_graph *graph, int *scc_in_merge,
	__isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
			continue;
		if (!scc_in_merge[edge->dst->scc])
			continue;
		sc = collect_edge_constraints(edge, cluster_map, sc);
	}

	return sc;
}
/* Construct a dependence graph for scheduling clusters with respect
 * to each other and store the result in "merge_graph".
 * In particular, the nodes of the graph correspond to the schedule
 * dimensions of the current bands of those clusters that have been
 * marked for merging in "c".
 *
 * First construct an isl_schedule_constraints object for this domain
 * by transforming the edges in "graph" to the domain.
 * Then initialize a dependence graph for scheduling from these
 * constraints.
 */
static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	isl_union_set *domain;
	isl_union_map *cluster_map;
	isl_schedule_constraints *sc;
	isl_stat r;

	domain = collect_domain(ctx, graph, c);
	sc = isl_schedule_constraints_on_domain(domain);
	if (!sc)
		return isl_stat_error;
	cluster_map = collect_cluster_map(ctx, graph, c);
	sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
	isl_union_map_free(cluster_map);

	r = graph_init(merge_graph, sc);

	isl_schedule_constraints_free(sc);

	return r;
}
/* Compute the maximal number of remaining schedule rows that still need
 * to be computed for the nodes that belong to clusters with the maximal
 * dimension for the current band (i.e., the band that is to be merged).
 * Only clusters that are about to be merged are considered.
 * "maxvar" is the maximal dimension for the current band.
 * "c" contains information about the clusters.
 *
 * Return the maximal number of remaining schedule rows or -1 on error.
 */
static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
{
	int i, j;
	int max_slack;

	max_slack = 0;
	for (i = 0; i < c->n; ++i) {
		int nvar;
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
			continue;
		scc = &c->scc[i];
		nvar = scc->n_total_row - scc->band_start;
		if (nvar != maxvar)
			continue;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];
			int slack;

			if (node_update_cmap(node) < 0)
				return -1;
			slack = node->nvar - node->rank;
			if (slack > max_slack)
				max_slack = slack;
		}
	}

	return max_slack;
}
/* If there are any clusters where the dimension of the current band
 * (i.e., the band that is to be merged) is smaller than "maxvar" and
 * if there are any nodes in such a cluster where the number
 * of remaining schedule rows that still need to be computed
 * is greater than "max_slack", then return the smallest current band
 * dimension of all these clusters.  Otherwise return the original value
 * of "maxvar".  Return -1 in case of any error.
 * Only clusters that are about to be merged are considered.
 * "c" contains information about the clusters.
 */
static int limit_maxvar_to_slack(int maxvar, int max_slack,
	struct isl_clustering *c)
{
	int i, j;

	for (i = 0; i < c->n; ++i) {
		int nvar;
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
			continue;
		scc = &c->scc[i];
		nvar = scc->n_total_row - scc->band_start;
		if (nvar >= maxvar)
			continue;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];
			int slack;

			if (node_update_cmap(node) < 0)
				return -1;
			slack = node->nvar - node->rank;
			if (slack > max_slack) {
				maxvar = nvar;
				break;
			}
		}
	}

	return maxvar;
}
/* Adjust merge_graph->maxvar based on the number of remaining schedule rows
 * that still need to be computed.  In particular, if there is a node
 * in a cluster where the dimension of the current band is smaller
 * than merge_graph->maxvar, but the number of remaining schedule rows
 * is greater than that of any node in a cluster with the maximal
 * dimension for the current band (i.e., merge_graph->maxvar),
 * then adjust merge_graph->maxvar to the (smallest) current band dimension
 * of those clusters.  Without this adjustment, the total number of
 * schedule dimensions would be increased, resulting in a skewed view
 * of the number of coincident dimensions.
 * "c" contains information about the clusters.
 *
 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
 * then there is no point in attempting any merge since it will be rejected
 * anyway.  Set merge_graph->maxvar to zero in such cases.
 */
static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
	struct isl_sched_graph *merge_graph, struct isl_clustering *c)
{
	int max_slack, maxvar;

	max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
	if (max_slack < 0)
		return isl_stat_error;
	maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
	if (maxvar < 0)
		return isl_stat_error;

	if (maxvar < merge_graph->maxvar) {
		if (isl_options_get_schedule_maximize_band_depth(ctx))
			merge_graph->maxvar = 0;
		else
			merge_graph->maxvar = maxvar;
	}

	return isl_stat_ok;
}
/* Return the number of coincident dimensions in the current band of "graph",
 * where the nodes of "graph" are assumed to be scheduled by a single band.
 */
static int get_n_coincident(struct isl_sched_graph *graph)
{
	int i;

	for (i = graph->band_start; i < graph->n_total_row; ++i)
		if (!graph->node[0].coincident[i])
			break;

	return i - graph->band_start;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph", given that
 * coincidence should be maximized?
 *
 * If the number of coincident schedule dimensions in the merged band
 * would be less than the maximal number of coincident schedule dimensions
 * in any of the merged clusters, then the clusters should not be merged.
 */
static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;
	int n_coincident;
	int max_coincident = 0;

	for (i = 0; i < c->n; ++i) {
		if (!c->scc_in_merge[i])
			continue;
		n_coincident = get_n_coincident(&c->scc[i]);
		if (n_coincident > max_coincident)
			max_coincident = n_coincident;
	}

	n_coincident = get_n_coincident(merge_graph);

	return n_coincident >= max_coincident;
}
/* Return the transformation on "node" expressed by the current (and only)
 * band of "merge_graph" applied to the clusters in "c".
 *
 * First find the representation of "node" in its SCC in "c" and
 * extract the transformation expressed by the current band.
 * Then extract the transformation applied by "merge_graph"
 * to the cluster to which this SCC belongs.
 * Combine the two to obtain the complete transformation on the node.
 *
 * Note that the range of the first transformation is an anonymous space,
 * while the domain of the second is named "cluster_X".  The range
 * of the former therefore needs to be adjusted before the two
 * can be combined.
 */
static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
	struct isl_sched_node *node, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	struct isl_sched_node *scc_node, *cluster_node;
	int start, n;
	isl_id *id;
	isl_space *space;
	isl_multi_aff *ma, *ma2;

	scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
	start = c->scc[node->scc].band_start;
	n = c->scc[node->scc].n_total_row - start;
	ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
	space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
	cluster_node = graph_find_node(ctx, merge_graph, space);
	if (space && !cluster_node)
		isl_die(ctx, isl_error_internal, "unable to find cluster",
			space = isl_space_free(space));
	id = isl_space_get_tuple_id(space, isl_dim_set);
	ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
	isl_space_free(space);
	n = merge_graph->n_total_row;
	ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
	ma = isl_multi_aff_pullback_multi_aff(ma2, ma);

	return isl_map_from_multi_aff(ma);
}
/* Given a set of distances "set", are they bounded by a small constant
 * in direction "pos"?
 * In practice, check if they are bounded by 2 by checking that there
 * are no elements with a value greater than or equal to 3 or
 * smaller than or equal to -3.
 */
static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
{
	isl_set *test;
	isl_bool bounded;

	if (!set)
		return isl_bool_error;

	test = isl_set_copy(set);
	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	if (bounded < 0 || !bounded)
		return bounded;

	test = isl_set_copy(set);
	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	return bounded;
}
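
/* For example, a distance set { [d] : -2 <= d <= 2 } is considered bounded
 * in direction 0, while { [d] : d >= 0 } is not, since it contains
 * elements with value 3 or more.
 */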
/* Does the set "set" have a fixed (but possibly parametric) value
 * at dimension "pos"?
 */
static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
{
	int n;
	isl_bool single;

	if (!set)
		return isl_bool_error;
	set = isl_set_copy(set);
	n = isl_set_dim(set, isl_dim_set);
	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
	set = isl_set_project_out(set, isl_dim_set, 0, pos);
	single = isl_set_is_singleton(set);
	isl_set_free(set);

	return single;
}
/* Does "map" have a fixed (but possibly parametric) value
 * at dimension "pos" of either its domain or its range?
 */
static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
{
	isl_set *set;
	isl_bool single;

	set = isl_map_domain(isl_map_copy(map));
	single = has_single_value(set, pos);
	isl_set_free(set);

	if (single < 0 || single)
		return single;

	set = isl_map_range(isl_map_copy(map));
	single = has_single_value(set, pos);
	isl_set_free(set);

	return single;
}
/* Does the edge "edge" from "graph" have bounded dependence distances
 * in the merged graph "merge_graph" of a selection of clusters in "c"?
 *
 * Extract the complete transformations of the source and destination
 * nodes of the edge, apply them to the edge constraints and
 * compute the differences.  Finally, check if these differences are bounded
 * in each direction.
 *
 * If the dimension of the band is greater than the number of
 * dimensions that can be expected to be optimized by the edge
 * (based on its weight), then also allow the differences to be unbounded
 * in the remaining dimensions, but only if either the source or
 * the destination has a fixed value in that direction.
 * This allows a statement that produces values that are used by
 * several instances of another statement to be merged with that
 * other statement.
 * However, merging such clusters will introduce an inherently
 * large proximity distance inside the merged cluster, meaning
 * that proximity distances will no longer be optimized in
 * subsequent merges.  These merges are therefore only allowed
 * after all other possible merges have been tried.
 * The first time such a merge is encountered, the weight of the edge
 * is replaced by a negative weight.  The second time (i.e., after
 * all merges over edges with a non-negative weight have been tried),
 * the merge is allowed.
 */
static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i, n, n_slack;
	isl_bool bounded;
	isl_map *map, *t;
	isl_set *dist;

	map = isl_map_copy(edge->map);
	t = extract_node_transformation(ctx, edge->src, c, merge_graph);
	map = isl_map_apply_domain(map, t);
	t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
	map = isl_map_apply_range(map, t);
	dist = isl_map_deltas(isl_map_copy(map));

	bounded = isl_bool_true;
	n = isl_set_dim(dist, isl_dim_set);
	n_slack = n - edge->weight;
	if (edge->weight < 0)
		n_slack -= graph->max_weight + 1;
	for (i = 0; i < n; ++i) {
		isl_bool bounded_i, singular_i;

		bounded_i = distance_is_bounded(dist, i);
		if (bounded_i < 0)
			goto error;
		if (bounded_i)
			continue;
		if (edge->weight >= 0)
			bounded = isl_bool_false;
		n_slack--;
		if (n_slack < 0)
			break;
		singular_i = has_singular_src_or_dst(map, i);
		if (singular_i < 0)
			goto error;
		if (singular_i)
			continue;
		bounded = isl_bool_false;
		break;
	}
	if (!bounded && i >= n && edge->weight >= 0)
		edge->weight -= graph->max_weight + 1;

	isl_map_free(map);
	isl_set_free(dist);

	return bounded;
error:
	isl_map_free(map);
	isl_set_free(dist);
	return isl_bool_error;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * In particular, is there at least one proximity constraint
 * that is optimized by the merge?
 *
 * A proximity constraint is considered to be optimized
 * if the dependence distances are small.
 */
static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_bool bounded;

		if (!is_proximity(edge))
			continue;
		if (!c->scc_in_merge[edge->src->scc])
			continue;
		if (!c->scc_in_merge[edge->dst->scc])
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;
		bounded = has_bounded_distances(ctx, edge, graph, c,
						merge_graph);
		if (bounded < 0 || bounded)
			return bounded;
	}

	return isl_bool_false;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * If the current band is empty, then the clusters should not be merged.
 *
 * If the band depth should be maximized and the merge schedule
 * is incomplete (meaning that the dimension of some of the schedule
 * bands in the original schedule will be reduced), then the clusters
 * should not be merged.
 *
 * If the schedule_maximize_coincidence option is set, then check that
 * the number of coincident schedule dimensions is not reduced.
 *
 * Finally, only allow the merge if at least one proximity
 * constraint is optimized.
 */
static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	if (merge_graph->n_total_row == merge_graph->band_start)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
	    merge_graph->n_total_row < merge_graph->maxvar)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
		isl_bool ok;

		ok = ok_to_merge_coincident(c, merge_graph);
		if (ok < 0 || !ok)
			return ok;
	}

	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
}
/* Apply the schedule in "t_node" to the "n" rows starting at "first"
 * of the schedule in "node" and return the result.
 *
 * That is, essentially compute
 *
 *	T * N(first:first+n-1)
 *
 * taking into account the constant term and the parameter coefficients
 * in "t_node".
 */
static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
	struct isl_sched_node *t_node, struct isl_sched_node *node,
	int first, int n)
{
	int i, j;
	isl_mat *t;
	int n_row, n_col, n_param, n_var;

	n_param = node->nparam;
	n_var = node->nvar;
	n_row = isl_mat_rows(t_node->sched);
	n_col = isl_mat_cols(node->sched);
	t = isl_mat_alloc(ctx, n_row, n_col);
	if (!t)
		return NULL;
	for (i = 0; i < n_row; ++i) {
		isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
		isl_seq_clr(t->row[i] + 1 + n_param, n_var);
		for (j = 0; j < n; ++j)
			isl_seq_addmul(t->row[i],
					t_node->sched->row[i][1 + n_param + j],
					node->sched->row[first + j],
					1 + n_param + n_var);
	}

	return t;
}
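
/* For illustration (hypothetical values): with no parameters, a node band
 * N consisting of the rows [0 | 1 0] and [0 | 0 1] (i.e., i and j) and a
 * cluster schedule row in "t_node" with constant term 1 and coefficients
 * (2, 1) on the cluster dimensions yields the combined row [1 | 2 1],
 * i.e., 2 * i + j + 1.
 */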
/* Apply the cluster schedule in "t_node" to the current band
 * schedule of the nodes in "graph".
 *
 * In particular, replace the rows starting at band_start
 * by the result of applying the cluster schedule in "t_node"
 * to the original rows.
 *
 * The coincidence of the schedule is determined by the coincidence
 * of the cluster schedule.
 */
static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_sched_node *t_node)
{
	int i, j;
	int n_new;
	int start, n;

	start = graph->band_start;
	n = graph->n_total_row - start;

	n_new = isl_mat_rows(t_node->sched);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		isl_mat *t;

		t = node_transformation(ctx, t_node, node, start, n);
		node->sched = isl_mat_drop_rows(node->sched, start, n);
		node->sched = isl_mat_concat(node->sched, t);
		node->sched_map = isl_map_free(node->sched_map);
		if (!node->sched)
			return isl_stat_error;
		for (j = 0; j < n_new; ++j)
			node->coincident[start + j] = t_node->coincident[j];
	}
	graph->n_total_row -= n;
	graph->n_row -= n;
	graph->n_total_row += n_new;
	graph->n_row += n_new;

	return isl_stat_ok;
}
/* Merge the clusters marked for merging in "c" into a single
 * cluster using the cluster schedule in the current band of "merge_graph".
 * The representative SCC for the new cluster is the SCC with
 * the smallest index.
 *
 * The current band schedule of each SCC in the new cluster is obtained
 * by applying the schedule of the corresponding original cluster
 * to the original band schedule.
 * All SCCs in the new cluster have the same number of schedule rows.
 */
static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;
	int cluster = -1;
	isl_space *space;

	for (i = 0; i < c->n; ++i) {
		struct isl_sched_node *node;

		if (!c->scc_in_merge[i])
			continue;
		if (cluster < 0)
			cluster = i;
		space = cluster_space(&c->scc[i], c->scc_cluster[i]);
		if (!space)
			return isl_stat_error;
		node = graph_find_node(ctx, merge_graph, space);
		isl_space_free(space);
		if (!node)
			isl_die(ctx, isl_error_internal,
				"unable to find cluster",
				return isl_stat_error);
		if (transform(ctx, &c->scc[i], node) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = cluster;
	}

	return isl_stat_ok;
}
/* Try and merge the clusters of SCCs marked in c->scc_in_merge
 * by scheduling the current cluster bands with respect to each other.
 *
 * Construct a dependence graph with a space for each cluster and
 * with the coordinates of each space corresponding to the schedule
 * dimensions of the current band of that cluster.
 * Construct a cluster schedule in this cluster dependence graph and
 * apply it to the current cluster bands if it is applicable
 * according to ok_to_merge.
 *
 * If the number of remaining schedule dimensions in a cluster
 * with a non-maximal current schedule dimension is greater than
 * the number of remaining schedule dimensions in clusters
 * with a maximal current schedule dimension, then restrict
 * the number of rows to be computed in the cluster schedule
 * to the minimal such non-maximal current schedule dimension.
 * Do this by adjusting merge_graph.maxvar.
 *
 * Return isl_bool_true if the clusters have effectively been merged
 * into a single cluster.
 *
 * Note that since the standard scheduling algorithm minimizes the maximal
 * distance over proximity constraints, the proximity constraints between
 * the merged clusters may not be optimized any further than what is
 * sufficient to bring the distances within the limits of the internal
 * proximity constraints inside the individual clusters.
 * It may therefore make sense to perform an additional translation step
 * to bring the clusters closer to each other, while maintaining
 * the linear part of the merging schedule found using the standard
 * scheduling algorithm.
 */
static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	struct isl_sched_graph merge_graph = { 0 };
	isl_bool merged;

	if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
		goto error;

	if (compute_maxvar(&merge_graph) < 0)
		goto error;
	if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
		goto error;
	if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
		goto error;
	merged = ok_to_merge(ctx, graph, c, &merge_graph);
	if (merged && merge(ctx, c, &merge_graph) < 0)
		goto error;

	graph_free(ctx, &merge_graph);
	return merged;
error:
	graph_free(ctx, &merge_graph);
	return isl_bool_error;
}
/* Is there any edge marked "no_merge" between two SCCs that are
 * about to be merged (i.e., that are set in "scc_in_merge")?
 * "merge_edge" is the proximity edge along which the clusters of SCCs
 * are going to be merged.
 *
 * If there is any edge between two SCCs with a negative weight,
 * while the weight of "merge_edge" is non-negative, then this
 * means that the edge was postponed.  "merge_edge" should then
 * also be postponed since merging along the edge with negative weight should
 * be postponed until all edges with non-negative weight have been tried.
 * Replace the weight of "merge_edge" by a negative weight as well and
 * tell the caller not to attempt a merge.
 */
static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
	struct isl_sched_edge *merge_edge)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
			continue;
		if (!scc_in_merge[edge->dst->scc])
			continue;
		if (edge->no_merge)
			return 1;
		if (merge_edge->weight >= 0 && edge->weight < 0) {
			merge_edge->weight -= graph->max_weight + 1;
			return 1;
		}
	}

	return 0;
}
/* Merge the two clusters in "c" connected by the edge in "graph"
 * with index "edge" into a single cluster.
 * If it turns out to be impossible to merge these two clusters,
 * then mark the edge as "no_merge" such that it will not be
 * considered again.
 *
 * First mark all SCCs that need to be merged.  This includes the SCCs
 * in the two clusters, but it may also include the SCCs
 * of intermediate clusters.
 * If there is already a no_merge edge between any pair of such SCCs,
 * then simply mark the current edge as no_merge as well.
 * Likewise, if any of those edges was postponed by has_bounded_distances,
 * then postpone the current edge as well.
 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
 * if the clusters did not end up getting merged, unless the non-merge
 * is due to the fact that the edge was postponed.  This postponement
 * can be recognized by a change in weight (from non-negative to negative).
 */
static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
	struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
{
	isl_bool merged;
	int edge_weight = graph->edge[edge].weight;

	if (mark_merge_sccs(ctx, graph, edge, c) < 0)
		return isl_stat_error;

	if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
		merged = isl_bool_false;
	else
		merged = try_merge(ctx, graph, c);
	if (merged < 0)
		return isl_stat_error;
	if (!merged && edge_weight == graph->edge[edge].weight)
		graph->edge[edge].no_merge = 1;

	return isl_stat_ok;
}
/* Does "node" belong to the cluster identified by "cluster"?
 */
static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
{
	return node->cluster == cluster;
}
/* Does "edge" connect two nodes belonging to the cluster
 * identified by "cluster"?
 */
static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
{
	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
}
/* Swap the schedule of "node1" and "node2".
 * Both nodes have been derived from the same node in a common parent graph.
 * Since the "coincident" field is shared with that node
 * in the parent graph, there is no need to also swap this field.
 */
static void swap_sched(struct isl_sched_node *node1,
	struct isl_sched_node *node2)
{
	isl_mat *sched;
	isl_map *sched_map;

	sched = node1->sched;
	node1->sched = node2->sched;
	node2->sched = sched;

	sched_map = node1->sched_map;
	node1->sched_map = node2->sched_map;
	node2->sched_map = sched_map;
}
/* Copy the current band schedule from the SCCs that form the cluster
 * with index "pos" to the actual cluster at position "pos".
 * By construction, the index of the first SCC that belongs to the cluster
 * is also "pos".
 *
 * The order of the nodes inside both the SCCs and the cluster
 * is assumed to be same as the order in the original "graph".
 *
 * Since the SCC graphs will no longer be used after this function,
 * the schedules are actually swapped rather than copied.
 */
static isl_stat copy_partial(struct isl_sched_graph *graph,
	struct isl_clustering *c, int pos)
{
	int i, j;

	c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
	c->cluster[pos].n_row = c->scc[pos].n_row;
	c->cluster[pos].maxvar = c->scc[pos].maxvar;
	j = 0;
	for (i = 0; i < graph->n; ++i) {
		int k;
		int s;

		if (graph->node[i].cluster != pos)
			continue;
		s = graph->node[i].scc;
		k = c->scc_node[s]++;
		swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
		if (c->scc[s].maxvar > c->cluster[pos].maxvar)
			c->cluster[pos].maxvar = c->scc[s].maxvar;
		++j;
	}

	return isl_stat_ok;
}
/* Is there a (conditional) validity dependence from node[j] to node[i],
 * forcing node[i] to follow node[j], or do the nodes belong to the same
 * cluster?
 */
static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	if (graph->node[i].cluster == graph->node[j].cluster)
		return isl_bool_true;
	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Extract the merged clusters of SCCs in "graph", sort them, and
 * store them in c->clusters.  Update c->scc_cluster accordingly.
 *
 * First keep track of the cluster containing the SCC to which a node
 * belongs in the node itself.
 * Then extract the clusters into c->clusters, copying the current
 * band schedule from the SCCs that belong to the cluster.
 * Do this only once per cluster.
 *
 * Finally, topologically sort the clusters and update c->scc_cluster
 * to match the new scc numbering.  While the SCCs were originally
 * sorted already, some SCCs that depend on some other SCCs may
 * have been merged with SCCs that appear before these other SCCs.
 * A reordering may therefore be required.
 */
static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	for (i = 0; i < graph->n; ++i)
		graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];

	for (i = 0; i < graph->scc; ++i) {
		if (c->scc_cluster[i] != i)
			continue;
		if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
				&edge_cluster_exactly, i, &c->cluster[i]) < 0)
			return isl_stat_error;
		c->cluster[i].src_scc = -1;
		c->cluster[i].dst_scc = -1;
		if (copy_partial(graph, c, i) < 0)
			return isl_stat_error;
	}

	if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
		return isl_stat_error;
	for (i = 0; i < graph->n; ++i)
		c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;

	return isl_stat_ok;
}
/* Compute weights on the proximity edges of "graph" that can
 * be used by find_proximity to find the most appropriate
 * proximity edge to use to merge two clusters in "c".
 * The weights are also used by has_bounded_distances to determine
 * whether the merge should be allowed.
 * Store the maximum of the computed weights in graph->max_weight.
 *
 * The computed weight is a measure for the number of remaining schedule
 * dimensions that can still be completely aligned.
 * In particular, compute the number of equalities between
 * input dimensions and output dimensions in the proximity constraints.
 * The directions that are already handled by outer schedule bands
 * are projected out prior to determining this number.
 *
 * Edges that will never be considered by find_proximity are ignored.
 */
static isl_stat compute_weights(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	graph->max_weight = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		struct isl_sched_node *src = edge->src;
		struct isl_sched_node *dst = edge->dst;
		isl_basic_map *hull;
		int n_in, n_out;

		if (!is_proximity(edge))
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;

		hull = isl_map_affine_hull(isl_map_copy(edge->map));
		hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
						    isl_mat_copy(src->ctrans));
		hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
						    isl_mat_copy(dst->ctrans));
		hull = isl_basic_map_project_out(hull,
						isl_dim_in, 0, src->rank);
		hull = isl_basic_map_project_out(hull,
						isl_dim_out, 0, dst->rank);
		hull = isl_basic_map_remove_divs(hull);
		n_in = isl_basic_map_dim(hull, isl_dim_in);
		n_out = isl_basic_map_dim(hull, isl_dim_out);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_in, 0, n_in);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_out, 0, n_out);
		if (!hull)
			return isl_stat_error;
		edge->weight = hull->n_eq;
		isl_basic_map_free(hull);

		if (edge->weight > graph->max_weight)
			graph->max_weight = edge->weight;
	}

	return isl_stat_ok;
}
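/* Illustrative example (the statement names and relation below are made up
 * for illustration only): for a proximity relation
 * { S[i,j] -> T[i',j'] : i' = i and j' = j } between nodes with no outer
 * schedule bands yet (so nothing is projected out), the affine hull
 * contains two equalities relating input and output dimensions, and the
 * edge would receive weight 2.  If an outer band had already aligned the
 * first dimension, that direction would have been projected out above and
 * the weight would drop to 1.
 */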
/* Call compute_schedule_finish_band on each of the clusters in "c"
 * in their topological order.  This order is determined by the scc
 * fields of the nodes in "graph".
 * Combine the results in a sequence expressing the topological order.
 *
 * If there is only one cluster left, then there is no need to introduce
 * a sequence node.  Also, in this case, the cluster necessarily contains
 * the SCC at position 0 in the original graph and is therefore also
 * stored in the first cluster of "c".
 */
static __isl_give isl_schedule_node *finish_bands_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (graph->scc == 1)
		return compute_schedule_finish_band(node, &c->cluster[0], 0);

	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	for (i = 0; i < graph->scc; ++i) {
		int j = c->scc_cluster[i];
		node = isl_schedule_node_child(node, i);
		node = isl_schedule_node_child(node, 0);
		node = compute_schedule_finish_band(node, &c->cluster[j], 0);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}
/* Compute a schedule for a connected dependence graph by first considering
 * each strongly connected component (SCC) in the graph separately and then
 * incrementally combining them into clusters.
 * Return the updated schedule node.
 *
 * Initially, each cluster consists of a single SCC, each with its
 * own band schedule.  The algorithm then tries to merge pairs
 * of clusters along a proximity edge until no more suitable
 * proximity edges can be found.  During this merging, the schedule
 * is maintained in the individual SCCs.
 * After the merging is completed, the full resulting clusters
 * are extracted and in finish_bands_clustering,
 * compute_schedule_finish_band is called on each of them to integrate
 * the band into "node" and to continue the computation.
 *
 * compute_weights initializes the weights that are used by find_proximity.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;
	struct isl_clustering c;
	int i;

	ctx = isl_schedule_node_get_ctx(node);

	if (clustering_init(ctx, &c, graph) < 0)
		goto error;

	if (compute_weights(graph, &c) < 0)
		goto error;

	for (;;) {
		i = find_proximity(graph, &c);
		if (i < 0)
			goto error;
		if (i >= graph->n_edge)
			break;
		if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
			goto error;
	}

	if (extract_clusters(ctx, graph, &c) < 0)
		goto error;

	node = finish_bands_clustering(node, graph, &c);

	clustering_free(ctx, &c);
	return node;
error:
	clustering_free(ctx, &c);
	return isl_schedule_node_free(node);
}
/* Compute a schedule for a connected dependence graph and return
 * the updated schedule node.
 *
 * If Feautrier's algorithm is selected, we first recursively try to satisfy
 * as many validity dependences as possible.  When all validity dependences
 * are satisfied, we extend the schedule to a full-dimensional schedule.
 *
 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
 * depending on whether the user has selected the option to try and
 * compute a schedule for the entire (weakly connected) component first.
 * If there is only a single strongly connected component (SCC), then
 * there is no point in trying to combine SCCs
 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
 * is called instead.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	ctx = isl_schedule_node_get_ctx(node);
	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (compute_maxvar(graph) < 0)
		return isl_schedule_node_free(node);

	if (need_feautrier_step(ctx, graph))
		return compute_schedule_wcc_feautrier(node, graph);

	if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
		return compute_schedule_wcc_whole(node, graph);

	return compute_schedule_wcc_clustering(node, graph);
}
/* Compute a schedule for each group of nodes identified by node->scc
 * separately and then combine them in a sequence node (or as set node
 * if graph->weak is set) inserted at position "node" of the schedule tree.
 * Return the updated schedule node.
 *
 * If "wcc" is set then each of the groups belongs to a single
 * weakly connected component in the dependence graph so that
 * there is no need for compute_sub_schedule to look for weakly
 * connected components.
 */
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc)
{
	int component;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	if (graph->weak)
		node = isl_schedule_node_insert_set(node, filters);
	else
		node = isl_schedule_node_insert_sequence(node, filters);

	for (component = 0; component < graph->scc; ++component) {
		node = isl_schedule_node_child(node, component);
		node = isl_schedule_node_child(node, 0);
		node = compute_sub_schedule(node, ctx, graph,
					    &node_scc_exactly,
					    &edge_scc_exactly, component, wcc);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}
/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	} else {
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	}

	if (graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
}
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	if (isl_union_set_n_set(domain) == 0) {
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);
	}

	if (graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);
	node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);

	return sched;
}
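/* Usage sketch (the domain and dependence strings below are made up for
 * illustration; only the public API calls are part of isl):
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"{ S[i] -> T[i] }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 *	isl_schedule *schedule =
 *		isl_schedule_constraints_compute_schedule(sc);
 *	...
 *	isl_schedule_free(schedule);
 *	isl_ctx_free(ctx);
 *
 * Scheduling options such as schedule_serialize_sccs or
 * schedule_whole_component can be set on "ctx" beforehand to influence
 * which of the code paths above is taken.
 */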
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
{
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}