/*
 * Copyright 2011      INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015-2016 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl_ctx_private.h>
#include <isl_map_private.h>
#include <isl_space_private.h>
#include <isl_aff_private.h>
#include <isl/constraint.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl_mat_private.h>
#include <isl_vec_private.h>
#include <isl/union_set.h>
#include <isl_dim_map.h>
#include <isl/map_to_basic_set.h>
#include <isl_options_private.h>
#include <isl_tarjan.h>
#include <isl_morph.h>
#include <isl_val_private.h>
/* The scheduling algorithm implemented in this file was inspired by
 * Bondhugula et al., "Automatic Transformations for Communication-Minimized
 * Parallelization and Locality Optimization in the Polyhedral Model".
 */
enum isl_edge_type {
	isl_edge_validity = 0,
	isl_edge_first = isl_edge_validity,
	isl_edge_coincidence,
	isl_edge_condition,
	isl_edge_conditional_validity,
	isl_edge_proximity,
	isl_edge_last = isl_edge_proximity,
	isl_edge_local
};
/* The constraints that need to be satisfied by a schedule on "domain".
 *
 * "context" specifies extra constraints on the parameters.
 *
 * "validity" constraints map domain elements i to domain elements
 * that should be scheduled after i.  (Hard constraint)
 * "proximity" constraints map domain elements i to domain elements
 * that should be scheduled as early as possible after i (or before i).
 *
 * "condition" and "conditional_validity" constraints map possibly "tagged"
 * domain elements i -> s to "tagged" domain elements j -> t.
 * The elements of the "conditional_validity" constraints, but without the
 * tags (i.e., the elements i -> j), are treated as validity constraints,
 * except that during the construction of a tilable band,
 * the elements of the "conditional_validity" constraints may be violated
 * provided that all adjacent elements of the "condition" constraints
 * are local within the band.
 * A dependence is local within a band if domain and range are mapped
 * to the same schedule point by the band.
 */
struct isl_schedule_constraints {
	isl_union_set *domain;
	isl_set *context;

	isl_union_map *constraint[isl_edge_last + 1];
};
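
/* Illustrative sketch (added for exposition, not part of the original
 * source): how a caller might build an isl_schedule_constraints object
 * with the setters defined below.  The statement names S and T and the
 * dependence relation are hypothetical.
 *
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"[n] -> { S[i] : 0 <= i < n; T[i] : 0 <= i < n }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"[n] -> { S[i] -> T[i] }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 */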
__isl_give isl_schedule_constraints *isl_schedule_constraints_copy(
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_schedule_constraints *sc_copy;
	enum isl_edge_type i;

	ctx = isl_union_set_get_ctx(sc->domain);
	sc_copy = isl_calloc_type(ctx, struct isl_schedule_constraints);
	if (!sc_copy)
		return NULL;

	sc_copy->domain = isl_union_set_copy(sc->domain);
	sc_copy->context = isl_set_copy(sc->context);
	if (!sc_copy->domain || !sc_copy->context)
		return isl_schedule_constraints_free(sc_copy);

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc_copy->constraint[i] = isl_union_map_copy(sc->constraint[i]);
		if (!sc_copy->constraint[i])
			return isl_schedule_constraints_free(sc_copy);
	}

	return sc_copy;
}
/* Construct an isl_schedule_constraints object for computing a schedule
 * on "domain".  The initial object does not impose any constraints.
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_on_domain(
	__isl_take isl_union_set *domain)
{
	isl_ctx *ctx;
	isl_space *space;
	isl_schedule_constraints *sc;
	isl_union_map *empty;
	enum isl_edge_type i;

	ctx = isl_union_set_get_ctx(domain);
	sc = isl_calloc_type(ctx, struct isl_schedule_constraints);
	if (!sc)
		goto error;

	space = isl_union_set_get_space(domain);
	sc->domain = domain;
	sc->context = isl_set_universe(isl_space_copy(space));
	empty = isl_union_map_empty(space);
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc->constraint[i] = isl_union_map_copy(empty);
		if (!sc->constraint[i])
			sc->domain = isl_union_set_free(sc->domain);
	}
	isl_union_map_free(empty);

	if (!sc->domain || !sc->context)
		return isl_schedule_constraints_free(sc);

	return sc;
error:
	isl_union_set_free(domain);
	return NULL;
}
/* Replace the context of "sc" by "context".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_context(
	__isl_take isl_schedule_constraints *sc, __isl_take isl_set *context)
{
	if (!sc || !context)
		goto error;

	isl_set_free(sc->context);
	sc->context = context;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_set_free(context);
	return NULL;
}
/* Replace the validity constraints of "sc" by "validity".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_validity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *validity)
{
	if (!sc || !validity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_validity]);
	sc->constraint[isl_edge_validity] = validity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(validity);
	return NULL;
}
/* Replace the coincidence constraints of "sc" by "coincidence".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_coincidence(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *coincidence)
{
	if (!sc || !coincidence)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_coincidence]);
	sc->constraint[isl_edge_coincidence] = coincidence;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(coincidence);
	return NULL;
}
/* Replace the proximity constraints of "sc" by "proximity".
 */
__isl_give isl_schedule_constraints *isl_schedule_constraints_set_proximity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *proximity)
{
	if (!sc || !proximity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_proximity]);
	sc->constraint[isl_edge_proximity] = proximity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(proximity);
	return NULL;
}
/* Replace the conditional validity constraints of "sc" by "condition"
 * and "validity".
 */
__isl_give isl_schedule_constraints *
isl_schedule_constraints_set_conditional_validity(
	__isl_take isl_schedule_constraints *sc,
	__isl_take isl_union_map *condition,
	__isl_take isl_union_map *validity)
{
	if (!sc || !condition || !validity)
		goto error;

	isl_union_map_free(sc->constraint[isl_edge_condition]);
	sc->constraint[isl_edge_condition] = condition;
	isl_union_map_free(sc->constraint[isl_edge_conditional_validity]);
	sc->constraint[isl_edge_conditional_validity] = validity;

	return sc;
error:
	isl_schedule_constraints_free(sc);
	isl_union_map_free(condition);
	isl_union_map_free(validity);
	return NULL;
}
__isl_null isl_schedule_constraints *isl_schedule_constraints_free(
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	isl_union_set_free(sc->domain);
	isl_set_free(sc->context);
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		isl_union_map_free(sc->constraint[i]);

	free(sc);

	return NULL;
}
isl_ctx *isl_schedule_constraints_get_ctx(
	__isl_keep isl_schedule_constraints *sc)
{
	return sc ? isl_union_set_get_ctx(sc->domain) : NULL;
}
/* Return the domain of "sc".
 */
__isl_give isl_union_set *isl_schedule_constraints_get_domain(
	__isl_keep isl_schedule_constraints *sc)
{
	return isl_union_set_copy(sc->domain);
}
/* Return the validity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_validity(
	__isl_keep isl_schedule_constraints *sc)
{
	return isl_union_map_copy(sc->constraint[isl_edge_validity]);
}
/* Return the coincidence constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_coincidence(
	__isl_keep isl_schedule_constraints *sc)
{
	return isl_union_map_copy(sc->constraint[isl_edge_coincidence]);
}
/* Return the proximity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_proximity(
	__isl_keep isl_schedule_constraints *sc)
{
	return isl_union_map_copy(sc->constraint[isl_edge_proximity]);
}
/* Return the conditional validity constraints of "sc".
 */
__isl_give isl_union_map *isl_schedule_constraints_get_conditional_validity(
	__isl_keep isl_schedule_constraints *sc)
{
	return
	    isl_union_map_copy(sc->constraint[isl_edge_conditional_validity]);
}
/* Return the conditions for the conditional validity constraints of "sc".
 */
__isl_give isl_union_map *
isl_schedule_constraints_get_conditional_validity_condition(
	__isl_keep isl_schedule_constraints *sc)
{
	return isl_union_map_copy(sc->constraint[isl_edge_condition]);
}
/* Can a schedule constraint of type "type" be tagged?
 */
static int may_be_tagged(enum isl_edge_type type)
{
	if (type == isl_edge_condition || type == isl_edge_conditional_validity)
		return 1;
	return 0;
}
350 /* Apply "umap" to the domains of the wrapped relations
351 * inside the domain and range of "c".
353 * That is, for each map of the form
355 * [D -> S] -> [E -> T]
357 * in "c", apply "umap" to D and E.
359 * D is exposed by currying the relation to
361 * D -> [S -> [E -> T]]
363 * E is exposed by doing the same to the inverse of "c".
365 static __isl_give isl_union_map
*apply_factor_domain(
366 __isl_take isl_union_map
*c
, __isl_keep isl_union_map
*umap
)
368 c
= isl_union_map_curry(c
);
369 c
= isl_union_map_apply_domain(c
, isl_union_map_copy(umap
));
370 c
= isl_union_map_uncurry(c
);
372 c
= isl_union_map_reverse(c
);
373 c
= isl_union_map_curry(c
);
374 c
= isl_union_map_apply_domain(c
, isl_union_map_copy(umap
));
375 c
= isl_union_map_uncurry(c
);
376 c
= isl_union_map_reverse(c
);
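
/* Illustrative example (added for exposition, not in the original source):
 * for a hypothetical tagged dependence
 *
 *	{ [D[i] -> S[]] -> [E[j] -> T[]] }
 *
 * currying exposes D as the domain,
 *
 *	{ D[i] -> [S[] -> [E[j] -> T[]]] }
 *
 * so that "umap" can be applied to D with isl_union_map_apply_domain;
 * uncurrying restores the original shape, and repeating the same steps
 * on the reversed relation transforms E.
 */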
381 /* Apply "umap" to domain and range of "c".
382 * If "tag" is set, then "c" may contain tags and then "umap"
383 * needs to be applied to the domains of the wrapped relations
384 * inside the domain and range of "c".
386 static __isl_give isl_union_map
*apply(__isl_take isl_union_map
*c
,
387 __isl_keep isl_union_map
*umap
, int tag
)
392 t
= isl_union_map_copy(c
);
393 c
= isl_union_map_apply_domain(c
, isl_union_map_copy(umap
));
394 c
= isl_union_map_apply_range(c
, isl_union_map_copy(umap
));
397 t
= apply_factor_domain(t
, umap
);
398 c
= isl_union_map_union(c
, t
);
402 /* Apply "umap" to the domain of the schedule constraints "sc".
404 * The two sides of the various schedule constraints are adjusted
407 __isl_give isl_schedule_constraints
*isl_schedule_constraints_apply(
408 __isl_take isl_schedule_constraints
*sc
,
409 __isl_take isl_union_map
*umap
)
411 enum isl_edge_type i
;
416 for (i
= isl_edge_first
; i
<= isl_edge_last
; ++i
) {
417 int tag
= may_be_tagged(i
);
419 sc
->constraint
[i
] = apply(sc
->constraint
[i
], umap
, tag
);
420 if (!sc
->constraint
[i
])
423 sc
->domain
= isl_union_set_apply(sc
->domain
, umap
);
425 return isl_schedule_constraints_free(sc
);
429 isl_schedule_constraints_free(sc
);
430 isl_union_map_free(umap
);
void isl_schedule_constraints_dump(__isl_keep isl_schedule_constraints *sc)
{
	if (!sc)
		return;

	fprintf(stderr, "domain: ");
	isl_union_set_dump(sc->domain);
	fprintf(stderr, "context: ");
	isl_set_dump(sc->context);
	fprintf(stderr, "validity: ");
	isl_union_map_dump(sc->constraint[isl_edge_validity]);
	fprintf(stderr, "proximity: ");
	isl_union_map_dump(sc->constraint[isl_edge_proximity]);
	fprintf(stderr, "coincidence: ");
	isl_union_map_dump(sc->constraint[isl_edge_coincidence]);
	fprintf(stderr, "condition: ");
	isl_union_map_dump(sc->constraint[isl_edge_condition]);
	fprintf(stderr, "conditional_validity: ");
	isl_union_map_dump(sc->constraint[isl_edge_conditional_validity]);
}
/* Align the parameters of the fields of "sc".
 */
static __isl_give isl_schedule_constraints *
isl_schedule_constraints_align_params(__isl_take isl_schedule_constraints *sc)
{
	isl_space *space;
	enum isl_edge_type i;

	if (!sc)
		return NULL;

	space = isl_union_set_get_space(sc->domain);
	space = isl_space_align_params(space, isl_set_get_space(sc->context));
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		space = isl_space_align_params(space,
				isl_union_map_get_space(sc->constraint[i]));

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		sc->constraint[i] = isl_union_map_align_params(
				sc->constraint[i], isl_space_copy(space));
		if (!sc->constraint[i])
			space = isl_space_free(space);
	}
	sc->context = isl_set_align_params(sc->context, isl_space_copy(space));
	sc->domain = isl_union_set_align_params(sc->domain, space);
	if (!sc->context || !sc->domain)
		return isl_schedule_constraints_free(sc);

	return sc;
}
/* Return the total number of isl_maps in the constraints of "sc".
 */
static int isl_schedule_constraints_n_map(
	__isl_keep isl_schedule_constraints *sc)
{
	enum isl_edge_type i;
	int n = 0;

	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		n += isl_union_map_n_map(sc->constraint[i]);

	return n;
}
/* Internal information about a node that is used during the construction
 * of the schedule.
 * space represents the space in which the domain lives
 * sched is a matrix representation of the schedule being constructed
 *	for this node; if compressed is set, then this schedule is
 *	defined over the compressed domain space
 * sched_map is an isl_map representation of the same (partial) schedule
 *	sched_map may be NULL; if compressed is set, then this map
 *	is defined over the uncompressed domain space
 * rank is the number of linearly independent rows in the linear part
 *	of sched
 * the columns of cmap represent a change of basis for the schedule
 *	coefficients; the first rank columns span the linear part of
 *	the schedule rows
 * cinv is the inverse of cmap.
 * ctrans is the transpose of cmap.
 * start is the first variable in the LP problem in the sequences that
 *	represents the schedule coefficients of this node
 * nvar is the dimension of the domain
 * nparam is the number of parameters or 0 if we are not constructing
 *	a parametric schedule
 *
 * If compressed is set, then hull represents the constraints
 * that were used to derive the compression, while compress and
 * decompress map the original space to the compressed space and
 * vice versa.
 *
 * scc is the index of SCC (or WCC) this node belongs to
 *
 * "cluster" is only used inside extract_clusters and identifies
 * the cluster of SCCs that the node belongs to.
 *
 * coincident contains a boolean for each of the rows of the schedule,
 * indicating whether the corresponding scheduling dimension satisfies
 * the coincidence constraints in the sense that the corresponding
 * dependence distances are zero.
 *
 * If the schedule_treat_coalescing option is set, then
 * "sizes" contains the sizes of the (compressed) instance set
 * in each direction.  If there is no fixed size in a given direction,
 * then the corresponding size value is set to infinity.
 * If the schedule_treat_coalescing option or the schedule_max_coefficient
 * option is set, then "max" contains the maximal values for
 * schedule coefficients of the (compressed) variables.  If no bound
 * needs to be imposed on a particular variable, then the corresponding
 * value is negative.
 */
struct isl_sched_node {
	isl_space *space;
	int compressed;
	isl_set *hull;
	isl_multi_aff *compress;
	isl_multi_aff *decompress;
	isl_mat *sched;
	isl_map *sched_map;
	int rank;
	isl_mat *cmap;
	isl_mat *cinv;
	isl_mat *ctrans;
	int start;
	int nvar;
	int nparam;

	int scc;
	int cluster;

	int *coincident;

	isl_multi_val *sizes;
	isl_vec *max;
};
static int node_has_space(const void *entry, const void *val)
{
	struct isl_sched_node *node = (struct isl_sched_node *)entry;
	isl_space *dim = (isl_space *)val;

	return isl_space_is_equal(node->space, dim);
}
static int node_scc_exactly(struct isl_sched_node *node, int scc)
{
	return node->scc == scc;
}

static int node_scc_at_most(struct isl_sched_node *node, int scc)
{
	return node->scc <= scc;
}

static int node_scc_at_least(struct isl_sched_node *node, int scc)
{
	return node->scc >= scc;
}
/* An edge in the dependence graph.  An edge may be used to
 * ensure validity of the generated schedule, to minimize the dependence
 * distance or to serve several of these purposes.
 *
 * map is the dependence relation, with i -> j in the map if j depends on i
 * tagged_condition and tagged_validity contain the union of all tagged
 *	condition or conditional validity dependence relations that
 *	specialize the dependence relation "map"; that is,
 *	if (i -> a) -> (j -> b) is an element of "tagged_condition"
 *	or "tagged_validity", then i -> j is an element of "map".
 *	If these fields are NULL, then they represent the empty relation.
 * src is the source node
 * dst is the sink node
 *
 * types is a bit vector containing the types of this edge.
 * validity is set if the edge is used to ensure correctness
 * coincidence is used to enforce zero dependence distances
 * proximity is set if the edge is used to minimize dependence distances
 * condition is set if the edge represents a condition
 *	for a conditional validity schedule constraint
 * local can only be set for condition edges and indicates that
 *	the dependence distance over the edge should be zero
 * conditional_validity is set if the edge is used to conditionally
 *	ensure correctness
 *
 * For validity edges, start and end mark the sequence of inequality
 * constraints in the LP problem that encode the validity constraint
 * corresponding to this edge.
 *
 * During clustering, an edge may be marked "no_merge" if it should
 * not be used to merge clusters.
 * The weight is also only used during clustering and it is
 * an indication of how many schedule dimensions on either side
 * of the schedule constraints can be aligned.
 * If the weight is negative, then this means that this edge was postponed
 * by has_bounded_distances or any_no_merge.  The original weight can
 * be retrieved by adding 1 + graph->max_weight, with "graph"
 * the graph containing this edge.
 */
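
/* Worked example (added for exposition, not in the original source):
 * assuming graph->max_weight = 5, an edge of weight 3 that gets postponed
 * is stored with weight 3 - 1 - 5 = -3; the original weight is then
 * recovered as -3 + 1 + 5 = 3, following the rule described above.
 */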
struct isl_sched_edge {
	isl_map *map;
	isl_union_map *tagged_condition;
	isl_union_map *tagged_validity;

	struct isl_sched_node *src;
	struct isl_sched_node *dst;

	unsigned types;

	int start;
	int end;

	int no_merge;
	int weight;
};
651 /* Is "edge" marked as being of type "type"?
653 static int is_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
655 return ISL_FL_ISSET(edge
->types
, 1 << type
);
658 /* Mark "edge" as being of type "type".
660 static void set_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
662 ISL_FL_SET(edge
->types
, 1 << type
);
665 /* No longer mark "edge" as being of type "type"?
667 static void clear_type(struct isl_sched_edge
*edge
, enum isl_edge_type type
)
669 ISL_FL_CLR(edge
->types
, 1 << type
);
672 /* Is "edge" marked as a validity edge?
674 static int is_validity(struct isl_sched_edge
*edge
)
676 return is_type(edge
, isl_edge_validity
);
679 /* Mark "edge" as a validity edge.
681 static void set_validity(struct isl_sched_edge
*edge
)
683 set_type(edge
, isl_edge_validity
);
686 /* Is "edge" marked as a proximity edge?
688 static int is_proximity(struct isl_sched_edge
*edge
)
690 return is_type(edge
, isl_edge_proximity
);
693 /* Is "edge" marked as a local edge?
695 static int is_local(struct isl_sched_edge
*edge
)
697 return is_type(edge
, isl_edge_local
);
700 /* Mark "edge" as a local edge.
702 static void set_local(struct isl_sched_edge
*edge
)
704 set_type(edge
, isl_edge_local
);
707 /* No longer mark "edge" as a local edge.
709 static void clear_local(struct isl_sched_edge
*edge
)
711 clear_type(edge
, isl_edge_local
);
714 /* Is "edge" marked as a coincidence edge?
716 static int is_coincidence(struct isl_sched_edge
*edge
)
718 return is_type(edge
, isl_edge_coincidence
);
721 /* Is "edge" marked as a condition edge?
723 static int is_condition(struct isl_sched_edge
*edge
)
725 return is_type(edge
, isl_edge_condition
);
728 /* Is "edge" marked as a conditional validity edge?
730 static int is_conditional_validity(struct isl_sched_edge
*edge
)
732 return is_type(edge
, isl_edge_conditional_validity
);
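
/* Illustrative example (added for exposition, not in the original source):
 * because "types" is a bit vector, a single edge can carry several roles
 * at once.  A dependence that must both be respected and kept short would
 * be marked with
 *
 *	set_type(edge, isl_edge_validity);
 *	set_type(edge, isl_edge_proximity);
 *
 * after which both is_validity(edge) and is_proximity(edge) return 1.
 */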
/* Internal information about the dependence graph used during
 * the construction of the schedule.
 *
 * intra_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences from a node to itself
 * inter_hmap is a cache, mapping dependence relations to their dual,
 *	for dependences between distinct nodes
 * if compression is involved then the key for these maps
 * is the original, uncompressed dependence relation, while
 * the value is the dual of the compressed dependence relation.
 *
 * n is the number of nodes
 * node is the list of nodes
 * maxvar is the maximal number of variables over all nodes
 * max_row is the allocated number of rows in the schedule
 * n_row is the current (maximal) number of linearly independent
 *	rows in the node schedules
 * n_total_row is the current number of rows in the node schedules
 * band_start is the starting row in the node schedules of the current band
 * root is set if this graph is the original dependence graph,
 *	without any splitting
 *
 * sorted contains a list of node indices sorted according to the
 *	SCC to which a node belongs
 *
 * n_edge is the number of edges
 * edge is the list of edges
 * max_edge contains the maximal number of edges of each type;
 *	in particular, it contains the number of edges in the initial graph.
 * edge_table contains pointers into the edge array, hashed on the source
 *	and sink spaces; there is one such table for each type;
 *	a given edge may be referenced from more than one table
 *	if the corresponding relation appears in more than one of the
 *	sets of dependences; however, for each type there is only
 *	a single edge between a given pair of source and sink space
 *	in the entire graph
 *
 * node_table contains pointers into the node array, hashed on the space
 *
 * region contains a list of variable sequences that should be non-trivial
 *
 * lp contains the (I)LP problem used to obtain new schedule rows
 *
 * src_scc and dst_scc are the source and sink SCCs of an edge with
 *	conflicting constraints
 *
 * scc represents the number of components
 * weak is set if the components are weakly connected
 *
 * max_weight is used during clustering and represents the maximal
 * weight of the relevant proximity edges.
 */
struct isl_sched_graph {
	isl_map_to_basic_set *intra_hmap;
	isl_map_to_basic_set *inter_hmap;

	struct isl_sched_node *node;
	int n;
	int maxvar;
	int max_row;
	int n_row;

	int *sorted;

	int n_total_row;
	int band_start;

	int root;

	struct isl_sched_edge *edge;
	int n_edge;
	int max_edge[isl_edge_last + 1];
	struct isl_hash_table *edge_table[isl_edge_last + 1];

	struct isl_hash_table *node_table;
	struct isl_region *region;

	isl_basic_set *lp;

	int src_scc;
	int dst_scc;

	int scc;
	int weak;

	int max_weight;
};
/* Initialize node_table based on the list of nodes.
 */
static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	graph->node_table = isl_hash_table_alloc(ctx, graph->n);
	if (!graph->node_table)
		return -1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_hash_table_entry *entry;
		uint32_t hash;

		hash = isl_space_get_hash(graph->node[i].space);
		entry = isl_hash_table_find(ctx, graph->node_table, hash,
					    &node_has_space,
					    graph->node[i].space, 1);
		if (!entry)
			return -1;
		entry->data = &graph->node[i];
	}

	return 0;
}
/* Return a pointer to the node that lives within the given space,
 * or NULL if there is no such node.
 */
static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_space *dim)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_space_get_hash(dim);
	entry = isl_hash_table_find(ctx, graph->node_table, hash,
				    &node_has_space, dim, 0);

	return entry ? entry->data : NULL;
}
static int edge_has_src_and_dst(const void *entry, const void *val)
{
	const struct isl_sched_edge *edge = entry;
	const struct isl_sched_edge *temp = val;

	return edge->src == temp->src && edge->dst == temp->dst;
}
/* Add the given edge to graph->edge_table[type].
 */
static isl_stat graph_edge_table_add(isl_ctx *ctx,
	struct isl_sched_graph *graph, enum isl_edge_type type,
	struct isl_sched_edge *edge)
{
	struct isl_hash_table_entry *entry;
	uint32_t hash;

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, edge->src);
	hash = isl_hash_builtin(hash, edge->dst);
	entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, edge, 1);
	if (!entry)
		return isl_stat_error;
	entry->data = edge;

	return isl_stat_ok;
}
/* Allocate the edge_tables based on the maximal number of edges of
 * each type.
 */
static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i <= isl_edge_last; ++i) {
		graph->edge_table[i] = isl_hash_table_alloc(ctx,
							graph->max_edge[i]);
		if (!graph->edge_table[i])
			return -1;
	}

	return 0;
}
/* If graph->edge_table[type] contains an edge from the given source
 * to the given destination, then return the hash table entry of this edge.
 * Otherwise, return NULL.
 */
static struct isl_hash_table_entry *graph_find_edge_entry(
	struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_ctx *ctx = isl_space_get_ctx(src->space);
	uint32_t hash;
	struct isl_sched_edge temp = { .src = src, .dst = dst };

	hash = isl_hash_init();
	hash = isl_hash_builtin(hash, temp.src);
	hash = isl_hash_builtin(hash, temp.dst);
	return isl_hash_table_find(ctx, graph->edge_table[type], hash,
				    &edge_has_src_and_dst, &temp, 0);
}
/* If graph->edge_table[type] contains an edge from the given source
 * to the given destination, then return this edge.
 * Otherwise, return NULL.
 */
static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	struct isl_hash_table_entry *entry;

	entry = graph_find_edge_entry(graph, type, src, dst);
	if (!entry)
		return NULL;

	return entry->data;
}
/* Check whether the dependence graph has an edge of the given type
 * between the given two nodes.
 */
static isl_bool graph_has_edge(struct isl_sched_graph *graph,
	enum isl_edge_type type,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	struct isl_sched_edge *edge;
	isl_bool empty;

	edge = graph_find_edge(graph, type, src, dst);
	if (!edge)
		return isl_bool_false;

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		return isl_bool_error;

	return !empty;
}
/* Look for any edge with the same src, dst and map fields as "model".
 *
 * Return the matching edge if one can be found.
 * Return "model" if no matching edge is found.
 * Return NULL on error.
 */
static struct isl_sched_edge *graph_find_matching_edge(
	struct isl_sched_graph *graph, struct isl_sched_edge *model)
{
	enum isl_edge_type i;
	struct isl_sched_edge *edge;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		int is_equal;

		edge = graph_find_edge(graph, i, model->src, model->dst);
		if (!edge)
			continue;
		is_equal = isl_map_plain_is_equal(model->map, edge->map);
		if (is_equal < 0)
			return NULL;
		if (is_equal)
			return edge;
	}

	return model;
}
/* Remove the given edge from all the edge_tables that refer to it.
 */
static void graph_remove_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	isl_ctx *ctx = isl_map_get_ctx(edge->map);
	enum isl_edge_type i;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		struct isl_hash_table_entry *entry;

		entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
		if (!entry)
			continue;
		if (entry->data != edge)
			continue;
		isl_hash_table_remove(ctx, graph->edge_table[i], entry);
	}
}
/* Check whether the dependence graph has any edge
 * between the given two nodes.
 */
static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	enum isl_edge_type i;
	isl_bool r;

	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		r = graph_has_edge(graph, i, src, dst);
		if (r < 0 || r)
			return r;
	}

	return r;
}
/* Check whether the dependence graph has a validity edge
 * between the given two nodes.
 *
 * Conditional validity edges are essentially validity edges that
 * can be ignored if the corresponding condition edges are iteration private.
 * Here, we are only checking for the presence of validity
 * edges, so we need to consider the conditional validity edges too.
 * In particular, this function is used during the detection
 * of strongly connected components and we cannot ignore
 * conditional validity edges during this detection.
 */
static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
	struct isl_sched_node *src, struct isl_sched_node *dst)
{
	isl_bool r;

	r = graph_has_edge(graph, isl_edge_validity, src, dst);
	if (r < 0 || r)
		return r;

	return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
}
static int graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
	int n_node, int n_edge)
{
	int i;

	graph->n = n_node;
	graph->n_edge = n_edge;
	graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
	graph->sorted = isl_calloc_array(ctx, int, graph->n);
	graph->region = isl_alloc_array(ctx, struct isl_region, graph->n);
	graph->edge = isl_calloc_array(ctx,
					struct isl_sched_edge, graph->n_edge);

	graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
	graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);

	if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
	    !graph->sorted)
		return -1;

	for(i = 0; i < graph->n; ++i)
		graph->sorted[i] = i;

	return 0;
}
static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;

	isl_map_to_basic_set_free(graph->intra_hmap);
	isl_map_to_basic_set_free(graph->inter_hmap);

	if (graph->node)
		for (i = 0; i < graph->n; ++i) {
			isl_space_free(graph->node[i].space);
			isl_set_free(graph->node[i].hull);
			isl_multi_aff_free(graph->node[i].compress);
			isl_multi_aff_free(graph->node[i].decompress);
			isl_mat_free(graph->node[i].sched);
			isl_map_free(graph->node[i].sched_map);
			isl_mat_free(graph->node[i].cmap);
			isl_mat_free(graph->node[i].cinv);
			isl_mat_free(graph->node[i].ctrans);
			free(graph->node[i].coincident);
			isl_multi_val_free(graph->node[i].sizes);
			isl_vec_free(graph->node[i].max);
		}
	free(graph->node);
	free(graph->sorted);
	if (graph->edge)
		for (i = 0; i < graph->n_edge; ++i) {
			isl_map_free(graph->edge[i].map);
			isl_union_map_free(graph->edge[i].tagged_condition);
			isl_union_map_free(graph->edge[i].tagged_validity);
		}
	free(graph->edge);
	free(graph->region);
	for (i = 0; i <= isl_edge_last; ++i)
		isl_hash_table_free(ctx, graph->edge_table[i]);
	isl_hash_table_free(ctx, graph->node_table);
	isl_basic_set_free(graph->lp);
}
1124 /* For each "set" on which this function is called, increment
1125 * graph->n by one and update graph->maxvar.
1127 static isl_stat
init_n_maxvar(__isl_take isl_set
*set
, void *user
)
1129 struct isl_sched_graph
*graph
= user
;
1130 int nvar
= isl_set_dim(set
, isl_dim_set
);
1133 if (nvar
> graph
->maxvar
)
1134 graph
->maxvar
= nvar
;
/* Add the number of basic maps in "map" to *n.
 */
static isl_stat add_n_basic_map(__isl_take isl_map *map, void *user)
{
	int *n = user;

	*n += isl_map_n_basic_map(map);
	isl_map_free(map);

	return isl_stat_ok;
}
/* Compute the number of rows that should be allocated for the schedule.
 * In particular, we need one row for each variable or one row
 * for each basic map in the dependences.
 * Note that it is practically impossible to exhaust both
 * the number of dependences and the number of variables.
 */
static int compute_max_row(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	int n_edge;
	enum isl_edge_type i;

	graph->n = 0;
	graph->maxvar = 0;
	if (isl_union_set_foreach_set(sc->domain, &init_n_maxvar, graph) < 0)
		return -1;
	n_edge = 0;
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		if (isl_union_map_foreach_map(sc->constraint[i],
						&add_n_basic_map, &n_edge) < 0)
			return -1;
	graph->max_row = n_edge + graph->maxvar;

	return 0;
}
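
/* Worked example (added for exposition, not in the original source):
 * with two nodes of 3 and 2 set variables (so graph->maxvar = 3) and
 * dependence relations consisting of 4 basic maps in total,
 * compute_max_row sets graph->max_row = 4 + 3 = 7.
 */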
1179 /* Does "bset" have any defining equalities for its set variables?
1181 static int has_any_defining_equality(__isl_keep isl_basic_set
*bset
)
1188 n
= isl_basic_set_dim(bset
, isl_dim_set
);
1189 for (i
= 0; i
< n
; ++i
) {
1192 has
= isl_basic_set_has_defining_equality(bset
, isl_dim_set
, i
,
/* Set the entries of node->max to the value of the schedule_max_coefficient
 * option, if set.
 */
static isl_stat set_max_coefficient(isl_ctx *ctx, struct isl_sched_node *node)
{
	int max;

	max = isl_options_get_schedule_max_coefficient(ctx);
	if (max == -1)
		return isl_stat_ok;

	node->max = isl_vec_alloc(ctx, node->nvar);
	node->max = isl_vec_set_si(node->max, max);
	if (!node->max)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Set the entries of node->max to the minimum of the schedule_max_coefficient
 * option (if set) and half of the minimum of the sizes in the other
 * dimensions.  If the minimum of the sizes is one, half of the size
 * is zero and this value is reset to one.
 * If the global minimum is unbounded (i.e., if both
 * the schedule_max_coefficient is not set and the sizes in the other
 * dimensions are unbounded), then store a negative value.
 * If the schedule coefficient is close to the size of the instance set
 * in another dimension, then the schedule may represent a loop
 * coalescing transformation (especially if the coefficient
 * in that other dimension is one).  Forcing the coefficient to be
 * smaller than or equal to half the minimal size should avoid this
 * situation.
 */
static isl_stat compute_max_coefficient(isl_ctx *ctx,
	struct isl_sched_node *node)
{
	int max;
	int i, j;
	isl_vec *v;

	max = isl_options_get_schedule_max_coefficient(ctx);
	v = isl_vec_alloc(ctx, node->nvar);
	if (!v)
		return isl_stat_error;

	for (i = 0; i < node->nvar; ++i) {
		isl_int_set_si(v->el[i], max);
		isl_int_mul_si(v->el[i], v->el[i], 2);
	}

	for (i = 0; i < node->nvar; ++i) {
		isl_val *size;

		size = isl_multi_val_get_val(node->sizes, i);
		if (!size)
			goto error;
		if (!isl_val_is_int(size)) {
			isl_val_free(size);
			continue;
		}
		for (j = 0; j < node->nvar; ++j) {
			if (j == i)
				continue;
			if (isl_int_is_neg(v->el[j]) ||
			    isl_int_gt(v->el[j], size->n))
				isl_int_set(v->el[j], size->n);
		}
		isl_val_free(size);
	}

	for (i = 0; i < node->nvar; ++i) {
		isl_int_fdiv_q_ui(v->el[i], v->el[i], 2);
		if (isl_int_is_zero(v->el[i]))
			isl_int_set_si(v->el[i], 1);
	}

	node->max = v;
	return isl_stat_ok;
error:
	isl_vec_free(v);
	return isl_stat_error;
}
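
/* Worked example (added for exposition, not in the original source):
 * assume a node with two variables, sizes (100, 3) and no
 * schedule_max_coefficient option.  The bound for the first variable only
 * looks at the size of the second dimension: floor(3 / 2) = 1.  The bound
 * for the second variable looks at the first dimension:
 * floor(100 / 2) = 50.  So node->max becomes (1, 50), keeping each
 * coefficient well below the sizes of the other dimensions.
 */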
/* Compute and return the size of "set" in dimension "dim".
 * The size is taken to be the difference in values for that variable
 * for fixed values of the other variables.
 * In particular, the variable is first isolated from the other variables
 * in the range of a map
 *
 *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
 *
 * and then duplicated
 *
 *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
 *
 * The shared variables are then projected out and the maximal value
 * of i_dim' - i_dim is computed.
 */
static __isl_give isl_val *compute_size(__isl_take isl_set *set, int dim)
{
	isl_map *map;
	isl_local_space *ls;
	isl_aff *obj;
	isl_val *v;

	map = isl_set_project_onto_map(set, isl_dim_set, dim, 1);
	map = isl_map_project_out(map, isl_dim_in, dim, 1);
	map = isl_map_range_product(map, isl_map_copy(map));
	map = isl_set_unwrap(isl_map_range(map));
	set = isl_map_deltas(map);
	ls = isl_local_space_from_space(isl_set_get_space(set));
	obj = isl_aff_var_on_domain(ls, isl_dim_set, 0);
	v = isl_set_max_val(set, obj);
	isl_aff_free(obj);

	return v;
}
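
/* Worked example (added for exposition, not in the original source):
 * for a hypothetical set { S[i, j] : 0 <= i < n and 0 <= j < 10 } and
 * dim = 1, the pairs [[j] -> [j']] with the shared i projected out give
 * deltas j' - j ranging over [-9, 9], so the computed size is 9,
 * the largest difference between two values of j for fixed i.
 */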
/* Compute the size of the instance set "set" of "node", after compression,
 * as well as bounds on the corresponding coefficients, if needed.
 *
 * The sizes are needed when the schedule_treat_coalescing option is set.
 * The bounds are needed when the schedule_treat_coalescing option or
 * the schedule_max_coefficient option is set.
 *
 * If the schedule_treat_coalescing option is not set, then at most
 * the bounds need to be set and this is done in set_max_coefficient.
 * Otherwise, compress the domain if needed, compute the size
 * in each direction and store the results in node->size.
 * Finally, set the bounds on the coefficients based on the sizes
 * and the schedule_max_coefficient option in compute_max_coefficient.
 */
static isl_stat compute_sizes_and_max(isl_ctx *ctx, struct isl_sched_node *node,
	__isl_take isl_set *set)
{
	int j, n;
	isl_multi_val *mv;

	if (!isl_options_get_schedule_treat_coalescing(ctx)) {
		isl_set_free(set);
		return set_max_coefficient(ctx, node);
	}

	if (node->compressed)
		set = isl_set_preimage_multi_aff(set,
					isl_multi_aff_copy(node->decompress));
	mv = isl_multi_val_zero(isl_set_get_space(set));
	n = isl_set_dim(set, isl_dim_set);
	for (j = 0; j < n; ++j) {
		isl_val *v;

		v = compute_size(isl_set_copy(set), j);
		mv = isl_multi_val_set_val(mv, j, v);
	}
	node->sizes = mv;
	isl_set_free(set);
	if (!node->sizes)
		return isl_stat_error;
	return compute_max_coefficient(ctx, node);
}
/* Add a new node to the graph representing the given instance set.
 * "nvar" is the (possibly compressed) number of variables and
 * may be smaller than the number of set variables in "set"
 * if "compressed" is set.
 * If "compressed" is set, then "hull" represents the constraints
 * that were used to derive the compression, while "compress" and
 * "decompress" map the original space to the compressed space and
 * vice versa.
 * If "compressed" is not set, then "hull", "compress" and "decompress"
 * may be NULL.
 *
 * Compute the size of the instance set and bounds on the coefficients,
 * if needed.
 */
static isl_stat add_node(struct isl_sched_graph *graph,
	__isl_take isl_set *set, int nvar, int compressed,
	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
	__isl_take isl_multi_aff *decompress)
{
	int nparam;
	isl_ctx *ctx;
	isl_mat *sched;
	isl_space *space;
	int *coincident;
	struct isl_sched_node *node;

	if (!set)
		return isl_stat_error;

	ctx = isl_set_get_ctx(set);
	nparam = isl_set_dim(set, isl_dim_param);
	if (!ctx->opt->schedule_parametric)
		nparam = 0;
	sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
	node = &graph->node[graph->n];
	graph->n++;
	space = isl_set_get_space(set);
	node->space = space;
	node->nvar = nvar;
	node->nparam = nparam;
	node->sched = sched;
	node->sched_map = NULL;
	coincident = isl_calloc_array(ctx, int, graph->max_row);
	node->coincident = coincident;
	node->compressed = compressed;
	node->hull = hull;
	node->compress = compress;
	node->decompress = decompress;
	if (compute_sizes_and_max(ctx, node, set) < 0)
		return isl_stat_error;

	if (!space || !sched || (graph->max_row && !coincident))
		return isl_stat_error;
	if (compressed && (!hull || !compress || !decompress))
		return isl_stat_error;

	return isl_stat_ok;
}
/* Add a new node to the graph representing the given set.
 *
 * If any of the set variables is defined by an equality, then
 * we perform variable compression such that we can perform
 * the scheduling on the compressed domain.
 */
static isl_stat extract_node(__isl_take isl_set *set, void *user)
{
	int nvar;
	int has_equality;
	isl_basic_set *hull;
	isl_set *hull_set;
	isl_morph *morph;
	isl_multi_aff *compress, *decompress;
	struct isl_sched_graph *graph = user;

	hull = isl_set_affine_hull(isl_set_copy(set));
	hull = isl_basic_set_remove_divs(hull);
	nvar = isl_set_dim(set, isl_dim_set);
	has_equality = has_any_defining_equality(hull);

	if (has_equality < 0)
		goto error;
	if (!has_equality) {
		isl_basic_set_free(hull);
		return add_node(graph, set, nvar, 0, NULL, NULL, NULL);
	}

	morph = isl_basic_set_variable_compression(hull, isl_dim_set);
	nvar = isl_morph_ran_dim(morph, isl_dim_set);
	compress = isl_morph_get_var_multi_aff(morph);
	morph = isl_morph_inverse(morph);
	decompress = isl_morph_get_var_multi_aff(morph);
	isl_morph_free(morph);

	hull_set = isl_set_from_basic_set(hull);
	return add_node(graph, set, nvar, 1, hull_set, compress, decompress);
error:
	isl_basic_set_free(hull);
	isl_set_free(set);
	return isl_stat_error;
}
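
/* Illustrative example (added for exposition, not in the original source):
 * for a hypothetical instance set { S[i, j] : j = 2 * i and 0 <= i < n },
 * the affine hull defines j in terms of i, so the node is compressed to
 * a single variable.  Roughly, "compress" maps { S[i, j] -> [i] } and
 * "decompress" maps { [i] -> S[i, 2i] }, and scheduling is then performed
 * on the compressed one-dimensional space.
 */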
struct isl_extract_edge_data {
	enum isl_edge_type type;
	struct isl_sched_graph *graph;
};
/* Merge edge2 into edge1, freeing the contents of edge2.
 * Return 0 on success and -1 on failure.
 *
 * edge1 and edge2 are assumed to have the same value for the map field.
 */
static int merge_edge(struct isl_sched_edge *edge1,
	struct isl_sched_edge *edge2)
{
	edge1->types |= edge2->types;
	isl_map_free(edge2->map);

	if (is_condition(edge2)) {
		if (!edge1->tagged_condition)
			edge1->tagged_condition = edge2->tagged_condition;
		else
			edge1->tagged_condition =
			    isl_union_map_union(edge1->tagged_condition,
						edge2->tagged_condition);
	}

	if (is_conditional_validity(edge2)) {
		if (!edge1->tagged_validity)
			edge1->tagged_validity = edge2->tagged_validity;
		else
			edge1->tagged_validity =
			    isl_union_map_union(edge1->tagged_validity,
						edge2->tagged_validity);
	}

	if (is_condition(edge2) && !edge1->tagged_condition)
		return -1;
	if (is_conditional_validity(edge2) && !edge1->tagged_validity)
		return -1;

	return 0;
}
/* Insert dummy tags in domain and range of "map".
 *
 * In particular, if "map" is of the form
 *
 *	A -> B
 *
 * then the result will be of the form
 *
 *	[A -> dummy_tag] -> [B -> dummy_tag]
 *
 * where the dummy_tags are identical and equal to any dummy tags
 * introduced by any other call to this function.
 */
static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
{
	static char dummy;
	isl_ctx *ctx;
	isl_id *id;
	isl_space *space;
	isl_set *domain, *range;

	ctx = isl_map_get_ctx(map);

	id = isl_id_alloc(ctx, NULL, &dummy);
	space = isl_space_params(isl_map_get_space(map));
	space = isl_space_set_from_params(space);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	space = isl_space_map_from_set(space);

	domain = isl_map_wrap(map);
	range = isl_map_wrap(isl_map_universe(space));
	map = isl_map_from_domain_and_range(domain, range);
	map = isl_map_zip(map);

	return map;
}
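
/* Illustrative example (added for exposition, not in the original source):
 * a hypothetical untagged condition { A[i] -> B[i] } becomes
 *
 *	{ [A[i] -> dummy_tag[]] -> [B[i] -> dummy_tag[]] }
 *
 * so that it can be treated uniformly with genuinely tagged
 * condition and conditional validity relations.
 */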
/* Given that at least one of "src" or "dst" is compressed, return
 * a map between the spaces of these nodes restricted to the affine
 * hull that was used in the compression.
 */
static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_set *dom, *ran;

	if (src->compressed)
		dom = isl_set_copy(src->hull);
	else
		dom = isl_set_universe(isl_space_copy(src->space));
	if (dst->compressed)
		ran = isl_set_copy(dst->hull);
	else
		ran = isl_set_universe(isl_space_copy(dst->space));

	return isl_map_from_domain_and_range(dom, ran);
}
/* Intersect the domains of the nested relations in domain and range
 * of "tagged" with "map".
 */
static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
	__isl_keep isl_map *map)
{
	isl_set *set;

	tagged = isl_map_zip(tagged);
	set = isl_map_wrap(isl_map_copy(map));
	tagged = isl_map_intersect_domain(tagged, set);
	tagged = isl_map_zip(tagged);

	return tagged;
}
/* Return a pointer to the node that lives in the domain space of "map"
 * or NULL if there is no such node.
 */
static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_domain(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}
/* Return a pointer to the node that lives in the range space of "map"
 * or NULL if there is no such node.
 */
static struct isl_sched_node *find_range_node(isl_ctx *ctx,
	struct isl_sched_graph *graph, __isl_keep isl_map *map)
{
	struct isl_sched_node *node;
	isl_space *space;

	space = isl_space_range(isl_map_get_space(map));
	node = graph_find_node(ctx, graph, space);
	isl_space_free(space);

	return node;
}
/* Add a new edge to the graph based on the given map
 * and add it to data->graph->edge_table[data->type].
 * If a dependence relation of a given type happens to be identical
 * to one of the dependence relations of a type that was added before,
 * then we don't create a new edge, but instead mark the original edge
 * as also representing a dependence of the current type.
 *
 * Edges of type isl_edge_condition or isl_edge_conditional_validity
 * may be specified as "tagged" dependence relations.  That is, "map"
 * may contain elements (i -> a) -> (j -> b), where i -> j denotes
 * the dependence on iterations and a and b are tags.
 * edge->map is set to the relation containing the elements i -> j,
 * while edge->tagged_condition and edge->tagged_validity contain
 * the union of all the "map" relations
 * for which extract_edge is called that result in the same edge->map.
 *
 * If the source or the destination node is compressed, then
 * intersect both "map" and "tagged" with the constraints that
 * were used to construct the compression.
 * This ensures that there are no schedule constraints defined
 * outside of these domains, while the scheduler no longer has
 * any control over those outside parts.
 */
static isl_stat extract_edge(__isl_take isl_map *map, void *user)
{
	isl_ctx *ctx = isl_map_get_ctx(map);
	struct isl_extract_edge_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	struct isl_sched_node *src, *dst;
	struct isl_sched_edge *edge;
	isl_map *tagged = NULL;

	if (data->type == isl_edge_condition ||
	    data->type == isl_edge_conditional_validity) {
		if (isl_map_can_zip(map)) {
			tagged = isl_map_copy(map);
			map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
		} else {
			tagged = insert_dummy_tags(isl_map_copy(map));
		}
	}

	src = find_domain_node(ctx, graph, map);
	dst = find_range_node(ctx, graph, map);

	if (!src || !dst) {
		isl_map_free(map);
		isl_map_free(tagged);
		return isl_stat_ok;
	}

	if (src->compressed || dst->compressed) {
		isl_map *hull;
		hull = extract_hull(src, dst);
		if (tagged)
			tagged = map_intersect_domains(tagged, hull);
		map = isl_map_intersect(map, hull);
	}

	graph->edge[graph->n_edge].src = src;
	graph->edge[graph->n_edge].dst = dst;
	graph->edge[graph->n_edge].map = map;
	graph->edge[graph->n_edge].types = 0;
	graph->edge[graph->n_edge].tagged_condition = NULL;
	graph->edge[graph->n_edge].tagged_validity = NULL;
	set_type(&graph->edge[graph->n_edge], data->type);
	if (data->type == isl_edge_condition)
		graph->edge[graph->n_edge].tagged_condition =
					isl_union_map_from_map(tagged);
	if (data->type == isl_edge_conditional_validity)
		graph->edge[graph->n_edge].tagged_validity =
					isl_union_map_from_map(tagged);

	edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
	if (!edge) {
		graph->n_edge++;
		return isl_stat_error;
	}
	if (edge == &graph->edge[graph->n_edge])
		return graph_edge_table_add(ctx, graph, data->type,
					    &graph->edge[graph->n_edge++]);

	if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
		return isl_stat_error;

	return graph_edge_table_add(ctx, graph, data->type, edge);
}
/* Initialize the schedule graph "graph" from the schedule constraints "sc".
 *
 * The context is included in the domain before the nodes of
 * the graphs are extracted in order to be able to exploit
 * any possible additional equalities.
 * Note that this intersection is only performed locally here.
 */
static isl_stat graph_init(struct isl_sched_graph *graph,
	__isl_keep isl_schedule_constraints *sc)
{
	isl_ctx *ctx;
	isl_union_set *domain;
	struct isl_extract_edge_data data;
	enum isl_edge_type i;
	isl_stat r;

	if (!sc)
		return isl_stat_error;

	ctx = isl_schedule_constraints_get_ctx(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	graph->n = isl_union_set_n_set(domain);
	isl_union_set_free(domain);

	if (graph_alloc(ctx, graph, graph->n,
	    isl_schedule_constraints_n_map(sc)) < 0)
		return isl_stat_error;

	if (compute_max_row(graph, sc) < 0)
		return isl_stat_error;
	graph->n = 0;
	domain = isl_schedule_constraints_get_domain(sc);
	domain = isl_union_set_intersect_params(domain,
						isl_set_copy(sc->context));
	r = isl_union_set_foreach_set(domain, &extract_node, graph);
	isl_union_set_free(domain);
	if (r < 0)
		return isl_stat_error;
	if (graph_init_table(ctx, graph) < 0)
		return isl_stat_error;
	for (i = isl_edge_first; i <= isl_edge_last; ++i)
		graph->max_edge[i] = isl_union_map_n_map(sc->constraint[i]);
	if (graph_init_edge_tables(ctx, graph) < 0)
		return isl_stat_error;
	graph->n_edge = 0;
	data.graph = graph;
	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
		data.type = i;
		if (isl_union_map_foreach_map(sc->constraint[i],
						&extract_edge, &data) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}
/* Check whether there is any dependence from node[j] to node[i]
 * or from node[i] to node[j].
 */
static isl_bool node_follows_weak(int i, int j, void *user)
{
	isl_bool f;
	struct isl_sched_graph *graph = user;

	f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
	if (f < 0 || f)
		return f;
	return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
}
/* Check whether there is a (conditional) validity dependence from node[j]
 * to node[i], forcing node[i] to follow node[j].
 */
static isl_bool node_follows_strong(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Use Tarjan's algorithm for computing the strongly connected components
 * in the dependence graph only considering those edges defined by "follows".
 */
static int detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	isl_bool (*follows)(int i, int j, void *user))
{
	int i, n;
	struct isl_tarjan_graph *g = NULL;

	g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
	if (!g)
		return -1;

	graph->scc = 0;
	i = 0;
	n = graph->n;
	while (n) {
		while (g->order[i] != -1) {
			graph->node[g->order[i]].scc = graph->scc;
			--n;
			++i;
		}
		++i;
		graph->scc++;
	}

	isl_tarjan_graph_free(g);

	return 0;
}
/* Apply Tarjan's algorithm to detect the strongly connected components
 * in the dependence graph.
 * Only consider the (conditional) validity dependences and clear "weak".
 */
static int detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 0;
	return detect_ccs(ctx, graph, &node_follows_strong);
}

/* Apply Tarjan's algorithm to detect the (weakly) connected components
 * in the dependence graph.
 * Consider all dependences and set "weak".
 */
static int detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	graph->weak = 1;
	return detect_ccs(ctx, graph, &node_follows_weak);
}
static int cmp_scc(const void *a, const void *b, void *data)
{
	struct isl_sched_graph *graph = data;
	const int *i1 = a;
	const int *i2 = b;

	return graph->node[*i1].scc - graph->node[*i2].scc;
}
/* Sort the elements of graph->sorted according to the corresponding SCCs.
 */
static int sort_sccs(struct isl_sched_graph *graph)
{
	return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
}
1849 /* Given a dependence relation R from "node" to itself,
1850 * construct the set of coefficients of valid constraints for elements
1851 * in that dependence relation.
1852 * In particular, the result contains tuples of coefficients
1853 * c_0, c_n, c_x such that
1855 * c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1859 * c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1861 * We choose here to compute the dual of delta R.
1862 * Alternatively, we could have computed the dual of R, resulting
1863 * in a set of tuples c_0, c_n, c_x, c_y, and then
1864 * plugged in (c_0, c_n, c_x, -c_x).
1866 * If "node" has been compressed, then the dependence relation
1867 * is also compressed before the set of coefficients is computed.
1869 static __isl_give isl_basic_set
*intra_coefficients(
1870 struct isl_sched_graph
*graph
, struct isl_sched_node
*node
,
1871 __isl_take isl_map
*map
)
1875 isl_basic_set
*coef
;
1876 isl_maybe_isl_basic_set m
;
1878 m
= isl_map_to_basic_set_try_get(graph
->intra_hmap
, map
);
1879 if (m
.valid
< 0 || m
.valid
) {
1884 key
= isl_map_copy(map
);
1885 if (node
->compressed
) {
1886 map
= isl_map_preimage_domain_multi_aff(map
,
1887 isl_multi_aff_copy(node
->decompress
));
1888 map
= isl_map_preimage_range_multi_aff(map
,
1889 isl_multi_aff_copy(node
->decompress
));
1891 delta
= isl_set_remove_divs(isl_map_deltas(map
));
1892 coef
= isl_set_coefficients(delta
);
1893 graph
->intra_hmap
= isl_map_to_basic_set_set(graph
->intra_hmap
, key
,
1894 isl_basic_set_copy(coef
));
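
/* Worked example (added for exposition, not in the original source):
 * for a hypothetical non-parametric self-dependence R = { S[i] -> S[i + 1] },
 * delta R = { [1] }, so the valid coefficient tuples (c_0, c_x) are those
 * satisfying c_0 + c_x >= 0; in particular (0, 1) is valid, corresponding
 * to the constraint that the dependence distance of S[i] -> S[i + 1]
 * is non-negative in the direction of increasing i.
 */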
/* Given a dependence relation R, construct the set of coefficients
 * of valid constraints for elements in that dependence relation.
 * In particular, the result contains tuples of coefficients
 * c_0, c_n, c_x, c_y such that
 *
 *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
 *
 * If the source or destination nodes of "edge" have been compressed,
 * then the dependence relation is also compressed before
 * the set of coefficients is computed.
 */
static __isl_give isl_basic_set *inter_coefficients(
	struct isl_sched_graph *graph, struct isl_sched_edge *edge,
	__isl_take isl_map *map)
{
	isl_set *set;
	isl_map *key;
	isl_basic_set *coef;
	isl_maybe_isl_basic_set m;

	m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
	if (m.valid < 0 || m.valid) {
		isl_map_free(map);
		return m.value;
	}

	key = isl_map_copy(map);
	if (edge->src->compressed)
		map = isl_map_preimage_domain_multi_aff(map,
				isl_multi_aff_copy(edge->src->decompress));
	if (edge->dst->compressed)
		map = isl_map_preimage_range_multi_aff(map,
				isl_multi_aff_copy(edge->dst->decompress));
	set = isl_map_wrap(isl_map_remove_divs(map));
	coef = isl_set_coefficients(set);
	graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
					isl_basic_set_copy(coef));

	return coef;
}
/* Return the position of the coefficients of the variables in
 * the coefficients constraints "coef".
 *
 * The space of "coef" is of the form
 *
 *	{ coefficients[[cst, params] -> S] }
 *
 * Return the position of S.
 */
static int coef_var_offset(__isl_keep isl_basic_set *coef)
{
	int offset;
	isl_space *space;

	space = isl_space_unwrap(isl_basic_set_get_space(coef));
	offset = isl_space_dim(space, isl_dim_in);
	isl_space_free(space);

	return offset;
}
/* Return the offset of the coefficients of the variables of "node"
 * within the (I)LP.
 *
 * Within each node, the coefficients have the following order:
 *	- c_i_0
 *	- c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 */
static int node_var_coef_offset(struct isl_sched_node *node)
{
	return node->start + 1 + node->nparam;
}
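
/* Illustrative example (added for exposition, not in the original source):
 * for a hypothetical node with node->start = 4, two parameters and
 * three variables, the LP variables of the node are laid out as
 *
 *	position 4:	c_i_0
 *	positions 5-6:	c_i_n
 *	positions 7-12:	(c_i_x^-, c_i_x^+) pairs, one pair per variable
 *
 * so node_var_coef_offset returns 4 + 1 + 2 = 7, the position of the
 * first c_i_x^- coefficient.
 */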
/* Construct an isl_dim_map for mapping constraints on coefficients
 * for "node" to the corresponding positions in graph->lp.
 * "offset" is the offset of the coefficients for the variables
 * in the input constraints.
 * "s" is the sign of the mapping.
 *
 * The input constraints are given in terms of the coefficients (c_0, c_n, c_x).
 * The mapping produced by this function essentially plugs in
 * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
 * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
 *
 * The caller can extend the mapping to also map the other coefficients
 * (and therefore not plug in 0).
 */
static __isl_give isl_dim_map *intra_dim_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_sched_node *node,
	int offset, int s)
{
	int pos;
	unsigned total;
	isl_dim_map *dim_map;

	total = isl_basic_set_total_dim(graph->lp);
	pos = node_var_coef_offset(node);
	dim_map = isl_dim_map_alloc(ctx, total);
	isl_dim_map_range(dim_map, pos, 2, offset, 1, node->nvar, -s);
	isl_dim_map_range(dim_map, pos + 1, 2, offset, 1, node->nvar, s);

	return dim_map;
}
/* Construct an isl_dim_map for mapping constraints on coefficients
 * for "src" (node i) and "dst" (node j) to the corresponding positions
 * in graph->lp.
 * "offset" is the offset of the coefficients for the variables of "src"
 * in the input constraints.
 * "s" is the sign of the mapping.
 *
 * The input constraints are given in terms of the coefficients
 * (c_0, c_n, c_x, c_y).
 * The mapping produced by this function essentially plugs in
 * (c_j_0 - c_i_0, c_j_n - c_i_n,
 *  c_j_x^+ - c_j_x^-, -(c_i_x^+ - c_i_x^-)) if s = 1 and
 * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
 *  -(c_j_x^+ - c_j_x^-), c_i_x^+ - c_i_x^-) if s = -1.
 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
 *
 * The caller can further extend the mapping.
 */
static __isl_give isl_dim_map *inter_dim_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_sched_node *src,
	struct isl_sched_node *dst, int offset, int s)
{
	int pos;
	unsigned total;
	isl_dim_map *dim_map;

	total = isl_basic_set_total_dim(graph->lp);
	dim_map = isl_dim_map_alloc(ctx, total);

	isl_dim_map_range(dim_map, dst->start, 0, 0, 0, 1, s);
	isl_dim_map_range(dim_map, dst->start + 1, 1, 1, 1, dst->nparam, s);
	pos = node_var_coef_offset(dst);
	isl_dim_map_range(dim_map, pos, 2, offset + src->nvar, 1,
			  dst->nvar, -s);
	isl_dim_map_range(dim_map, pos + 1, 2, offset + src->nvar, 1,
			  dst->nvar, s);

	isl_dim_map_range(dim_map, src->start, 0, 0, 0, 1, -s);
	isl_dim_map_range(dim_map, src->start + 1, 1, 1, 1, src->nparam, -s);
	pos = node_var_coef_offset(src);
	isl_dim_map_range(dim_map, pos, 2, offset, 1, src->nvar, s);
	isl_dim_map_range(dim_map, pos + 1, 2, offset, 1, src->nvar, -s);

	return dim_map;
}
/* Add constraints to graph->lp that force validity for the given
 * dependence from a node i to itself.
 * That is, add constraints that enforce
 *
 *	(c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *	= c_i_x (y - x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (0, 0, c_i_x^+ - c_i_x^-),
 * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
 * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
 *
 * Actually, we do not construct constraints for the c_i_x themselves,
 * but for the coefficients of c_i_x written as a linear combination
 * of the columns in node->cmap.
 */
static isl_stat add_intra_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int offset;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);

	offset = coef_var_offset(coef);

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
				offset, isl_mat_copy(node->cmap));
	if (!coef)
		return isl_stat_error;

	dim_map = intra_dim_map(ctx, graph, node, offset, 1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return isl_stat_ok;
}

/* Add constraints to graph->lp that force validity for the given
 * dependence from node i to node j.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (c_j_0 - c_i_0, c_j_n - c_i_n, c_j_x^+ - c_j_x^- - (c_i_x^+ - c_i_x^-)),
 * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
 * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
 *
 * Actually, we do not construct constraints for the c_*_x themselves,
 * but for the coefficients of c_*_x written as a linear combination
 * of the columns in node->cmap.
 */
static isl_stat add_inter_validity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int offset;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);

	offset = coef_var_offset(coef);

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
				offset, isl_mat_copy(src->cmap));
	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
				offset + src->nvar, isl_mat_copy(dst->cmap));
	if (!coef)
		return isl_stat_error;

	dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);

	edge->start = graph->lp->n_ineq;
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);
	if (!graph->lp)
		return isl_stat_error;
	edge->end = graph->lp->n_ineq;

	return isl_stat_ok;
}

/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from a node i to itself.
 * If s = 1, we add the constraint
 *
 *	c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	-c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-c_i_x (y - x) <= m_0 + m_n n
 *
 * or
 *
 *	c_i_x (y - x) + m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
 * with each coefficient (except m_0) represented as a pair of non-negative
 * coefficients.
 *
 * Actually, we do not construct constraints for the c_i_x themselves,
 * but for the coefficients of c_i_x written as a linear combination
 * of the columns in node->cmap.
 *
 * If "local" is set, then we add constraints
 *
 *	c_i_x (y - x) <= 0
 *
 * or
 *
 *	-c_i_x (y - x) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in (0, 0, -s * c_i_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static isl_stat add_intra_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	int offset;
	unsigned nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);

	offset = coef_var_offset(coef);

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
				offset, isl_mat_copy(node->cmap));
	if (!coef)
		return isl_stat_error;

	nparam = isl_space_dim(node->space, isl_dim_param);
	dim_map = intra_dim_map(ctx, graph, node, offset, -s);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return isl_stat_ok;
}

/* Add constraints to graph->lp that bound the dependence distance for the given
 * dependence from node i to node j.
 * If s = 1, we add the constraint
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
 *		<= m_0 + m_n n
 *
 * or
 *
 *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * If s = -1, we add the constraint
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
 *		<= m_0 + m_n n
 *
 * or
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
 *		m_0 + m_n n >= 0
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
 * of valid constraints for R and then plug in
 * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
 *  -s*c_j_x + s*c_i_x)
 * with each coefficient (except m_0, c_*_0 and c_*_n)
 * represented as a pair of non-negative coefficients.
 *
 * Actually, we do not construct constraints for the c_*_x themselves,
 * but for the coefficients of c_*_x written as a linear combination
 * of the columns in node->cmap.
 *
 * If "local" is set, then we add constraints
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
 *
 * or
 *
 *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)) <= 0
 *
 * instead, forcing the dependence distance to be (less than or) equal to 0.
 * That is, we plug in
 * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, -s*c_j_x + s*c_i_x).
 * Note that dependences marked local are treated as validity constraints
 * by add_all_validity_constraints and therefore also have
 * their distances bounded by 0 from below.
 */
static isl_stat add_inter_proximity_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, int s, int local)
{
	int offset;
	unsigned nparam;
	isl_map *map = isl_map_copy(edge->map);
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);

	offset = coef_var_offset(coef);

	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
				offset, isl_mat_copy(src->cmap));
	coef = isl_basic_set_transform_dims(coef, isl_dim_set,
				offset + src->nvar, isl_mat_copy(dst->cmap));
	if (!coef)
		return isl_stat_error;

	nparam = isl_space_dim(src->space, isl_dim_param);
	dim_map = inter_dim_map(ctx, graph, src, dst, offset, -s);

	if (!local) {
		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
	}
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return isl_stat_ok;
}

/* Add all validity constraints to graph->lp.
 *
 * An edge that is forced to be local needs to have its dependence
 * distances equal to zero. We take care of bounding them by 0 from below
 * here. add_all_proximity_constraints takes care of bounding them by 0
 * from above.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_validity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src != edge->dst)
			continue;
		if (add_intra_validity_constraints(graph, edge) < 0)
			return -1;
	}

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_validity(edge) && !local)
			continue;
		if (edge->src == edge->dst)
			continue;
		if (add_inter_validity_constraints(graph, edge) < 0)
			return -1;
	}

	return 0;
}

/* Add constraints to graph->lp that bound the dependence distance
 * for all dependence relations.
 * If a given proximity dependence is identical to a validity
 * dependence, then the dependence distance is already bounded
 * from below (by zero), so we only need to bound the distance
 * from above. (This includes the case of "local" dependences
 * which are treated as validity dependences by add_all_validity_constraints.)
 * Otherwise, we need to bound the distance both from above and from below.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int add_all_proximity_constraints(struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int local;

		local = is_local(edge) ||
			(is_coincidence(edge) && use_coincidence);
		if (!is_proximity(edge) && !local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, 1, local) < 0)
			return -1;
		if (is_validity(edge) || local)
			continue;
		if (edge->src == edge->dst &&
		    add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
		if (edge->src != edge->dst &&
		    add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
			return -1;
	}

	return 0;
}

/* Compute a basis for the rows in the linear part of the schedule
 * and extend this basis to a full basis. The remaining rows
 * can then be used to force linear independence from the rows
 * in the schedule.
 *
 * In particular, given the schedule rows S, we compute
 *
 *	S = H Q
 *	S U = H
 *
 * with H the Hermite normal form of S. That is, all but the
 * first rank columns of H are zero and so each row in S is
 * a linear combination of the first rank rows of Q.
 * The matrix Q is then transposed because we will write the
 * coefficients of the next schedule row as a column vector s
 * and express this s as a linear combination s = Q c of the
 * rows of Q.
 * Similarly, the matrix U is transposed such that we can
 * compute the coefficients c = U s from a schedule row s.
 */
static int node_update_cmap(struct isl_sched_node *node)
{
	isl_mat *H, *U, *Q;
	int n_row = isl_mat_rows(node->sched);

	H = isl_mat_sub_alloc(node->sched, 0, n_row,
			      1 + node->nparam, node->nvar);

	H = isl_mat_left_hermite(H, 0, &U, &Q);
	isl_mat_free(node->cmap);
	isl_mat_free(node->cinv);
	isl_mat_free(node->ctrans);
	node->ctrans = isl_mat_copy(Q);
	node->cmap = isl_mat_transpose(Q);
	node->cinv = isl_mat_transpose(U);
	node->rank = isl_mat_initial_non_zero_cols(H);
	isl_mat_free(H);

	if (!node->cmap || !node->cinv || !node->ctrans || node->rank < 0)
		return -1;
	return 0;
}

/* Is "edge" marked as a validity or a conditional validity edge?
 */
static int is_any_validity(struct isl_sched_edge *edge)
{
	return is_validity(edge) || is_conditional_validity(edge);
}

/* How many times should we count the constraints in "edge"?
 *
 * If carry is set, then we are counting the number of
 * (validity or conditional validity) constraints that will be added
 * in setup_carry_lp and we count each edge exactly once.
 *
 * Otherwise, we count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If an edge is only marked conditional_validity then it counts
 * as zero since it is only checked afterwards.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int edge_multiplicity(struct isl_sched_edge *edge, int carry,
	int use_coincidence)
{
	if (carry)
		return is_any_validity(edge) ? 1 : 0;
	if (is_proximity(edge) || is_local(edge))
		return 2;
	if (use_coincidence && is_coincidence(edge))
		return 2;
	if (is_validity(edge))
		return 1;
	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added for the given map.
 *
 * "use_coincidence" is set if we should take into account coincidence edges.
 */
static int count_map_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map,
	int *n_eq, int *n_ineq, int carry, int use_coincidence)
{
	isl_basic_set *coef;
	int f = edge_multiplicity(edge, carry, use_coincidence);

	if (f == 0) {
		isl_map_free(map);
		return 0;
	}

	if (edge->src == edge->dst)
		coef = intra_coefficients(graph, edge->src, map);
	else
		coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;
	*n_eq += f * coef->n_eq;
	*n_ineq += f * coef->n_ineq;
	isl_basic_set_free(coef);

	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added to the main lp problem.
 * We count as follows
 * validity		-> 1 (>= 0)
 * validity+proximity	-> 2 (>= 0 and upper bound)
 * proximity		-> 2 (lower and upper bound)
 * local(+any)		-> 2 (>= 0 and <= 0)
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static int count_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq, int use_coincidence)
{
	int i;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_map *map = isl_map_copy(edge->map);

		if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
					    0, use_coincidence) < 0)
			return -1;
	}

	return 0;
}

/* Count the number of constraints that will be added by
 * add_bound_constant_constraints to bound the values of the constant terms
 * and increment *n_eq and *n_ineq accordingly.
 *
 * In practice, add_bound_constant_constraints only adds inequalities.
 */
static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	if (isl_options_get_schedule_max_constant_term(ctx) == -1)
		return isl_stat_ok;

	*n_ineq += graph->n;

	return isl_stat_ok;
}

/* Add constraints to bound the values of the constant terms in the schedule,
 * if requested by the user.
 *
 * The maximal value of the constant terms is defined by the option
 * "schedule_max_constant_term".
 *
 * Within each node, the coefficients have the following order:
 *	- c_i_0
 *	- c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 */
static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i, k;
	int max;
	int total;

	max = isl_options_get_schedule_max_constant_term(ctx);
	if (max == -1)
		return isl_stat_ok;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][1 + node->start], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	return isl_stat_ok;
}

/* Count the number of constraints that will be added by
 * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
 * accordingly.
 *
 * In practice, add_bound_coefficient_constraints only adds inequalities.
 */
static int count_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
{
	int i;

	if (isl_options_get_schedule_max_coefficient(ctx) == -1 &&
	    !isl_options_get_schedule_treat_coalescing(ctx))
		return 0;

	for (i = 0; i < graph->n; ++i)
		*n_ineq += graph->node[i].nparam + 2 * graph->node[i].nvar;

	return 0;
}

/* Add constraints to graph->lp that bound the values of
 * the parameter schedule coefficients of "node" to "max" and
 * the variable schedule coefficients to the corresponding entry
 * in node->max.
 * In either case, a negative value means that no bound needs to be imposed.
 *
 * For parameter coefficients, this amounts to adding a constraint
 *
 *	c_n <= max
 *
 * i.e.,
 *
 *	-c_n + max >= 0
 *
 * The variables coefficients are, however, not represented directly.
 * Instead, the variables coefficients c_x are written as a linear
 * combination c_x = cmap c_z of some other coefficients c_z,
 * which are in turn encoded as c_z = c_z^+ - c_z^-.
 * Let a_j be the elements of row i of node->cmap, then
 *
 *	-max_i <= c_x_i <= max_i
 *
 * is encoded as
 *
 *	-max_i <= \sum_j a_j (c_z_j^+ - c_z_j^-) <= max_i
 *
 * or
 *
 *	-\sum_j a_j (c_z_j^+ - c_z_j^-) + max_i >= 0
 *	\sum_j a_j (c_z_j^+ - c_z_j^-) + max_i >= 0
 */
static isl_stat node_add_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_sched_node *node, int max)
{
	int i, j, k;
	int total;
	isl_vec *ineq;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	for (j = 0; j < node->nparam; ++j) {
		int dim;

		if (max < 0)
			continue;

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		dim = 1 + node->start + 1 + j;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][dim], -1);
		isl_int_set_si(graph->lp->ineq[k][0], max);
	}

	ineq = isl_vec_alloc(ctx, 1 + total);
	ineq = isl_vec_clr(ineq);
	if (!ineq)
		return isl_stat_error;
	for (i = 0; i < node->nvar; ++i) {
		int pos = 1 + node_var_coef_offset(node);

		if (isl_int_is_neg(node->max->el[i]))
			continue;

		for (j = 0; j < node->nvar; ++j) {
			isl_int_set(ineq->el[pos + 2 * j],
					node->cmap->row[i][j]);
			isl_int_neg(ineq->el[pos + 2 * j + 1],
					node->cmap->row[i][j]);
		}
		isl_int_set(ineq->el[0], node->max->el[i]);

		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			goto error;
		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);

		isl_seq_neg(ineq->el + pos, ineq->el + pos, 2 * node->nvar);
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			goto error;
		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
	}
	isl_vec_free(ineq);

	return isl_stat_ok;
error:
	isl_vec_free(ineq);
	return isl_stat_error;
}

/* Add constraints that bound the values of the variable and parameter
 * coefficients of the schedule.
 *
 * The maximal value of the coefficients is defined by the option
 * 'schedule_max_coefficient' and the entries in node->max.
 * These latter entries are only set if either the schedule_max_coefficient
 * option or the schedule_treat_coalescing option is set.
 */
static isl_stat add_bound_coefficient_constraints(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int max;

	max = isl_options_get_schedule_max_coefficient(ctx);

	if (max == -1 && !isl_options_get_schedule_treat_coalescing(ctx))
		return isl_stat_ok;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		if (node_add_coefficient_constraints(ctx, graph, node, max) < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}

/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the "n" values starting at "first".
 */
static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos, int first, int n)
{
	int i, k;
	int total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < n; ++i)
		isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);

	return isl_stat_ok;
}

/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the parameter coefficients of all nodes.
 *
 * Within each node, the coefficients have the following order:
 *	- c_i_0
 *	- c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 */
static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos)
{
	int i, j, k;
	int total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < graph->n; ++i) {
		int pos = 1 + graph->node[i].start + 1;

		for (j = 0; j < graph->node[i].nparam; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	return isl_stat_ok;
}

/* Add a constraint to graph->lp that equates the value at position
 * "sum_pos" to the sum of the variable coefficients of all nodes.
 *
 * Within each node, the coefficients have the following order:
 *	- c_i_0
 *	- c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 */
static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
	int sum_pos)
{
	int i, j, k;
	int total;

	total = isl_basic_set_dim(graph->lp, isl_dim_set);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = 1 + node_var_coef_offset(node);

		for (j = 0; j < 2 * node->nvar; ++j)
			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
	}

	return isl_stat_ok;
}

/* Construct an ILP problem for finding schedule coefficients
 * that result in non-negative, but small dependence distances
 * over all dependences.
 * In particular, the dependence distances over proximity edges
 * are bounded by m_0 + m_n n and we compute schedule coefficients
 * with small values (preferably zero) of m_n and m_0.
 *
 * All variables of the ILP are non-negative. The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables. The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of positive and negative parts of m_n coefficients
 *	- m_0
 *	- sum of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- positive and negative parts of m_n coefficients
 *	- for each node
 *		- c_i_0
 *		- c_i_n (if parametric)
 *		- positive and negative parts of c_i_x
 *
 * The c_i_x are not represented directly, but through the columns of
 * node->cmap. That is, the computed values are for variable t_i_x
 * such that c_i_x = Q t_i_x with Q equal to node->cmap.
 *
 * The constraints are those from the edges plus two or three equalities
 * to express the sums.
 *
 * If "use_coincidence" is set, then we treat coincidence edges as local edges.
 * Otherwise, we ignore them.
 */
static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
	int use_coincidence)
{
	int i;
	unsigned nparam;
	unsigned total;
	isl_space *space;
	int parametric;
	int param_pos;
	int n_eq, n_ineq;

	parametric = ctx->opt->schedule_parametric;
	nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
	param_pos = 4;
	total = param_pos + 2 * nparam;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		if (node_update_cmap(node) < 0)
			return isl_stat_error;
		node->start = total;
		total += 1 + node->nparam + 2 * node->nvar;
	}

	if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
		return isl_stat_error;
	if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;
	if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	space = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 2 + parametric;

	graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);

	if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
		return isl_stat_error;
	if (parametric && add_param_sum_constraint(graph, 2) < 0)
		return isl_stat_error;
	if (add_var_sum_constraint(graph, 3) < 0)
		return isl_stat_error;
	if (add_bound_constant_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_bound_coefficient_constraints(ctx, graph) < 0)
		return isl_stat_error;
	if (add_all_validity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;
	if (add_all_proximity_constraints(graph, use_coincidence) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}

/* Analyze the conflicting constraint found by
 * isl_tab_basic_set_non_trivial_lexmin. If it corresponds to the validity
 * constraint of one of the edges between distinct nodes, living, moreover,
 * in distinct SCCs, then record the source and sink SCC as this may
 * be a good place to cut between SCCs.
 */
static int check_conflict(int con, void *user)
{
	int i;
	struct isl_sched_graph *graph = user;

	if (graph->src_scc >= 0)
		return 0;

	con -= graph->lp->n_eq;

	if (con >= graph->lp->n_ineq)
		return 0;

	for (i = 0; i < graph->n_edge; ++i) {
		if (!is_validity(&graph->edge[i]))
			continue;
		if (graph->edge[i].src == graph->edge[i].dst)
			continue;
		if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
			continue;
		if (graph->edge[i].start > con)
			continue;
		if (graph->edge[i].end <= con)
			continue;
		graph->src_scc = graph->edge[i].src->scc;
		graph->dst_scc = graph->edge[i].dst->scc;
	}

	return 0;
}

/* Check whether the next schedule row of the given node needs to be
 * non-trivial. Lower-dimensional domains may have some trivial rows,
 * but as soon as the number of remaining required non-trivial rows
 * is as large as the number of remaining rows to be computed,
 * all remaining rows need to be non-trivial.
 */
static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
{
	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
}

/* Solve the ILP problem constructed in setup_lp.
 * For each node such that all the remaining rows of its schedule
 * need to be non-trivial, we construct a non-triviality region.
 * This region imposes that the next row is independent of previous rows.
 * In particular the coefficients c_i_x are represented by t_i_x
 * variables with c_i_x = Q t_i_x and Q a unimodular matrix such that
 * its first columns span the rows of the previously computed part
 * of the schedule. The non-triviality region enforces that at least
 * one of the remaining components of t_i_x is non-zero, i.e.,
 * that the new schedule row depends on at least one of the remaining
 * rows of Q.
 */
static __isl_give isl_vec *solve_lp(struct isl_sched_graph *graph)
{
	int i;
	isl_vec *sol;
	isl_basic_set *lp;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int skip = node->rank;
		graph->region[i].pos = node_var_coef_offset(node) + 2 * skip;
		if (needs_row(graph, node))
			graph->region[i].len = 2 * (node->nvar - skip);
		else
			graph->region[i].len = 0;
	}
	lp = isl_basic_set_copy(graph->lp);
	sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
				       graph->region, &check_conflict, graph);
	return sol;
}

/* Extract the coefficients for the variables of "node" from "sol".
 *
 * Within each node, the coefficients have the following order:
 *	- c_i_0
 *	- c_i_n (if parametric)
 *	- positive and negative parts of c_i_x
 *
 * The c_i_x^- appear before their c_i_x^+ counterpart.
 *
 * Return c_i_x = c_i_x^+ - c_i_x^-
 */
static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
	__isl_keep isl_vec *sol)
{
	int i;
	int pos;
	isl_vec *csol;

	if (!sol)
		return NULL;
	csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
	if (!csol)
		return NULL;

	pos = 1 + node_var_coef_offset(node);
	for (i = 0; i < node->nvar; ++i)
		isl_int_sub(csol->el[i],
			    sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);

	return csol;
}

/* Update the schedules of all nodes based on the given solution
 * of the LP problem.
 * The new row is added to the current band.
 * All possibly negative coefficients are encoded as a difference
 * of two non-negative variables, so we need to perform the subtraction
 * here. Moreover, if use_cmap is set, then the solution does
 * not refer to the actual coefficients c_i_x, but instead to variables
 * t_i_x such that c_i_x = Q t_i_x and Q is equal to node->cmap.
 * In this case, we then also need to perform this multiplication
 * to obtain the values of c_i_x.
 *
 * If coincident is set, then the caller guarantees that the new
 * row satisfies the coincidence constraints.
 */
static int update_schedule(struct isl_sched_graph *graph,
	__isl_take isl_vec *sol, int use_cmap, int coincident)
{
	int i, j;
	isl_vec *csol = NULL;

	if (!sol)
		goto error;
	if (sol->size == 0)
		isl_die(sol->ctx, isl_error_internal,
			"no solution found", goto error);
	if (graph->n_total_row >= graph->max_row)
		isl_die(sol->ctx, isl_error_internal,
			"too many schedule rows", goto error);

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int pos = node->start;
		int row = isl_mat_rows(node->sched);

		isl_vec_free(csol);
		csol = extract_var_coef(node, sol);
		if (!csol)
			goto error;

		isl_map_free(node->sched_map);
		node->sched_map = NULL;
		node->sched = isl_mat_add_rows(node->sched, 1);
		if (!node->sched)
			goto error;
		for (j = 0; j < 1 + node->nparam; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, j, sol->el[1 + pos + j]);
		if (use_cmap)
			csol = isl_mat_vec_product(isl_mat_copy(node->cmap),
						   csol);
		if (!csol)
			goto error;
		for (j = 0; j < node->nvar; ++j)
			node->sched = isl_mat_set_element(node->sched,
					row, 1 + node->nparam + j, csol->el[j]);
		node->coincident[graph->n_total_row] = coincident;
	}
	isl_vec_free(sol);
	isl_vec_free(csol);

	graph->n_row++;
	graph->n_total_row++;

	return 0;
error:
	isl_vec_free(sol);
	isl_vec_free(csol);
	return -1;
}

/* Convert row "row" of node->sched into an isl_aff living in "ls"
 * and return this isl_aff.
 */
static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
	struct isl_sched_node *node, int row)
{
	int j;
	isl_int v;
	isl_aff *aff;

	isl_int_init(v);

	aff = isl_aff_zero_on_domain(ls);
	isl_mat_get_element(node->sched, row, 0, &v);
	aff = isl_aff_set_constant(aff, v);
	for (j = 0; j < node->nparam; ++j) {
		isl_mat_get_element(node->sched, row, 1 + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
	}
	for (j = 0; j < node->nvar; ++j) {
		isl_mat_get_element(node->sched, row, 1 + node->nparam + j, &v);
		aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
	}

	isl_int_clear(v);

	return aff;
}

/* Convert the "n" rows starting at "first" of node->sched into a multi_aff
 * and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
	struct isl_sched_node *node, int first, int n)
{
	int i;
	isl_space *space;
	isl_local_space *ls;
	isl_aff *aff;
	isl_multi_aff *ma;
	int nrow;

	if (!node)
		return NULL;
	nrow = isl_mat_rows(node->sched);
	if (node->compressed)
		space = isl_multi_aff_get_domain_space(node->decompress);
	else
		space = isl_space_copy(node->space);
	ls = isl_local_space_from_space(isl_space_copy(space));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, n);
	ma = isl_multi_aff_zero(space);

	for (i = first; i < first + n; ++i) {
		aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
		ma = isl_multi_aff_set_aff(ma, i - first, aff);
	}

	isl_local_space_free(ls);

	if (node->compressed)
		ma = isl_multi_aff_pullback_multi_aff(ma,
					isl_multi_aff_copy(node->compress));

	return ma;
}

/* Convert node->sched into a multi_aff and return this multi_aff.
 *
 * The result is defined over the uncompressed node domain.
 */
static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
	struct isl_sched_node *node)
{
	int nrow;

	nrow = isl_mat_rows(node->sched);
	return node_extract_partial_schedule_multi_aff(node, 0, nrow);
}

/* Convert node->sched into a map and return this map.
 *
 * The result is cached in node->sched_map, which needs to be released
 * whenever node->sched is updated.
 * It is defined over the uncompressed node domain.
 */
static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
{
	if (!node->sched_map) {
		isl_multi_aff *ma;

		ma = node_extract_schedule_multi_aff(node);
		node->sched_map = isl_map_from_multi_aff(ma);
	}

	return isl_map_copy(node->sched_map);
}

/* Construct a map that can be used to update a dependence relation
 * based on the current schedule.
 * That is, construct a map expressing that source and sink
 * are executed within the same iteration of the current schedule.
 * This map can then be intersected with the dependence relation.
 * This is not the most efficient way, but this shouldn't be a critical
 * operation.
 */
static __isl_give isl_map *specializer(struct isl_sched_node *src,
	struct isl_sched_node *dst)
{
	isl_map *src_sched, *dst_sched;

	src_sched = node_extract_schedule(src);
	dst_sched = node_extract_schedule(dst);
	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
}

/* Intersect the domains of the nested relations in domain and range
 * of "umap" with "map".
 */
static __isl_give isl_union_map *intersect_domains(
	__isl_take isl_union_map *umap, __isl_keep isl_map *map)
{
	isl_union_set *uset;

	umap = isl_union_map_zip(umap);
	uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
	umap = isl_union_map_intersect_domain(umap, uset);
	umap = isl_union_map_zip(umap);
	return umap;
}

/* Update the dependence relation of the given edge based
 * on the current schedule.
 * If the dependence is carried completely by the current schedule, then
 * it is removed from the edge_tables. It is kept in the list of edges
 * as otherwise all edge_tables would have to be recomputed.
 */
static int update_edge(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge)
{
	int empty;
	isl_map *id;

	id = specializer(edge->src, edge->dst);
	edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
	if (!edge->map)
		goto error;

	if (edge->tagged_condition) {
		edge->tagged_condition =
			intersect_domains(edge->tagged_condition, id);
		if (!edge->tagged_condition)
			goto error;
	}
	if (edge->tagged_validity) {
		edge->tagged_validity =
			intersect_domains(edge->tagged_validity, id);
		if (!edge->tagged_validity)
			goto error;
	}

	empty = isl_map_plain_is_empty(edge->map);
	if (empty < 0)
		goto error;
	if (empty)
		graph_remove_edge(graph, edge);

	isl_map_free(id);
	return 0;
error:
	isl_map_free(id);
	return -1;
}

/* Does the domain of "umap" intersect "uset"?
 */
static int domain_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Does the range of "umap" intersect "uset"?
 */
static int range_intersects(__isl_keep isl_union_map *umap,
	__isl_keep isl_union_set *uset)
{
	int empty;

	umap = isl_union_map_copy(umap);
	umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
	empty = isl_union_map_is_empty(umap);
	isl_union_map_free(umap);

	return empty < 0 ? -1 : !empty;
}

/* Are the condition dependences of "edge" local with respect to
 * the current schedule?
 *
 * That is, are domain and range of the condition dependences mapped
 * to the same point?
 *
 * In other words, is the condition false?
 */
static int is_condition_false(struct isl_sched_edge *edge)
{
	isl_union_map *umap;
	isl_map *map, *sched, *test;
	int empty, local;

	empty = isl_union_map_is_empty(edge->tagged_condition);
	if (empty < 0 || empty)
		return empty;

	umap = isl_union_map_copy(edge->tagged_condition);
	umap = isl_union_map_zip(umap);
	umap = isl_union_set_unwrap(isl_union_map_domain(umap));
	map = isl_map_from_union_map(umap);

	sched = node_extract_schedule(edge->src);
	map = isl_map_apply_domain(map, sched);
	sched = node_extract_schedule(edge->dst);
	map = isl_map_apply_range(map, sched);

	test = isl_map_identity(isl_map_get_space(map));
	local = isl_map_is_subset(map, test);
	isl_map_free(map);
	isl_map_free(test);

	return local;
}

/* For each conditional validity constraint that is adjacent
 * to a condition with domain in condition_source or range in condition_sink,
 * turn it into an unconditional validity constraint.
 */
static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
	__isl_take isl_union_set *condition_source,
	__isl_take isl_union_set *condition_sink)
{
	int i;

	condition_source = isl_union_set_coalesce(condition_source);
	condition_sink = isl_union_set_coalesce(condition_sink);

	for (i = 0; i < graph->n_edge; ++i) {
		int adjacent;
		isl_union_map *validity;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;
		if (is_validity(&graph->edge[i]))
			continue;

		validity = graph->edge[i].tagged_validity;
		adjacent = domain_intersects(validity, condition_sink);
		if (adjacent >= 0 && !adjacent)
			adjacent = range_intersects(validity, condition_source);
		if (adjacent < 0)
			goto error;
		if (!adjacent)
			continue;

		set_validity(&graph->edge[i]);
	}

	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return 0;
error:
	isl_union_set_free(condition_source);
	isl_union_set_free(condition_sink);
	return -1;
}

/* Update the dependence relations of all edges based on the current schedule
 * and enforce conditional validity constraints that are adjacent
 * to satisfied condition constraints.
 *
 * First check if any of the condition constraints are satisfied
 * (i.e., not local to the outer schedule) and keep track of
 * their domain and range.
 * Then update all dependence relations (which removes the non-local
 * constraints).
 * Finally, if any condition constraints turned out to be satisfied,
 * then turn all adjacent conditional validity constraints into
 * unconditional validity constraints.
 */
static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;
	int any = 0;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		int local;
		isl_union_set *uset;
		isl_union_map *umap;

		if (!is_condition(&graph->edge[i]))
			continue;
		if (is_local(&graph->edge[i]))
			continue;
		local = is_condition_false(&graph->edge[i]);
		if (local < 0)
			goto error;
		if (local)
			continue;

		any = 1;

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);

		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
	}

	for (i = graph->n_edge - 1; i >= 0; --i) {
		if (update_edge(graph, &graph->edge[i]) < 0)
			goto error;
	}

	if (any)
		return unconditionalize_adjacent_validity(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return 0;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}

static void next_band(struct isl_sched_graph *graph)
{
	graph->band_start = graph->n_total_row;
}

/* Return the union of the universe domains of the nodes in "graph"
 * that satisfy "pred".
 */
static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*pred)(struct isl_sched_node *node, int data), int data)
{
	int i;
	isl_set *set;
	isl_union_set *dom;

	for (i = 0; i < graph->n; ++i)
		if (pred(&graph->node[i], data))
			break;

	if (i >= graph->n)
		isl_die(ctx, isl_error_internal,
			"empty component", return NULL);

	set = isl_set_universe(isl_space_copy(graph->node[i].space));
	dom = isl_union_set_from_set(set);

	for (i = i + 1; i < graph->n; ++i) {
		if (!pred(&graph->node[i], data))
			continue;
		set = isl_set_universe(isl_space_copy(graph->node[i].space));
		dom = isl_union_set_union(dom, isl_union_set_from_set(set));
	}

	return dom;
}

/* Return a list of unions of universe domains, where each element
 * in the list corresponds to an SCC (or WCC) indexed by node->scc.
 */
static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, graph->scc);
	for (i = 0; i < graph->scc; ++i) {
		isl_union_set *dom;

		dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
		filters = isl_union_set_list_add(filters, dom);
	}

	return filters;
}

/* Return a list of two unions of universe domains, one for the SCCs up
 * to and including graph->src_scc and another for the other SCCs.
 */
static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	isl_union_set *dom;
	isl_union_set_list *filters;

	filters = isl_union_set_list_alloc(ctx, 2);
	dom = isl_sched_graph_domain(ctx, graph,
					&node_scc_at_most, graph->src_scc);
	filters = isl_union_set_list_add(filters, dom);
	dom = isl_sched_graph_domain(ctx, graph,
					&node_scc_at_least, graph->src_scc + 1);
	filters = isl_union_set_list_add(filters, dom);

	return filters;
}

/* Copy nodes that satisfy node_pred from the src dependence graph
 * to the dst dependence graph.
 */
static int copy_nodes(struct isl_sched_graph *dst, struct isl_sched_graph *src,
	int (*node_pred)(struct isl_sched_node *node, int data), int data)
{
	int i;

	dst->n = 0;
	for (i = 0; i < src->n; ++i) {
		int j;

		if (!node_pred(&src->node[i], data))
			continue;

		j = dst->n;
		dst->node[j].space = isl_space_copy(src->node[i].space);
		dst->node[j].compressed = src->node[i].compressed;
		dst->node[j].hull = isl_set_copy(src->node[i].hull);
		dst->node[j].compress =
			isl_multi_aff_copy(src->node[i].compress);
		dst->node[j].decompress =
			isl_multi_aff_copy(src->node[i].decompress);
		dst->node[j].nvar = src->node[i].nvar;
		dst->node[j].nparam = src->node[i].nparam;
		dst->node[j].sched = isl_mat_copy(src->node[i].sched);
		dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
		dst->node[j].coincident = src->node[i].coincident;
		dst->node[j].sizes = isl_multi_val_copy(src->node[i].sizes);
		dst->node[j].max = isl_vec_copy(src->node[i].max);
		dst->n++;

		if (!dst->node[j].space || !dst->node[j].sched)
			return -1;
		if (dst->node[j].compressed &&
		    (!dst->node[j].hull || !dst->node[j].compress ||
		     !dst->node[j].decompress))
			return -1;
	}

	return 0;
}

/* Copy non-empty edges that satisfy edge_pred from the src dependence graph
 * to the dst dependence graph.
 * If the source or destination node of the edge is not in the destination
 * graph, then it must be a backward proximity edge and it should simply
 * be ignored.
 */
static int copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
	struct isl_sched_graph *src,
	int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
{
	int i;
	enum isl_edge_type t;

	dst->n_edge = 0;
	for (i = 0; i < src->n_edge; ++i) {
		struct isl_sched_edge *edge = &src->edge[i];
		isl_map *map;
		isl_union_map *tagged_condition;
		isl_union_map *tagged_validity;
		struct isl_sched_node *dst_src, *dst_dst;

		if (!edge_pred(edge, data))
			continue;

		if (isl_map_plain_is_empty(edge->map))
			continue;

		dst_src = graph_find_node(ctx, dst, edge->src->space);
		dst_dst = graph_find_node(ctx, dst, edge->dst->space);
		if (!dst_src || !dst_dst) {
			if (is_validity(edge) || is_conditional_validity(edge))
				isl_die(ctx, isl_error_internal,
					"backward (conditional) validity edge",
					return -1);
			continue;
		}

		map = isl_map_copy(edge->map);
		tagged_condition = isl_union_map_copy(edge->tagged_condition);
		tagged_validity = isl_union_map_copy(edge->tagged_validity);

		dst->edge[dst->n_edge].src = dst_src;
		dst->edge[dst->n_edge].dst = dst_dst;
		dst->edge[dst->n_edge].map = map;
		dst->edge[dst->n_edge].tagged_condition = tagged_condition;
		dst->edge[dst->n_edge].tagged_validity = tagged_validity;
		dst->edge[dst->n_edge].types = edge->types;
		dst->n_edge++;

		if (edge->tagged_condition && !tagged_condition)
			return -1;
		if (edge->tagged_validity && !tagged_validity)
			return -1;

		for (t = isl_edge_first; t <= isl_edge_last; ++t) {
			if (edge !=
			    graph_find_edge(src, t, edge->src, edge->dst))
				continue;
			if (graph_edge_table_add(ctx, dst, t,
					    &dst->edge[dst->n_edge - 1]) < 0)
				return -1;
		}
	}

	return 0;
}

/* Compute the maximal number of variables over all nodes.
 * This is the maximal number of linearly independent schedule
 * rows that we need to compute.
 * Just in case we end up in a part of the dependence graph
 * with only lower-dimensional domains, we make sure we will
 * compute the required amount of extra linearly independent rows.
 */
static int compute_maxvar(struct isl_sched_graph *graph)
{
	int i;

	graph->maxvar = 0;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int nvar;

		if (node_update_cmap(node) < 0)
			return -1;
		nvar = node->nvar + graph->n_row - node->rank;
		if (nvar > graph->maxvar)
			graph->maxvar = nvar;
	}

	return 0;
}

/* Extract the subgraph of "graph" that consists of the nodes satisfying
 * "node_pred" and the edges satisfying "edge_pred" and store
 * the result in "sub".
 */
static int extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, struct isl_sched_graph *sub)
{
	int i, n = 0, n_edge = 0;
	enum isl_edge_type t;

	for (i = 0; i < graph->n; ++i)
		if (node_pred(&graph->node[i], data))
			++n;
	for (i = 0; i < graph->n_edge; ++i)
		if (edge_pred(&graph->edge[i], data))
			++n_edge;
	if (graph_alloc(ctx, sub, n, n_edge) < 0)
		return -1;
	if (copy_nodes(sub, graph, node_pred, data) < 0)
		return -1;
	if (graph_init_table(ctx, sub) < 0)
		return -1;
	for (t = 0; t <= isl_edge_last; ++t)
		sub->max_edge[t] = graph->max_edge[t];
	if (graph_init_edge_tables(ctx, sub) < 0)
		return -1;
	if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
		return -1;
	sub->n_row = graph->n_row;
	sub->max_row = graph->max_row;
	sub->n_total_row = graph->n_total_row;
	sub->band_start = graph->band_start;

	return 0;
}

static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph);
static __isl_give isl_schedule_node *compute_schedule_wcc(
	isl_schedule_node *node, struct isl_sched_graph *graph);

/* Compute a schedule for a subgraph of "graph". In particular, for
 * the graph composed of nodes that satisfy node_pred and edges that
 * satisfy edge_pred.
 * If the subgraph is known to consist of a single component, then wcc should
 * be set and then we call compute_schedule_wcc on the constructed subgraph.
 * Otherwise, we call compute_schedule, which will check whether the subgraph
 * is connected.
 *
 * The schedule is inserted at "node" and the updated schedule node
 * is returned.
 */
static __isl_give isl_schedule_node *compute_sub_schedule(
	__isl_take isl_schedule_node *node, isl_ctx *ctx,
	struct isl_sched_graph *graph,
	int (*node_pred)(struct isl_sched_node *node, int data),
	int (*edge_pred)(struct isl_sched_edge *edge, int data),
	int data, int wcc)
{
	struct isl_sched_graph split = { 0 };

	if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
				&split) < 0)
		goto error;

	if (wcc)
		node = compute_schedule_wcc(node, &split);
	else
		node = compute_schedule(node, &split);

	graph_free(ctx, &split);
	return node;
error:
	graph_free(ctx, &split);
	return isl_schedule_node_free(node);
}

static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc == scc && edge->dst->scc == scc;
}

static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
{
	return edge->dst->scc <= scc;
}

static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
{
	return edge->src->scc >= scc;
}

/* Reset the current band by dropping all its schedule rows.
 */
static int reset_band(struct isl_sched_graph *graph)
{
	int i;
	int drop;

	drop = graph->n_total_row - graph->band_start;
	graph->n_total_row -= drop;
	graph->n_row -= drop;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		isl_map_free(node->sched_map);
		node->sched_map = NULL;

		node->sched = isl_mat_drop_rows(node->sched,
						graph->band_start, drop);

		if (!node->sched)
			return -1;
	}

	return 0;
}

/* Split the current graph into two parts and compute a schedule for each
 * part individually. In particular, one part consists of all SCCs up
 * to and including graph->src_scc, while the other part contains the other
 * SCCs. The split is enforced by a sequence node inserted at position "node"
 * in the schedule tree. Return the updated schedule node.
 * If either of these two parts consists of a sequence, then it is spliced
 * into the sequence containing the two parts.
 *
 * The current band is reset. It would be possible to reuse
 * the previously computed rows as the first rows in the next
 * band, but recomputing them may result in better rows as we are looking
 * at a smaller part of the dependence graph.
 */
static __isl_give isl_schedule_node *compute_split_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int is_seq;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	if (reset_band(graph) < 0)
		return isl_schedule_node_free(node);

	next_band(graph);

	ctx = isl_schedule_node_get_ctx(node);
	filters = extract_split(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);
	node = isl_schedule_node_child(node, 1);
	node = isl_schedule_node_child(node, 0);

	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_least, &edge_src_scc_at_least,
				graph->src_scc + 1, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 1);
	node = isl_schedule_node_child(node, 0);
	node = isl_schedule_node_child(node, 0);
	node = compute_sub_schedule(node, ctx, graph,
				&node_scc_at_most, &edge_dst_scc_at_most,
				graph->src_scc, 0);
	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
	node = isl_schedule_node_parent(node);
	node = isl_schedule_node_parent(node);
	if (is_seq)
		node = isl_schedule_node_sequence_splice_child(node, 0);

	return node;
}

/* Insert a band node at position "node" in the schedule tree corresponding
 * to the current band in "graph". Mark the band node permutable
 * if "permutable" is set.
 * The partial schedules and the coincidence property are extracted
 * from the graph nodes.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *insert_current_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int permutable)
{
	int i;
	int start, end, n;
	isl_multi_aff *ma;
	isl_multi_pw_aff *mpa;
	isl_multi_union_pw_aff *mupa;

	if (!node)
		return NULL;

	if (graph->n < 1)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	start = graph->band_start;
	end = graph->n_total_row;
	n = end - start;

	ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
	mpa = isl_multi_pw_aff_from_multi_aff(ma);
	mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);

	for (i = 1; i < graph->n; ++i) {
		isl_multi_union_pw_aff *mupa_i;

		ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
								start, n);
		mpa = isl_multi_pw_aff_from_multi_aff(ma);
		mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
		mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
	}
	node = isl_schedule_node_insert_partial_schedule(node, mupa);

	for (i = 0; i < n; ++i)
		node = isl_schedule_node_band_member_set_coincident(node, i,
					graph->node[0].coincident[start + i]);
	node = isl_schedule_node_band_set_permutable(node, permutable);

	return node;
}

/* Update the dependence relations based on the current schedule,
 * add the current band to "node" and then continue with the computation
 * of the next band.
 * Return the updated schedule node.
 */
static __isl_give isl_schedule_node *compute_next_band(
	__isl_take isl_schedule_node *node,
	struct isl_sched_graph *graph, int permutable)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, permutable);
	next_band(graph);

	node = isl_schedule_node_child(node, 0);
	node = compute_schedule(node, graph);
	node = isl_schedule_node_parent(node);

	return node;
}

/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * a node j to itself. "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_j_0 + c_j_n n + c_j_x y) - (c_j_0 + c_j_n n + c_j_x x)
 *	= c_j_x (y - x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for (y - x) and then plug in (-e_i, 0, c_j_x),
 * with each coefficient in c_j_x represented as a pair of non-negative
 * coefficients.
 */
static int add_intra_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	int offset;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *node = edge->src;

	coef = intra_coefficients(graph, node, map);
	if (!coef)
		return -1;

	offset = coef_var_offset(coef);
	dim_map = intra_dim_map(ctx, graph, node, offset, 1);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return 0;
}

/* Add constraints to graph->lp that force the dependence "map" (which
 * is part of the dependence relation of "edge")
 * to be respected and attempt to carry it, where the edge is one from
 * node j to node k. "pos" is the sequence number of the given map.
 * That is, add constraints that enforce
 *
 *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
 *
 * for each (x,y) in R.
 * We obtain general constraints on coefficients (c_0, c_n, c_x)
 * of valid constraints for R and then plug in
 * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, c_k_x - c_j_x)
 * with each coefficient (except e_i, c_*_0 and c_*_n)
 * represented as a pair of non-negative coefficients.
 */
static int add_inter_constraints(struct isl_sched_graph *graph,
	struct isl_sched_edge *edge, __isl_take isl_map *map, int pos)
{
	int offset;
	isl_ctx *ctx = isl_map_get_ctx(map);
	isl_dim_map *dim_map;
	isl_basic_set *coef;
	struct isl_sched_node *src = edge->src;
	struct isl_sched_node *dst = edge->dst;

	coef = inter_coefficients(graph, edge, map);
	if (!coef)
		return -1;

	offset = coef_var_offset(coef);
	dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
	graph->lp = isl_basic_set_extend_constraints(graph->lp,
			coef->n_eq, coef->n_ineq);
	graph->lp = isl_basic_set_add_constraints_dim_map(graph->lp,
							coef, dim_map);

	return 0;
}

/* Add constraints to graph->lp that force all (conditional) validity
 * dependences to be respected and attempt to carry them.
 */
static int add_all_constraints(struct isl_sched_graph *graph)
{
	int i, j;
	int pos;

	pos = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_any_validity(edge))
			continue;

		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (edge->src == edge->dst &&
			    add_intra_constraints(graph, edge, map, pos) < 0)
				return -1;
			if (edge->src != edge->dst &&
			    add_inter_constraints(graph, edge, map, pos) < 0)
				return -1;
			pos++;
		}
	}

	return 0;
}

/* Count the number of equality and inequality constraints
 * that will be added to the carry_lp problem.
 * We count each edge exactly once.
 */
static int count_all_constraints(struct isl_sched_graph *graph,
	int *n_eq, int *n_ineq)
{
	int i, j;

	*n_eq = *n_ineq = 0;
	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!is_any_validity(edge))
			continue;

		for (j = 0; j < edge->map->n; ++j) {
			isl_basic_map *bmap;
			isl_map *map;

			bmap = isl_basic_map_copy(edge->map->p[j]);
			map = isl_map_from_basic_map(bmap);

			if (count_map_constraints(graph, edge, map,
						  n_eq, n_ineq, 1, 0) < 0)
				return -1;
		}
	}

	return 0;
}
/* Construct an LP problem for finding schedule coefficients
 * such that the schedule carries as many dependences as possible.
 * In particular, for each dependence i, we bound the dependence distance
 * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
 * of all e_i's.  Dependences with e_i = 0 in the solution are simply
 * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
 * Note that if the dependence relation is a union of basic maps,
 * then we have to consider each basic map individually as it may only
 * be possible to carry the dependences expressed by some of those
 * basic maps and not all of them.
 * Below, we consider each of those basic maps as a separate "edge".
 *
 * All variables of the LP are non-negative.  The actual coefficients
 * may be negative, so each coefficient is represented as the difference
 * of two non-negative variables.  The negative part always appears
 * immediately before the positive part.
 * Other than that, the variables have the following order
 *
 *	- sum of (1 - e_i) over all edges
 *	- sum of all c_n coefficients
 *		(unconstrained when computing non-parametric schedules)
 *	- sum of positive and negative parts of all c_x coefficients
 *	- for each edge
 *		- e_i
 *	- for each node
 *		- c_i_0
 *		- c_i_n (if parametric)
 *		- positive and negative parts of c_i_x
 *
 * The constraints are those from the (validity) edges plus three equalities
 * to express the sums and n_edge inequalities to express e_i <= 1.
 */
static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	int i;
	int k;
	isl_space *dim;
	unsigned total;
	int n_eq, n_ineq;
	int n_edge;

	n_edge = 0;
	for (i = 0; i < graph->n_edge; ++i)
		n_edge += graph->edge[i].map->n;

	total = 3 + n_edge;
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
		node->start = total;
		total += 1 + node->nparam + 2 * node->nvar;
	}

	if (count_all_constraints(graph, &n_eq, &n_ineq) < 0)
		return isl_stat_error;

	dim = isl_space_set_alloc(ctx, 0, total);
	isl_basic_set_free(graph->lp);
	n_eq += 3;
	n_ineq += n_edge;
	graph->lp = isl_basic_set_alloc_space(dim, 0, n_eq, n_ineq);
	graph->lp = isl_basic_set_set_rational(graph->lp);

	k = isl_basic_set_alloc_equality(graph->lp);
	if (k < 0)
		return isl_stat_error;
	isl_seq_clr(graph->lp->eq[k], 1 + total);
	isl_int_set_si(graph->lp->eq[k][0], -n_edge);
	isl_int_set_si(graph->lp->eq[k][1], 1);
	for (i = 0; i < n_edge; ++i)
		isl_int_set_si(graph->lp->eq[k][4 + i], 1);

	if (add_param_sum_constraint(graph, 1) < 0)
		return isl_stat_error;
	if (add_var_sum_constraint(graph, 2) < 0)
		return isl_stat_error;

	for (i = 0; i < n_edge; ++i) {
		k = isl_basic_set_alloc_inequality(graph->lp);
		if (k < 0)
			return isl_stat_error;
		isl_seq_clr(graph->lp->ineq[k], 1 + total);
		isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
		isl_int_set_si(graph->lp->ineq[k][0], 1);
	}

	if (add_all_constraints(graph) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
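/* Illustrative sketch (made-up instance, not from the isl sources): for a
 * graph with two one-dimensional nodes A and B, no parameters and a single
 * validity edge consisting of one basic map, the LP variables set up above
 * are ordered as
 *
 *	1: sum(1 - e_i)   2: sum c_n   3: sum c_x   4: e_0
 *	5: c_A_0   6: c_A_x^-   7: c_A_x^+
 *	8: c_B_0   9: c_B_x^-  10: c_B_x^+
 *
 * The first equality encodes sum(1 - e_i) = 1 - e_0 and the per-edge
 * inequality encodes e_0 <= 1.
 */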
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc);
/* Comparison function for sorting the statements based on
 * the corresponding value in "r".
 */
static int smaller_value(const void *a, const void *b, void *data)
{
	isl_vec *r = data;
	const int *i1 = a;
	const int *i2 = b;

	return isl_int_cmp(r->el[*i1], r->el[*i2]);
}
/* If the schedule_split_scaled option is set and if the linear
 * parts of the scheduling rows for all nodes in the graph have
 * a non-trivial common divisor, then split off the remainder of the
 * constant term modulo this common divisor from the linear part.
 * Otherwise, insert a band node directly and continue with
 * the construction of the schedule.
 *
 * If a non-trivial common divisor is found, then
 * the linear part is reduced and the remainder is enforced
 * by a sequence node with the children placed in the order
 * of this remainder.
 * In particular, we assign an scc index based on the remainder and
 * then rely on compute_component_schedule to insert the sequence and
 * to continue the schedule construction on each part.
 */
static __isl_give isl_schedule_node *split_scaled(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int row;
	int scc;
	isl_ctx *ctx;
	isl_int gcd, gcd_i;
	isl_vec *r;
	int *order;

	ctx = isl_schedule_node_get_ctx(node);
	if (!ctx->opt->schedule_split_scaled)
		return compute_next_band(node, graph, 0);
	if (graph->n <= 1)
		return compute_next_band(node, graph, 0);

	isl_int_init(gcd);
	isl_int_init(gcd_i);

	isl_int_set_si(gcd, 0);

	row = isl_mat_rows(graph->node[0].sched) - 1;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int cols = isl_mat_cols(node->sched);

		isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
		isl_int_gcd(gcd, gcd, gcd_i);
	}

	isl_int_clear(gcd_i);

	if (isl_int_cmp_si(gcd, 1) <= 0) {
		isl_int_clear(gcd);
		return compute_next_band(node, graph, 0);
	}

	r = isl_vec_alloc(ctx, graph->n);
	order = isl_calloc_array(ctx, int, graph->n);
	if (!r || !order)
		goto error;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];

		order[i] = i;
		isl_int_fdiv_r(r->el[i], node->sched->row[row][0], gcd);
		isl_int_fdiv_q(node->sched->row[row][0],
			       node->sched->row[row][0], gcd);
		isl_int_mul(node->sched->row[row][0],
			    node->sched->row[row][0], gcd);
		node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
		if (!node->sched)
			goto error;
	}

	if (isl_sort(order, graph->n, sizeof(order[0]), &smaller_value, r) < 0)
		goto error;

	scc = 0;
	for (i = 0; i < graph->n; ++i) {
		if (i > 0 && isl_int_ne(r->el[order[i - 1]], r->el[order[i]]))
			++scc;
		graph->node[order[i]].scc = scc;
	}
	graph->scc = ++scc;

	isl_int_clear(gcd);
	isl_vec_free(r);
	free(order);

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);
	node = insert_current_band(node, graph, 0);

	node = isl_schedule_node_child(node, 0);
	node = compute_component_schedule(node, graph, 0);
	node = isl_schedule_node_parent(node);

	return node;
error:
	isl_vec_free(r);
	free(order);
	isl_int_clear(gcd);
	return isl_schedule_node_free(node);
}
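/* Worked example (hypothetical numbers, not taken from the isl sources):
 * suppose the final schedule row of every node has the form 2i + c with
 * constant terms c = 0, 1, 3 for three nodes.  The common divisor of the
 * linear parts is gcd = 2, so the rows are scaled down to i, i and i + 1,
 * while the remainders 0, 1, 1 of the constant terms modulo 2 determine
 * the order of the parts in the inserted sequence: the first node is
 * placed before the other two.
 */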
/* Is the schedule row "sol" trivial on node "node"?
 * That is, is the solution zero on the dimensions orthogonal to
 * the previously found solutions?
 * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
 *
 * Each coefficient is represented as the difference between
 * two non-negative values in "sol".  "sol" has been computed
 * in terms of the original iterators (i.e., without use of cmap).
 * We construct the schedule row s and write it as a linear
 * combination of (linear combinations of) previously computed schedule rows.
 * s = Q c or c = U s.
 * If the final entries of c are all zero, then the solution is trivial.
 */
static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
{
	int trivial;
	isl_vec *node_sol;

	if (node->nvar == node->rank)
		return 0;

	node_sol = extract_var_coef(node, sol);
	node_sol = isl_mat_vec_product(isl_mat_copy(node->cinv), node_sol);
	if (!node_sol)
		return -1;

	trivial = isl_seq_first_non_zero(node_sol->el + node->rank,
					node->nvar - node->rank) == -1;

	isl_vec_free(node_sol);

	return trivial;
}
/* Is the schedule row "sol" trivial on any node where it should
 * not be trivial?
 * "sol" has been computed in terms of the original iterators
 * (i.e., without use of cmap).
 * Return 1 if any solution is trivial, 0 if they are not and -1 on error.
 */
static int is_any_trivial(struct isl_sched_graph *graph,
	__isl_keep isl_vec *sol)
{
	int i;

	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		int trivial;

		if (!needs_row(graph, node))
			continue;
		trivial = is_trivial(node, sol);
		if (trivial < 0 || trivial)
			return trivial;
	}

	return 0;
}
/* Does the schedule represented by "sol" perform loop coalescing on "node"?
 * If so, return the position of the coalesced dimension.
 * Otherwise, return node->nvar or -1 on error.
 *
 * In particular, look for pairs of coefficients c_i and c_j such that
 * |c_j/c_i| >= size_i, i.e., |c_j| >= |c_i * size_i|.
 * If any such pair is found, then return i.
 * If size_i is infinity, then no check on c_i needs to be performed.
 */
static int find_node_coalescing(struct isl_sched_node *node,
	__isl_keep isl_vec *sol)
{
	int i, j;
	isl_int max;
	isl_vec *csol;

	if (node->nvar <= 1)
		return node->nvar;

	csol = extract_var_coef(node, sol);
	if (!csol)
		return -1;
	isl_int_init(max);
	for (i = 0; i < node->nvar; ++i) {
		isl_val *v;

		if (isl_int_is_zero(csol->el[i]))
			continue;
		v = isl_multi_val_get_val(node->sizes, i);
		if (!isl_val_is_int(v)) {
			isl_val_free(v);
			continue;
		}
		isl_int_mul(max, v->n, csol->el[i]);
		isl_val_free(v);

		for (j = 0; j < node->nvar; ++j) {
			if (j == i)
				continue;
			if (isl_int_abs_ge(csol->el[j], max))
				break;
		}
		if (j < node->nvar)
			break;
	}

	isl_int_clear(max);
	isl_vec_free(csol);
	return i;
}
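/* Worked example (hypothetical values): for a node with two loop iterators
 * of sizes size_0 = 10 and size_1 = 10, the schedule row
 *
 *	10 i_0 + i_1
 *
 * maps all 100 iterations to distinct values of a single dimension, i.e.,
 * it coalesces the two loops.  The check above detects this because
 * |c_0| = 10 >= |c_1 * size_1| = 10, so position 1 is reported as the
 * coalesced dimension.
 */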
/* Force the schedule coefficient at position "pos" of "node" to be zero
 * in "tl".
 * The coefficient is encoded as the difference between two non-negative
 * variables.  Force these two variables to have the same value.
 */
static __isl_give isl_tab_lexmin *zero_out_node_coef(
	__isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
{
	int dim;
	isl_ctx *ctx;
	isl_vec *eq;

	ctx = isl_space_get_ctx(node->space);
	dim = isl_tab_lexmin_dim(tl);
	if (dim < 0)
		return isl_tab_lexmin_free(tl);
	eq = isl_vec_alloc(ctx, 1 + dim);
	eq = isl_vec_clr(eq);
	if (!eq)
		return isl_tab_lexmin_free(tl);

	pos = 1 + node_var_coef_offset(node) + 2 * pos;
	isl_int_set_si(eq->el[pos], 1);
	isl_int_set_si(eq->el[pos + 1], -1);
	tl = isl_tab_lexmin_add_eq(tl, eq->el);
	isl_vec_free(eq);

	return tl;
}
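/* Illustrative note (example values are made up): a coefficient c encoded
 * as the pair (c^-, c^+) with c = c^+ - c^- is forced to zero by the
 * equality c^- - c^+ = 0 added above.  For instance, with
 * node_var_coef_offset(node) = 5 and pos = 1, the equality row has a 1 at
 * column 1 + 5 + 2 = 8 and a -1 at column 9, with all other columns
 * (including the constant term) left at zero.
 */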
/* Return the lexicographically smallest rational point in the basic set
 * from which "tl" was constructed, double checking that this input set
 * was not empty.
 */
static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
{
	isl_vec *sol;

	sol = isl_tab_lexmin_get_solution(tl);
	if (!sol)
		return NULL;
	if (sol->size == 0)
		isl_die(isl_vec_get_ctx(sol), isl_error_internal,
			"error in schedule construction",
			return isl_vec_free(sol));
	return sol;
}
/* Does the solution "sol" of the LP problem constructed by setup_carry_lp
 * carry any of the "n_edge" groups of dependences?
 * The value in the first position is the sum of (1 - e_i) over all "n_edge"
 * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
 * by the edge are carried by the solution.
 * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
 * one of those is carried.
 *
 * Note that despite the fact that the problem is solved using a rational
 * solver, the solution is guaranteed to be integral.
 * Specifically, the dependence distance lower bounds e_i (and therefore
 * also their sum) are integers.  See Lemma 5 of [1].
 *
 * Any potential denominator of the sum is cleared by this function.
 * The denominator is not relevant for any of the other elements
 * in the solution.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
{
	isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
	isl_int_set_si(sol->el[0], 1);
	return isl_int_cmp_si(sol->el[1], n_edge) < 0;
}
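/* Small worked example (made-up numbers): with n_edge = 3 groups of
 * dependences and a solution with e_0 = 1, e_1 = 0 and e_2 = 0, the value
 * in position 1 is sum(1 - e_i) = 2 < 3, so at least one group (here the
 * first) is carried and the function returns 1.  If none of the e_i were
 * equal to 1, the sum would be n_edge and the function would return 0.
 */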
/* Return the lexicographically smallest rational point in "lp",
 * assuming that all variables are non-negative and performing some
 * additional sanity checks.
 * In particular, "lp" should not be empty by construction.
 * Double check that this is the case.
 * Also, check that dependences are carried for at least one of
 * the "n_edge" edges.
 *
 * If the computed schedule performs loop coalescing on a given node,
 * i.e., if it is of the form
 *
 *	c_i i + c_j j + ...
 *
 * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
 * to cut out this solution.  Repeat this process until no more loop
 * coalescing occurs or until no more dependences can be carried.
 * In the latter case, revert to the previously computed solution.
 */
static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
	__isl_take isl_basic_set *lp, int n_edge)
{
	int i, pos;
	isl_ctx *ctx;
	isl_tab_lexmin *tl;
	isl_vec *sol, *prev = NULL;
	int treat_coalescing;

	if (!lp)
		return NULL;
	ctx = isl_basic_set_get_ctx(lp);
	treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
	tl = isl_tab_lexmin_from_basic_set(lp);

	do {
		sol = non_empty_solution(tl);
		if (!sol)
			goto error;

		if (!carries_dependences(sol, n_edge)) {
			if (!prev)
				isl_die(ctx, isl_error_unknown,
					"unable to carry dependences",
					goto error);
			isl_vec_free(sol);
			sol = prev;
			break;
		}
		prev = isl_vec_free(prev);
		if (!treat_coalescing)
			break;
		for (i = 0; i < graph->n; ++i) {
			struct isl_sched_node *node = &graph->node[i];

			pos = find_node_coalescing(node, sol);
			if (pos < 0)
				goto error;
			if (pos < node->nvar)
				break;
		}
		if (i < graph->n) {
			prev = sol;
			tl = zero_out_node_coef(tl, &graph->node[i], pos);
		}
	} while (i < graph->n);

	isl_tab_lexmin_free(tl);

	return sol;
error:
	isl_tab_lexmin_free(tl);
	isl_vec_free(prev);
	isl_vec_free(sol);
	return NULL;
}
/* Construct a schedule row for each node such that as many dependences
 * as possible are carried and then continue with the next band.
 *
 * If the computed schedule row turns out to be trivial on one or
 * more nodes where it should not be trivial, then we throw it away
 * and try again on each component separately.
 *
 * If there is only one component, then we accept the schedule row anyway,
 * but we do not consider it as a complete row and therefore do not
 * increment graph->n_row.  Note that the ranks of the nodes that
 * do get a non-trivial schedule part will get updated regardless and
 * graph->maxvar is computed based on these ranks.  The test for
 * whether more schedule rows are required in compute_schedule_wcc
 * is therefore not affected.
 *
 * Insert a band corresponding to the schedule row at position "node"
 * of the schedule tree and continue with the construction of the schedule.
 * This insertion and the continued construction is performed by split_scaled
 * after optionally checking for non-trivial common divisors.
 */
static __isl_give isl_schedule_node *carry_dependences(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	int i;
	int n_edge;
	int trivial;
	isl_ctx *ctx;
	isl_vec *sol;
	isl_basic_set *lp;

	n_edge = 0;
	for (i = 0; i < graph->n_edge; ++i)
		n_edge += graph->edge[i].map->n;

	ctx = isl_schedule_node_get_ctx(node);
	if (setup_carry_lp(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	lp = isl_basic_set_copy(graph->lp);
	sol = non_neg_lexmin(graph, lp, n_edge);
	if (!sol)
		return isl_schedule_node_free(node);

	trivial = is_any_trivial(graph, sol);
	if (trivial < 0) {
		sol = isl_vec_free(sol);
	} else if (trivial && graph->scc > 1) {
		isl_vec_free(sol);
		return compute_component_schedule(node, graph, 1);
	}

	if (update_schedule(graph, sol, 0, 0) < 0)
		return isl_schedule_node_free(node);
	if (trivial)
		graph->n_row--;

	return split_scaled(node, graph);
}
/* Topologically sort statements mapped to the same schedule iteration
 * and insert a sequence node in front of "node"
 * corresponding to this order.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * If it turns out to be impossible to sort the statements apart,
 * because different dependences impose different orderings
 * on the statements, then we extend the schedule such that
 * it carries at least one more dependence.
 */
static __isl_give isl_schedule_node *sort_statements(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	isl_ctx *ctx;
	isl_union_set_list *filters;

	ctx = isl_schedule_node_get_ctx(node);
	if (graph->n < 1)
		isl_die(ctx, isl_error_internal,
			"graph should have at least one node",
			return isl_schedule_node_free(node));

	if (update_edges(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->n_edge == 0)
		return node;

	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (graph->scc < graph->n) {
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	return node;
}
/* Are there any (non-empty) (conditional) validity edges in the graph?
 */
static int has_validity_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		int empty;

		empty = isl_map_plain_is_empty(graph->edge[i].map);
		if (empty < 0)
			return -1;
		if (empty)
			continue;
		if (is_any_validity(&graph->edge[i]))
			return 1;
	}

	return 0;
}
/* Should we apply a Feautrier step?
 * That is, did the user request the Feautrier algorithm and are
 * there any validity dependences (left)?
 */
static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
{
	if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
		return 0;

	return has_validity_edges(graph);
}
/* Compute a schedule for a connected dependence graph using Feautrier's
 * multi-dimensional scheduling algorithm and return the updated schedule node.
 *
 * The original algorithm is described in [1].
 * The main idea is to minimize the number of scheduling dimensions, by
 * trying to satisfy as many dependences as possible per scheduling dimension.
 *
 * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
 *     Problem, Part II: Multi-Dimensional Time.
 *     In Intl. Journal of Parallel Programming, 1992.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
	isl_schedule_node *node, struct isl_sched_graph *graph)
{
	return carry_dependences(node, graph);
}
/* Turn off the "local" bit on all (condition) edges.
 */
static void clear_local_edges(struct isl_sched_graph *graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i)
		if (is_condition(&graph->edge[i]))
			clear_local(&graph->edge[i]);
}
4730 /* Does "graph" have both condition and conditional validity edges?
4732 static int need_condition_check(struct isl_sched_graph
*graph
)
4735 int any_condition
= 0;
4736 int any_conditional_validity
= 0;
4738 for (i
= 0; i
< graph
->n_edge
; ++i
) {
4739 if (is_condition(&graph
->edge
[i
]))
4741 if (is_conditional_validity(&graph
->edge
[i
]))
4742 any_conditional_validity
= 1;
4745 return any_condition
&& any_conditional_validity
;
4748 /* Does "graph" contain any coincidence edge?
4750 static int has_any_coincidence(struct isl_sched_graph
*graph
)
4754 for (i
= 0; i
< graph
->n_edge
; ++i
)
4755 if (is_coincidence(&graph
->edge
[i
]))
/* Extract the final schedule row as a map with the iteration domain
 * of "node" as domain.
 */
static __isl_give isl_map *final_row(struct isl_sched_node *node)
{
	isl_local_space *ls;
	isl_aff *aff;
	int row;

	row = isl_mat_rows(node->sched) - 1;
	ls = isl_local_space_from_space(isl_space_copy(node->space));
	aff = extract_schedule_row(ls, node, row);
	return isl_map_from_aff(aff);
}
/* Is the conditional validity dependence in the edge with index "edge_index"
 * violated by the latest (i.e., final) row of the schedule?
 * That is, is i scheduled after j
 * for any conditional validity dependence i -> j?
 */
static int is_violated(struct isl_sched_graph *graph, int edge_index)
{
	isl_map *src_sched, *dst_sched, *map;
	struct isl_sched_edge *edge = &graph->edge[edge_index];
	int empty;

	src_sched = final_row(edge->src);
	dst_sched = final_row(edge->dst);
	map = isl_map_copy(edge->map);
	map = isl_map_apply_domain(map, src_sched);
	map = isl_map_apply_range(map, dst_sched);
	map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	if (empty < 0)
		return -1;

	return !empty;
}
4802 /* Does "graph" have any satisfied condition edges that
4803 * are adjacent to the conditional validity constraint with
4804 * domain "conditional_source" and range "conditional_sink"?
4806 * A satisfied condition is one that is not local.
4807 * If a condition was forced to be local already (i.e., marked as local)
4808 * then there is no need to check if it is in fact local.
4810 * Additionally, mark all adjacent condition edges found as local.
4812 static int has_adjacent_true_conditions(struct isl_sched_graph
*graph
,
4813 __isl_keep isl_union_set
*conditional_source
,
4814 __isl_keep isl_union_set
*conditional_sink
)
4819 for (i
= 0; i
< graph
->n_edge
; ++i
) {
4820 int adjacent
, local
;
4821 isl_union_map
*condition
;
4823 if (!is_condition(&graph
->edge
[i
]))
4825 if (is_local(&graph
->edge
[i
]))
4828 condition
= graph
->edge
[i
].tagged_condition
;
4829 adjacent
= domain_intersects(condition
, conditional_sink
);
4830 if (adjacent
>= 0 && !adjacent
)
4831 adjacent
= range_intersects(condition
,
4832 conditional_source
);
4838 set_local(&graph
->edge
[i
]);
4840 local
= is_condition_false(&graph
->edge
[i
]);
/* Are there any violated conditional validity dependences with
 * adjacent condition dependences that are not local with respect
 * to the current schedule?
 * That is, is the conditional validity constraint violated?
 *
 * Additionally, mark all those adjacent condition dependences as local.
 * We also mark those adjacent condition dependences that were not marked
 * as local before, but just happened to be local already.  This ensures
 * that they remain local if the schedule is recomputed.
 *
 * We first collect domain and range of all violated conditional validity
 * dependences and then check if there are any adjacent non-local
 * condition dependences.
 */
static int has_violated_conditional_constraint(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int i;
	int any;
	isl_union_set *source, *sink;

	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
	for (i = 0; i < graph->n_edge; ++i) {
		isl_union_set *uset;
		isl_union_map *umap;
		int violated;

		if (!is_conditional_validity(&graph->edge[i]))
			continue;

		violated = is_violated(graph, i);
		if (violated < 0)
			goto error;
		if (!violated)
			continue;

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_domain(umap);
		source = isl_union_set_union(source, uset);
		source = isl_union_set_coalesce(source);

		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
		uset = isl_union_map_range(umap);
		sink = isl_union_set_union(sink, uset);
		sink = isl_union_set_coalesce(sink);
	}

	any = has_adjacent_true_conditions(graph, source, sink);

	isl_union_set_free(source);
	isl_union_set_free(sink);
	return any;
error:
	isl_union_set_free(source);
	isl_union_set_free(sink);
	return -1;
}
/* Examine the current band (the rows between graph->band_start and
 * graph->n_total_row), deciding whether to drop it or add it to "node"
 * and then continue with the computation of the next band, if any.
 * If "initialized" is set, then it may be assumed that compute_maxvar
 * has been called on the current band.  Otherwise, call
 * compute_maxvar if and before carry_dependences gets called.
 *
 * The caller keeps looking for a new row as long as
 * graph->n_row < graph->maxvar.  If the latest attempt to find
 * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
 * then we either
 * - split between SCCs and start over (assuming we found an interesting
 *	pair of SCCs between which to split)
 * - continue with the next band (assuming the current band has at least
 *	one row)
 * - try to carry as many dependences as possible and continue with the next
 *	band
 * In each case, we first insert a band node in the schedule tree
 * if any rows have been computed.
 *
 * If the caller managed to complete the schedule, we insert a band node
 * (if any schedule rows were computed) and we finish off by topologically
 * sorting the statements based on the remaining dependences.
 */
static __isl_give isl_schedule_node *compute_schedule_finish_band(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int initialized)
{
	int insert;

	if (graph->n_row < graph->maxvar) {
		isl_ctx *ctx;
		int empty = graph->n_total_row == graph->band_start;

		ctx = isl_schedule_node_get_ctx(node);
		if (!ctx->opt->schedule_maximize_band_depth && !empty)
			return compute_next_band(node, graph, 1);
		if (graph->src_scc >= 0)
			return compute_split_schedule(node, graph);
		if (!empty)
			return compute_next_band(node, graph, 1);
		if (!initialized && compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		return carry_dependences(node, graph);
	}

	insert = graph->n_total_row > graph->band_start;
	if (insert) {
		node = insert_current_band(node, graph, 1);
		node = isl_schedule_node_child(node, 0);
	}
	node = sort_statements(node, graph, initialized);
	if (insert)
		node = isl_schedule_node_parent(node);

	return node;
}
/* Construct a band of schedule rows for a connected dependence graph.
 * The caller is responsible for determining the strongly connected
 * components and calling compute_maxvar first.
 *
 * We try to find a sequence of as many schedule rows as possible that result
 * in non-negative dependence distances (independent of the previous rows
 * in the sequence, i.e., such that the sequence is tilable), with as
 * many of the initial rows as possible satisfying the coincidence constraints.
 * The computation stops if we can't find any more rows or if we have found
 * all the rows we wanted to find.
 *
 * If ctx->opt->schedule_outer_coincidence is set, then we force the
 * outermost dimension to satisfy the coincidence constraints.  If this
 * turns out to be impossible, we fall back on the general scheme above
 * and try to carry as many dependences as possible.
 *
 * If "graph" contains both condition and conditional validity dependences,
 * then we need to check that the conditional schedule constraint
 * is satisfied, i.e., there are no violated conditional validity dependences
 * that are adjacent to any non-local condition dependences.
 * If there are, then we mark all those adjacent condition dependences
 * as local and recompute the current band.  Those dependences that
 * are marked local will then be forced to be local.
 * The initial computation is performed with no dependences marked as local.
 * If we are lucky, then there will be no violated conditional validity
 * dependences adjacent to any non-local condition dependences.
 * Otherwise, we mark some additional condition dependences as local and
 * recompute.  We continue this process until there are no violations left or
 * until we are no longer able to compute a schedule.
 * Since there are only a finite number of dependences,
 * there will only be a finite number of iterations.
 */
static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
	struct isl_sched_graph *graph)
{
	int has_coincidence;
	int use_coincidence;
	int force_coincidence = 0;
	int check_conditional;

	if (sort_sccs(graph) < 0)
		return isl_stat_error;

	clear_local_edges(graph);
	check_conditional = need_condition_check(graph);
	has_coincidence = has_any_coincidence(graph);

	if (ctx->opt->schedule_outer_coincidence)
		force_coincidence = 1;

	use_coincidence = has_coincidence;
	while (graph->n_row < graph->maxvar) {
		isl_vec *sol;
		int violated;
		int coincident;

		graph->src_scc = -1;
		graph->dst_scc = -1;

		if (setup_lp(ctx, graph, use_coincidence) < 0)
			return isl_stat_error;
		sol = solve_lp(graph);
		if (!sol)
			return isl_stat_error;
		if (sol->size == 0) {
			int empty = graph->n_total_row == graph->band_start;

			isl_vec_free(sol);
			if (use_coincidence && (!force_coincidence || !empty)) {
				use_coincidence = 0;
				continue;
			}
			return isl_stat_ok;
		}
		coincident = !has_coincidence || use_coincidence;
		if (update_schedule(graph, sol, 1, coincident) < 0)
			return isl_stat_error;

		if (!check_conditional)
			continue;
		violated = has_violated_conditional_constraint(ctx, graph);
		if (violated < 0)
			return isl_stat_error;
		if (!violated)
			continue;
		if (reset_band(graph) < 0)
			return isl_stat_error;
		use_coincidence = has_coincidence;
	}

	return isl_stat_ok;
}
/* Compute a schedule for a connected dependence graph by considering
 * the graph as a whole and return the updated schedule node.
 *
 * The actual schedule rows of the current band are computed by
 * compute_schedule_wcc_band.  compute_schedule_finish_band takes
 * care of integrating the band into "node" and continuing
 * the computation.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	ctx = isl_schedule_node_get_ctx(node);
	if (compute_schedule_wcc_band(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	return compute_schedule_finish_band(node, graph, 1);
}
/* Clustering information used by compute_schedule_wcc_clustering.
 *
 * "n" is the number of SCCs in the original dependence graph
 * "scc" is an array of "n" elements, each representing an SCC
 * of the original dependence graph.  All entries in the same cluster
 * have the same number of schedule rows.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs,
 * where each cluster is represented by the index of the first SCC
 * in the cluster.  Initially, each SCC belongs to a cluster containing
 * only that SCC.
 *
 * "scc_in_merge" is used by merge_clusters_along_edge to keep
 * track of which SCCs need to be merged.
 *
 * "cluster" contains the merged clusters of SCCs after the clustering
 * has completed.
 *
 * "scc_node" is a temporary data structure used inside copy_partial.
 * For each SCC, it keeps track of the number of nodes in the SCC
 * that have already been copied.
 */
struct isl_clustering {
	int n;
	struct isl_sched_graph *scc;
	struct isl_sched_graph *cluster;
	int *scc_cluster;
	int *scc_node;
	int *scc_in_merge;
};
/* Initialize the clustering data structure "c" from "graph".
 *
 * In particular, allocate memory, extract the SCCs from "graph"
 * into c->scc, initialize scc_cluster and construct
 * a band of schedule rows for each SCC.
 * Within each SCC, there is only one SCC by definition.
 * Each SCC initially belongs to a cluster containing only that SCC.
 */
static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *graph)
{
	int i;

	c->n = graph->scc;
	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
	c->scc_node = isl_calloc_array(ctx, int, c->n);
	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
	if (!c->scc || !c->cluster ||
	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
		return isl_stat_error;

	for (i = 0; i < c->n; ++i) {
		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
					&edge_scc_exactly, i, &c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_maxvar(&c->scc[i]) < 0)
			return isl_stat_error;
		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = i;
	}

	return isl_stat_ok;
}
/* Free all memory allocated for "c".
 */
static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
{
	int i;

	if (c->scc)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->scc[i]);
	free(c->scc);
	if (c->cluster)
		for (i = 0; i < c->n; ++i)
			graph_free(ctx, &c->cluster[i]);
	free(c->cluster);
	free(c->scc_cluster);
	free(c->scc_node);
	free(c->scc_in_merge);
}
/* Should we refrain from merging the cluster in "graph" with
 * any other cluster?
 * In particular, is its current schedule band empty and incomplete.
 */
static int bad_cluster(struct isl_sched_graph *graph)
{
	return graph->n_row < graph->maxvar &&
		graph->n_total_row == graph->band_start;
}
/* Return the index of an edge in "graph" that can be used to merge
 * two clusters in "c".
 * Return graph->n_edge if no such edge can be found.
 * Return -1 on error.
 *
 * In particular, return a proximity edge between two clusters
 * that is not marked "no_merge" and such that neither of the
 * two clusters has an incomplete, empty band.
 *
 * If there are multiple such edges, then try and find the most
 * appropriate edge to use for merging.  In particular, pick the edge
 * with the greatest weight.  If there are multiple of those,
 * then pick one with the shortest distance between
 * the two cluster representatives.
 */
static int find_proximity(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i, best = graph->n_edge, best_dist, best_weight;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		int dist, weight;

		if (!is_proximity(edge))
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		dist = c->scc_cluster[edge->dst->scc] -
			c->scc_cluster[edge->src->scc];
		weight = edge->weight;
		if (best < graph->n_edge) {
			if (best_weight > weight)
				continue;
			if (best_weight == weight && best_dist <= dist)
				continue;
		}
		best = i;
		best_dist = dist;
		best_weight = weight;
	}

	return best;
}
/* Internal data structure used in mark_merge_sccs.
 *
 * "graph" is the dependence graph in which a strongly connected
 * component is constructed.
 * "scc_cluster" maps each SCC index to the cluster to which it belongs.
 * "src" and "dst" are the indices of the nodes that are being merged.
 */
struct isl_mark_merge_sccs_data {
	struct isl_sched_graph *graph;
	int *scc_cluster;
	int src;
	int dst;
};
/* Check whether the cluster containing node "i" depends on the cluster
 * containing node "j".  If "i" and "j" belong to the same cluster,
 * then they are taken to depend on each other to ensure that
 * the resulting strongly connected component consists of complete
 * clusters.  Furthermore, if "i" and "j" are the two nodes that
 * are being merged, then they are taken to depend on each other as well.
 * Otherwise, check if there is a (conditional) validity dependence
 * from node[j] to node[i], forcing node[i] to follow node[j].
 */
static isl_bool cluster_follows(int i, int j, void *user)
{
	struct isl_mark_merge_sccs_data *data = user;
	struct isl_sched_graph *graph = data->graph;
	int *scc_cluster = data->scc_cluster;

	if (data->src == i && data->dst == j)
		return isl_bool_true;
	if (data->src == j && data->dst == i)
		return isl_bool_true;
	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
		return isl_bool_true;

	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Mark all SCCs that belong to either of the two clusters in "c"
 * connected by the edge in "graph" with index "edge", or to any
 * of the intermediate clusters.
 * The marking is recorded in c->scc_in_merge.
 *
 * The given edge has been selected for merging two clusters,
 * meaning that there is at least a proximity edge between the two nodes.
 * However, there may also be (indirect) validity dependences
 * between the two nodes.  When merging the two clusters, all clusters
 * containing one or more of the intermediate nodes along the
 * indirect validity dependences need to be merged in as well.
 *
 * First collect all such nodes by computing the strongly connected
 * component (SCC) containing the two nodes connected by the edge, where
 * the two nodes are considered to depend on each other to make
 * sure they end up in the same SCC.  Similarly, each node is considered
 * to depend on every other node in the same cluster to ensure
 * that the SCC consists of complete clusters.
 *
 * Then the original SCCs that contain any of these nodes are marked
 * in c->scc_in_merge.
 */
static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
	int edge, struct isl_clustering *c)
{
	struct isl_mark_merge_sccs_data data;
	struct isl_tarjan_graph *g;
	int i;

	for (i = 0; i < c->n; ++i)
		c->scc_in_merge[i] = 0;

	data.graph = graph;
	data.scc_cluster = c->scc_cluster;
	data.src = graph->edge[edge].src - graph->node;
	data.dst = graph->edge[edge].dst - graph->node;

	g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
					&cluster_follows, &data);
	if (!g)
		return isl_stat_error;

	i = g->op;
	if (i < 3)
		isl_die(ctx, isl_error_internal,
			"expecting at least two nodes in component",
			goto error);
	if (g->order[--i] != -1)
		isl_die(ctx, isl_error_internal,
			"expecting end of component marker", goto error);

	for (--i; i >= 0 && g->order[i] != -1; --i) {
		int scc = graph->node[g->order[i]].scc;
		c->scc_in_merge[scc] = 1;
	}

	isl_tarjan_graph_free(g);
	return isl_stat_ok;
error:
	isl_tarjan_graph_free(g);
	return isl_stat_error;
}
/* Construct the identifier "cluster_i".
 */
static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
{
	char name[40];

	snprintf(name, sizeof(name), "cluster_%d", i);
	return isl_id_alloc(ctx, name, NULL);
}
/* Construct the space of the cluster with index "i" containing
 * the strongly connected component "scc".
 *
 * In particular, construct a space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of "scc".
 */
static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
{
	int nvar;
	isl_space *space;
	isl_id *id;

	nvar = scc->n_total_row - scc->band_start;
	space = isl_space_copy(scc->node[0].space);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, nvar);
	id = cluster_id(isl_space_get_ctx(space), i);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);

	return space;
}
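/* Illustrative example (made-up sizes): if the current band of the SCC with
 * index 3 consists of two schedule rows, then the space constructed above
 * is the two-dimensional set space
 *
 *	{ cluster_3[i0, i1] }
 *
 * (with any parameters of the original node space preserved), and the
 * cluster scheduling problem treats this space as a single statement.
 */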
/* Collect the domain of the graph for merging clusters.
 *
 * In particular, for each cluster with first SCC "i", construct
 * a set in the space called cluster_i with dimension equal
 * to the number of schedule rows in the current band of the cluster.
 */
static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
{
	int i;
	isl_space *space;
	isl_union_set *domain;

	space = isl_space_params_alloc(ctx, 0);
	domain = isl_union_set_empty(space);

	for (i = 0; i < graph->scc; ++i) {
		isl_space *space;

		if (!c->scc_in_merge[i])
			continue;
		if (c->scc_cluster[i] != i)
			continue;
		space = cluster_space(&c->scc[i], i);
		domain = isl_union_set_add_set(domain, isl_set_universe(space));
	}

	return domain;
}
/* Construct a map from the original instances to the corresponding
 * cluster instance in the current bands of the clusters in "c".
 */
static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c)
{
	int i, j;
	isl_space *space;
	isl_union_map *cluster_map;

	space = isl_space_params_alloc(ctx, 0);
	cluster_map = isl_union_map_empty(space);
	for (i = 0; i < graph->scc; ++i) {
		int start, n;
		isl_id *id;

		if (!c->scc_in_merge[i])
			continue;

		id = cluster_id(ctx, c->scc_cluster[i]);
		start = c->scc[i].band_start;
		n = c->scc[i].n_total_row - start;
		for (j = 0; j < c->scc[i].n; ++j) {
			isl_map *map;
			isl_multi_aff *ma;
			struct isl_sched_node *node = &c->scc[i].node[j];

			ma = node_extract_partial_schedule_multi_aff(node,
								    start, n);
			ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
							isl_id_copy(id));
			map = isl_map_from_multi_aff(ma);
			cluster_map = isl_union_map_add_map(cluster_map, map);
		}
		isl_id_free(id);
	}

	return cluster_map;
}
5441 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
5442 * that are not isl_edge_condition or isl_edge_conditional_validity.
5444 static __isl_give isl_schedule_constraints
*add_non_conditional_constraints(
5445 struct isl_sched_edge
*edge
, __isl_keep isl_union_map
*umap
,
5446 __isl_take isl_schedule_constraints
*sc
)
5448 enum isl_edge_type t
;
5453 for (t
= isl_edge_first
; t
<= isl_edge_last
; ++t
) {
5454 if (t
== isl_edge_condition
||
5455 t
== isl_edge_conditional_validity
)
5457 if (!is_type(edge
, t
))
5459 sc
->constraint
[t
] = isl_union_map_union(sc
->constraint
[t
],
5460 isl_union_map_copy(umap
));
5461 if (!sc
->constraint
[t
])
5462 return isl_schedule_constraints_free(sc
);
/* Add schedule constraints of types isl_edge_condition and
 * isl_edge_conditional_validity to "sc" by applying "umap" to
 * the domains of the wrapped relations in domain and range
 * of the corresponding tagged constraints of "edge".
 */
static __isl_give isl_schedule_constraints *add_conditional_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
	__isl_take isl_schedule_constraints *sc)
{
	enum isl_edge_type t;
	isl_union_map *tagged;

	for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
		if (!is_type(edge, t))
			continue;
		if (t == isl_edge_condition)
			tagged = isl_union_map_copy(edge->tagged_condition);
		else
			tagged = isl_union_map_copy(edge->tagged_validity);
		tagged = isl_union_map_zip(tagged);
		tagged = isl_union_map_apply_domain(tagged,
					isl_union_map_copy(umap));
		tagged = isl_union_map_zip(tagged);
		sc->constraint[t] = isl_union_map_union(sc->constraint[t],
							tagged);
		if (!sc->constraint[t])
			return isl_schedule_constraints_free(sc);
	}

	return sc;
}
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to the original constraints represented by "edge".
 *
 * For non-tagged dependence constraints, the cluster constraints
 * are obtained by applying "cluster_map" to the edge->map.
 *
 * For tagged dependence constraints, "cluster_map" needs to be applied
 * to the domains of the wrapped relations in domain and range
 * of the tagged dependence constraints.  Pick out the mappings
 * from these domains from "cluster_map" and construct their product.
 * This mapping can then be applied to the pair of domains.
 */
static __isl_give isl_schedule_constraints *collect_edge_constraints(
	struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
{
	isl_union_map *umap;
	isl_space *space;
	isl_union_set *uset;
	isl_union_map *umap1, *umap2;

	if (!sc)
		return NULL;

	umap = isl_union_map_from_map(isl_map_copy(edge->map));
	umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(cluster_map));
	umap = isl_union_map_apply_range(umap,
				isl_union_map_copy(cluster_map));
	sc = add_non_conditional_constraints(edge, umap, sc);
	isl_union_map_free(umap);

	if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
		return sc;

	space = isl_space_domain(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap1 = isl_union_map_copy(cluster_map);
	umap1 = isl_union_map_intersect_domain(umap1, uset);
	space = isl_space_range(isl_map_get_space(edge->map));
	uset = isl_union_set_from_set(isl_set_universe(space));
	umap2 = isl_union_map_copy(cluster_map);
	umap2 = isl_union_map_intersect_domain(umap2, uset);
	umap = isl_union_map_product(umap1, umap2);

	sc = add_conditional_constraints(edge, umap, sc);

	isl_union_map_free(umap);
	return sc;
}
/* Given a mapping "cluster_map" from the original instances to
 * the cluster instances, add schedule constraints on the clusters
 * to "sc" corresponding to all edges in "graph" between nodes that
 * belong to SCCs that are marked for merging in "scc_in_merge".
 */
static __isl_give isl_schedule_constraints *collect_constraints(
	struct isl_sched_graph *graph, int *scc_in_merge,
	__isl_keep isl_union_map *cluster_map,
	__isl_take isl_schedule_constraints *sc)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
			continue;
		if (!scc_in_merge[edge->dst->scc])
			continue;
		sc = collect_edge_constraints(edge, cluster_map, sc);
	}

	return sc;
}
/* Construct a dependence graph for scheduling clusters with respect
 * to each other and store the result in "merge_graph".
 * In particular, the nodes of the graph correspond to the schedule
 * dimensions of the current bands of those clusters that have been
 * marked for merging in "c".
 *
 * First construct an isl_schedule_constraints object for this domain
 * by transforming the edges in "graph" to the domain.
 * Then initialize a dependence graph for scheduling from these
 * constraints.
 */
static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	isl_union_set *domain;
	isl_union_map *cluster_map;
	isl_schedule_constraints *sc;
	isl_stat r;

	domain = collect_domain(ctx, graph, c);
	sc = isl_schedule_constraints_on_domain(domain);
	if (!sc)
		return isl_stat_error;
	cluster_map = collect_cluster_map(ctx, graph, c);
	sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
	isl_union_map_free(cluster_map);

	r = graph_init(merge_graph, sc);

	isl_schedule_constraints_free(sc);

	return r;
}
/* Compute the maximal number of remaining schedule rows that still need
 * to be computed for the nodes that belong to clusters with the maximal
 * dimension for the current band (i.e., the band that is to be merged).
 * Only clusters that are about to be merged are considered.
 * "maxvar" is the maximal dimension for the current band.
 * "c" contains information about the clusters.
 *
 * Return the maximal number of remaining schedule rows or -1 on error.
 */
static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
{
	int i, j;
	int max_slack;

	max_slack = 0;
	for (i = 0; i < c->n; ++i) {
		int nvar;
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
			continue;
		scc = &c->scc[i];
		nvar = scc->n_total_row - scc->band_start;
		if (nvar != maxvar)
			continue;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];
			int slack;

			if (node_update_cmap(node) < 0)
				return -1;
			slack = node->nvar - node->rank;
			if (slack > max_slack)
				max_slack = slack;
		}
	}

	return max_slack;
}
/* If there are any clusters where the dimension of the current band
 * (i.e., the band that is to be merged) is smaller than "maxvar" and
 * if there are any nodes in such a cluster where the number
 * of remaining schedule rows that still need to be computed
 * is greater than "max_slack", then return the smallest current band
 * dimension of all these clusters.  Otherwise return the original value
 * of "maxvar".  Return -1 in case of any error.
 * Only clusters that are about to be merged are considered.
 * "c" contains information about the clusters.
 */
static int limit_maxvar_to_slack(int maxvar, int max_slack,
	struct isl_clustering *c)
{
	int i, j;

	for (i = 0; i < c->n; ++i) {
		int nvar;
		struct isl_sched_graph *scc;

		if (!c->scc_in_merge[i])
			continue;
		scc = &c->scc[i];
		nvar = scc->n_total_row - scc->band_start;
		if (nvar >= maxvar)
			continue;
		for (j = 0; j < scc->n; ++j) {
			struct isl_sched_node *node = &scc->node[j];
			int slack;

			if (node_update_cmap(node) < 0)
				return -1;
			slack = node->nvar - node->rank;
			if (slack > max_slack) {
				maxvar = nvar;
				break;
			}
		}
	}

	return maxvar;
}
/* Adjust merge_graph->maxvar based on the number of remaining schedule rows
 * that still need to be computed.  In particular, if there is a node
 * in a cluster where the dimension of the current band is smaller
 * than merge_graph->maxvar, but the number of remaining schedule rows
 * is greater than that of any node in a cluster with the maximal
 * dimension for the current band (i.e., merge_graph->maxvar),
 * then adjust merge_graph->maxvar to the (smallest) current band dimension
 * of those clusters.  Without this adjustment, the total number of
 * schedule dimensions would be increased, resulting in a skewed view
 * of the number of coincident dimensions.
 * "c" contains information about the clusters.
 *
 * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
 * then there is no point in attempting any merge since it will be rejected
 * anyway.  Set merge_graph->maxvar to zero in such cases.
 */
static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
	struct isl_sched_graph *merge_graph, struct isl_clustering *c)
{
	int max_slack, maxvar;

	max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
	if (max_slack < 0)
		return isl_stat_error;
	maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
	if (maxvar < 0)
		return isl_stat_error;

	if (maxvar < merge_graph->maxvar) {
		if (isl_options_get_schedule_maximize_band_depth(ctx))
			merge_graph->maxvar = 0;
		else
			merge_graph->maxvar = maxvar;
	}

	return isl_stat_ok;
}
/* Return the number of coincident dimensions in the current band of "graph",
 * where the nodes of "graph" are assumed to be scheduled by a single band.
 */
static int get_n_coincident(struct isl_sched_graph *graph)
{
	int i;

	for (i = graph->band_start; i < graph->n_total_row; ++i)
		if (!graph->node[0].coincident[i])
			break;

	return i - graph->band_start;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph", given that
 * coincidence should be maximized?
 *
 * If the number of coincident schedule dimensions in the merged band
 * would be less than the maximal number of coincident schedule dimensions
 * in any of the merged clusters, then the clusters should not be merged.
 */
static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;
	int n_coincident;
	int max_coincident;

	max_coincident = 0;
	for (i = 0; i < c->n; ++i) {
		if (!c->scc_in_merge[i])
			continue;
		n_coincident = get_n_coincident(&c->scc[i]);
		if (n_coincident > max_coincident)
			max_coincident = n_coincident;
	}

	n_coincident = get_n_coincident(merge_graph);

	return n_coincident >= max_coincident;
}
/* Return the transformation on "node" expressed by the current (and only)
 * band of "merge_graph" applied to the clusters in "c".
 *
 * First find the representation of "node" in its SCC in "c" and
 * extract the transformation expressed by the current band.
 * Then extract the transformation applied by "merge_graph"
 * to the cluster to which this SCC belongs.
 * Combine the two to obtain the complete transformation on the node.
 *
 * Note that the range of the first transformation is an anonymous space,
 * while the domain of the second is named "cluster_X".  The range
 * of the former therefore needs to be adjusted before the two
 * can be combined.
 */
static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
	struct isl_sched_node *node, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	struct isl_sched_node *scc_node, *cluster_node;
	int start, n;
	isl_id *id;
	isl_space *space;
	isl_multi_aff *ma, *ma2;

	scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
	start = c->scc[node->scc].band_start;
	n = c->scc[node->scc].n_total_row - start;
	ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
	space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
	cluster_node = graph_find_node(ctx, merge_graph, space);
	if (space && !cluster_node)
		isl_die(ctx, isl_error_internal, "unable to find cluster",
			space = isl_space_free(space));
	id = isl_space_get_tuple_id(space, isl_dim_set);
	ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
	isl_space_free(space);
	n = merge_graph->n_total_row;
	ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
	ma = isl_multi_aff_pullback_multi_aff(ma2, ma);

	return isl_map_from_multi_aff(ma);
}
/* Given a set of distances "set", are they bounded by a small constant
 * in direction "pos"?
 * In practice, check if they are bounded by 2 by checking that there
 * are no elements with a value greater than or equal to 3 or
 * smaller than or equal to -3.
 */
static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
{
	isl_bool bounded;
	isl_set *test;

	if (!set)
		return isl_bool_error;

	test = isl_set_copy(set);
	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	if (bounded < 0 || !bounded)
		return bounded;

	test = isl_set_copy(set);
	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
	bounded = isl_set_is_empty(test);
	isl_set_free(test);

	return bounded;
}
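/* Small worked example (made-up distance set): for
 * dist = { [d0, d1] : 0 <= d0 <= 1 and d1 >= 0 }, the check above reports
 * direction 0 as bounded (no value >= 3 or <= -3 exists) and direction 1
 * as unbounded, since d1 can grow arbitrarily large.
 */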
/* Does the set "set" have a fixed (but possibly parametric) value
 * at dimension "pos"?
 */
static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
{
	int n;
	isl_bool single;

	if (!set)
		return isl_bool_error;
	set = isl_set_copy(set);
	n = isl_set_dim(set, isl_dim_set);
	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
	set = isl_set_project_out(set, isl_dim_set, 0, pos);
	single = isl_set_is_singleton(set);
	isl_set_free(set);

	return single;
}
5867 /* Does "map" have a fixed (but possible parametric) value
5868 * at dimension "pos" of either its domain or its range?
5870 static isl_bool
has_singular_src_or_dst(__isl_keep isl_map
*map
, int pos
)
5875 set
= isl_map_domain(isl_map_copy(map
));
5876 single
= has_single_value(set
, pos
);
5879 if (single
< 0 || single
)
5882 set
= isl_map_range(isl_map_copy(map
));
5883 single
= has_single_value(set
, pos
);
/* Does the edge "edge" from "graph" have bounded dependence distances
 * in the merged graph "merge_graph" of a selection of clusters in "c"?
 *
 * Extract the complete transformations of the source and destination
 * nodes of the edge, apply them to the edge constraints and
 * compute the differences.  Finally, check if these differences are bounded
 * in each direction.
 *
 * If the dimension of the band is greater than the number of
 * dimensions that can be expected to be optimized by the edge
 * (based on its weight), then also allow the differences to be unbounded
 * in the remaining dimensions, but only if either the source or
 * the destination has a fixed value in that direction.
 * This allows a statement that produces values that are used by
 * several instances of another statement to be merged with that
 * other statement.
 * However, merging such clusters will introduce an inherently
 * large proximity distance inside the merged cluster, meaning
 * that proximity distances will no longer be optimized in
 * subsequent merges.  These merges are therefore only allowed
 * after all other possible merges have been tried.
 * The first time such a merge is encountered, the weight of the edge
 * is replaced by a negative weight.  The second time (i.e., after
 * all merges over edges with a non-negative weight have been tried),
 * the merge is allowed.
 */
static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i, n, n_slack;
	isl_bool bounded;
	isl_map *map, *t;
	isl_set *dist;

	map = isl_map_copy(edge->map);
	t = extract_node_transformation(ctx, edge->src, c, merge_graph);
	map = isl_map_apply_domain(map, t);
	t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
	map = isl_map_apply_range(map, t);
	dist = isl_map_deltas(isl_map_copy(map));

	bounded = isl_bool_true;
	n = isl_set_dim(dist, isl_dim_set);
	n_slack = n - edge->weight;
	if (edge->weight < 0)
		n_slack -= graph->max_weight + 1;
	for (i = 0; i < n; ++i) {
		isl_bool bounded_i, singular_i;

		bounded_i = distance_is_bounded(dist, i);
		if (bounded_i < 0)
			goto error;
		if (bounded_i)
			continue;
		if (edge->weight >= 0)
			bounded = isl_bool_false;
		n_slack--;
		if (n_slack < 0)
			break;
		singular_i = has_singular_src_or_dst(map, i);
		if (singular_i < 0)
			goto error;
		if (singular_i)
			continue;
		bounded = isl_bool_false;
		break;
	}
	if (!bounded && i >= n && edge->weight >= 0)
		edge->weight -= graph->max_weight + 1;
	isl_map_free(map);
	isl_set_free(dist);

	return bounded;
error:
	isl_map_free(map);
	isl_set_free(dist);
	return isl_bool_error;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * In particular, is there at least one proximity constraint
 * that is optimized by the merge?
 *
 * A proximity constraint is considered to be optimized
 * if the dependence distances are small.
 */
static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
	struct isl_sched_graph *graph, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		isl_bool bounded;

		if (!is_proximity(edge))
			continue;
		if (!c->scc_in_merge[edge->src->scc])
			continue;
		if (!c->scc_in_merge[edge->dst->scc])
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;
		bounded = has_bounded_distances(ctx, edge, graph, c,
						merge_graph);
		if (bounded < 0 || bounded)
			return bounded;
	}

	return isl_bool_false;
}
/* Should the clusters be merged based on the cluster schedule
 * in the current (and only) band of "merge_graph"?
 * "graph" is the original dependence graph, while "c" records
 * which SCCs are involved in the latest merge.
 *
 * If the current band is empty, then the clusters should not be merged.
 *
 * If the band depth should be maximized and the merge schedule
 * is incomplete (meaning that the dimension of some of the schedule
 * bands in the original schedule will be reduced), then the clusters
 * should not be merged.
 *
 * If the schedule_maximize_coincidence option is set, then check that
 * the number of coincident schedule dimensions is not reduced.
 *
 * Finally, only allow the merge if at least one proximity
 * constraint is optimized.
 */
static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
{
	if (merge_graph->n_total_row == merge_graph->band_start)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
	    merge_graph->n_total_row < merge_graph->maxvar)
		return isl_bool_false;

	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
		isl_bool ok;

		ok = ok_to_merge_coincident(c, merge_graph);
		if (ok < 0 || !ok)
			return ok;
	}

	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
}
/* Apply the schedule in "t_node" to the "n" rows starting at "first"
 * of the schedule in "node" and return the result.
 *
 * That is, essentially compute
 *
 *	T * N(first:first+n-1)
 *
 * taking into account the constant term and the parameter coefficients
 * in "t_node".
 */
static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
	struct isl_sched_node *t_node, struct isl_sched_node *node,
	int first, int n)
{
	int i, j;
	isl_mat *t;
	int n_row, n_col, n_param, n_var;

	n_param = node->nparam;
	n_var = node->nvar;
	n_row = isl_mat_rows(t_node->sched);
	n_col = isl_mat_cols(node->sched);
	t = isl_mat_alloc(ctx, n_row, n_col);
	if (!t)
		return NULL;
	for (i = 0; i < n_row; ++i) {
		isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
		isl_seq_clr(t->row[i] + 1 + n_param, n_var);
		for (j = 0; j < n; ++j)
			isl_seq_addmul(t->row[i],
					t_node->sched->row[i][1 + n_param + j],
					node->sched->row[first + j],
					1 + n_param + n_var);
	}

	return t;
}
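
/* Worked example (not from the isl sources) of the computation performed
 * by node_transformation above, with n_param = 1 and n_var = 2.
 * Suppose "t_node" has the single schedule row [1 0 | 2 1], i.e., the
 * cluster schedule is 1 + 2 * f0 + 1 * f1 in terms of the two band rows
 * f0 and f1 of "node", and suppose rows "first" and "first" + 1 of
 * node->sched are [0 1 | 1 0] and [0 0 | 0 1].  The resulting row is
 *
 *	[1 0 | 0 0] + 2 * [0 1 | 1 0] + 1 * [0 0 | 0 1] = [1 2 | 2 1]
 *
 * that is, constant term 1, parameter coefficient 2 and
 * variable coefficients 2 and 1.
 */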
/* Apply the cluster schedule in "t_node" to the current band
 * schedule of the nodes in "graph".
 *
 * In particular, replace the rows starting at band_start
 * by the result of applying the cluster schedule in "t_node"
 * to the original rows.
 *
 * The coincidence of the schedule is determined by the coincidence
 * of the cluster schedule.
 */
static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_sched_node *t_node)
{
	int i, j;
	int n_new;
	int start, n;

	start = graph->band_start;
	n = graph->n_total_row - start;

	n_new = isl_mat_rows(t_node->sched);
	for (i = 0; i < graph->n; ++i) {
		struct isl_sched_node *node = &graph->node[i];
		isl_mat *t;

		t = node_transformation(ctx, t_node, node, start, n);
		node->sched = isl_mat_drop_rows(node->sched, start, n);
		node->sched = isl_mat_concat(node->sched, t);
		node->sched_map = isl_map_free(node->sched_map);
		if (!node->sched)
			return isl_stat_error;
		for (j = 0; j < n_new; ++j)
			node->coincident[start + j] = t_node->coincident[j];
	}
	graph->n_total_row -= n;
	graph->n_row -= n;
	graph->n_total_row += n_new;
	graph->n_row += n_new;

	return isl_stat_ok;
}
/* Merge the clusters marked for merging in "c" into a single
 * cluster using the cluster schedule in the current band of "merge_graph".
 * The representative SCC for the new cluster is the SCC with
 * the smallest index.
 *
 * The current band schedule of each SCC in the new cluster is obtained
 * by applying the schedule of the corresponding original cluster
 * to the original band schedule.
 * All SCCs in the new cluster have the same number of schedule rows.
 */
static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
	struct isl_sched_graph *merge_graph)
{
	int i;
	int cluster = -1;
	isl_space *space;

	for (i = 0; i < c->n; ++i) {
		struct isl_sched_node *node;

		if (!c->scc_in_merge[i])
			continue;
		if (cluster < 0)
			cluster = i;
		space = cluster_space(&c->scc[i], c->scc_cluster[i]);
		if (!space)
			return isl_stat_error;
		node = graph_find_node(ctx, merge_graph, space);
		isl_space_free(space);
		if (!node)
			isl_die(ctx, isl_error_internal,
				"unable to find cluster",
				return isl_stat_error);
		if (transform(ctx, &c->scc[i], node) < 0)
			return isl_stat_error;
		c->scc_cluster[i] = cluster;
	}

	return isl_stat_ok;
}
/* Try and merge the clusters of SCCs marked in c->scc_in_merge
 * by scheduling the current cluster bands with respect to each other.
 *
 * Construct a dependence graph with a space for each cluster and
 * with the coordinates of each space corresponding to the schedule
 * dimensions of the current band of that cluster.
 * Construct a cluster schedule in this cluster dependence graph and
 * apply it to the current cluster bands if it is applicable
 * according to ok_to_merge.
 *
 * If the number of remaining schedule dimensions in a cluster
 * with a non-maximal current schedule dimension is greater than
 * the number of remaining schedule dimensions in clusters
 * with a maximal current schedule dimension, then restrict
 * the number of rows to be computed in the cluster schedule
 * to the minimal such non-maximal current schedule dimension.
 * Do this by adjusting merge_graph.maxvar.
 *
 * Return isl_bool_true if the clusters have effectively been merged
 * into a single cluster.
 *
 * Note that since the standard scheduling algorithm minimizes the maximal
 * distance over proximity constraints, the proximity constraints between
 * the merged clusters may not be optimized any further than what is
 * sufficient to bring the distances within the limits of the internal
 * proximity constraints inside the individual clusters.
 * It may therefore make sense to perform an additional translation step
 * to bring the clusters closer to each other, while maintaining
 * the linear part of the merging schedule found using the standard
 * scheduling algorithm.
 */
static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	struct isl_sched_graph merge_graph = { 0 };
	isl_bool merged;

	if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
		goto error;

	if (compute_maxvar(&merge_graph) < 0)
		goto error;
	if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
		goto error;
	if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
		goto error;
	merged = ok_to_merge(ctx, graph, c, &merge_graph);
	if (merged && merge(ctx, c, &merge_graph) < 0)
		goto error;

	graph_free(ctx, &merge_graph);
	return merged;
error:
	graph_free(ctx, &merge_graph);
	return isl_bool_error;
}
/* Is there any edge marked "no_merge" between two SCCs that are
 * about to be merged (i.e., that are set in "scc_in_merge")?
 * "merge_edge" is the proximity edge along which the clusters of SCCs
 * are going to be merged.
 *
 * If there is any edge between two SCCs with a negative weight,
 * while the weight of "merge_edge" is non-negative, then this
 * means that the edge was postponed.  "merge_edge" should then
 * also be postponed since merging along the edge with negative weight should
 * be postponed until all edges with non-negative weight have been tried.
 * Replace the weight of "merge_edge" by a negative weight as well and
 * tell the caller not to attempt a merge.
 */
static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
	struct isl_sched_edge *merge_edge)
{
	int i;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];

		if (!scc_in_merge[edge->src->scc])
			continue;
		if (!scc_in_merge[edge->dst->scc])
			continue;
		if (edge->no_merge)
			return 1;
		if (merge_edge->weight >= 0 && edge->weight < 0) {
			merge_edge->weight -= graph->max_weight + 1;
			return 1;
		}
	}

	return 0;
}
/* Merge the two clusters in "c" connected by the edge in "graph"
 * with index "edge" into a single cluster.
 * If it turns out to be impossible to merge these two clusters,
 * then mark the edge as "no_merge" such that it will not be
 * considered again.
 *
 * First mark all SCCs that need to be merged.  This includes the SCCs
 * in the two clusters, but it may also include the SCCs
 * of intermediate clusters.
 * If there is already a no_merge edge between any pair of such SCCs,
 * then simply mark the current edge as no_merge as well.
 * Likewise, if any of those edges was postponed by has_bounded_distances,
 * then postpone the current edge as well.
 * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
 * if the clusters did not end up getting merged, unless the non-merge
 * is due to the fact that the edge was postponed.  This postponement
 * can be recognized by a change in weight (from non-negative to negative).
 */
static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
	struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
{
	isl_bool merged;
	int edge_weight = graph->edge[edge].weight;

	if (mark_merge_sccs(ctx, graph, edge, c) < 0)
		return isl_stat_error;

	if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
		merged = isl_bool_false;
	else
		merged = try_merge(ctx, graph, c);
	if (merged < 0)
		return isl_stat_error;
	if (!merged && edge_weight == graph->edge[edge].weight)
		graph->edge[edge].no_merge = 1;

	return isl_stat_ok;
}
/* Does "node" belong to the cluster identified by "cluster"?
 */
static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
{
	return node->cluster == cluster;
}

/* Does "edge" connect two nodes belonging to the cluster
 * identified by "cluster"?
 */
static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
{
	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
}
/* Swap the schedule of "node1" and "node2".
 * Both nodes have been derived from the same node in a common parent graph.
 * Since the "coincident" field is shared with that node
 * in the parent graph, there is no need to also swap this field.
 */
static void swap_sched(struct isl_sched_node *node1,
	struct isl_sched_node *node2)
{
	isl_mat *sched;
	isl_map *sched_map;

	sched = node1->sched;
	node1->sched = node2->sched;
	node2->sched = sched;

	sched_map = node1->sched_map;
	node1->sched_map = node2->sched_map;
	node2->sched_map = sched_map;
}
/* Copy the current band schedule from the SCCs that form the cluster
 * with index "pos" to the actual cluster at position "pos".
 * By construction, the index of the first SCC that belongs to the cluster
 * is also "pos".
 *
 * The order of the nodes inside both the SCCs and the cluster
 * is assumed to be the same as the order in the original "graph".
 *
 * Since the SCC graphs will no longer be used after this function,
 * the schedules are actually swapped rather than copied.
 */
static isl_stat copy_partial(struct isl_sched_graph *graph,
	struct isl_clustering *c, int pos)
{
	int i, j;

	c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
	c->cluster[pos].n_row = c->scc[pos].n_row;
	c->cluster[pos].maxvar = c->scc[pos].maxvar;
	j = 0;
	for (i = 0; i < graph->n; ++i) {
		int k;
		int s;

		if (graph->node[i].cluster != pos)
			continue;
		s = graph->node[i].scc;
		k = c->scc_node[s]++;
		swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
		if (c->scc[s].maxvar > c->cluster[pos].maxvar)
			c->cluster[pos].maxvar = c->scc[s].maxvar;
		++j;
	}

	return isl_stat_ok;
}
/* Is there a (conditional) validity dependence from node[j] to node[i],
 * forcing node[i] to follow node[j], or do the nodes belong to the same
 * cluster?
 */
static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
{
	struct isl_sched_graph *graph = user;

	if (graph->node[i].cluster == graph->node[j].cluster)
		return isl_bool_true;
	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
}
/* Extract the merged clusters of SCCs in "graph", sort them, and
 * store them in c->clusters.  Update c->scc_cluster accordingly.
 *
 * First keep track of the cluster containing the SCC to which a node
 * belongs in the node itself.
 * Then extract the clusters into c->clusters, copying the current
 * band schedule from the SCCs that belong to the cluster.
 * Do this only once per cluster.
 *
 * Finally, topologically sort the clusters and update c->scc_cluster
 * to match the new scc numbering.  While the SCCs were originally
 * sorted already, some SCCs that depend on some other SCCs may
 * have been merged with SCCs that appear before these other SCCs.
 * A reordering may therefore be required.
 */
static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	for (i = 0; i < graph->n; ++i)
		graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];

	for (i = 0; i < graph->scc; ++i) {
		if (c->scc_cluster[i] != i)
			continue;
		if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
				&edge_cluster_exactly, i, &c->cluster[i]) < 0)
			return isl_stat_error;
		c->cluster[i].src_scc = -1;
		c->cluster[i].dst_scc = -1;
		if (copy_partial(graph, c, i) < 0)
			return isl_stat_error;
	}

	if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
		return isl_stat_error;
	for (i = 0; i < graph->n; ++i)
		c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;

	return isl_stat_ok;
}
/* Compute weights on the proximity edges of "graph" that can
 * be used by find_proximity to find the most appropriate
 * proximity edge to use to merge two clusters in "c".
 * The weights are also used by has_bounded_distances to determine
 * whether the merge should be allowed.
 * Store the maximum of the computed weights in graph->max_weight.
 *
 * The computed weight is a measure for the number of remaining schedule
 * dimensions that can still be completely aligned.
 * In particular, compute the number of equalities between
 * input dimensions and output dimensions in the proximity constraints.
 * The directions that are already handled by outer schedule bands
 * are projected out prior to determining this number.
 *
 * Edges that will never be considered by find_proximity are ignored.
 */
static isl_stat compute_weights(struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;

	graph->max_weight = 0;

	for (i = 0; i < graph->n_edge; ++i) {
		struct isl_sched_edge *edge = &graph->edge[i];
		struct isl_sched_node *src = edge->src;
		struct isl_sched_node *dst = edge->dst;
		isl_basic_map *hull;
		int n_in, n_out;

		if (!is_proximity(edge))
			continue;
		if (bad_cluster(&c->scc[edge->src->scc]) ||
		    bad_cluster(&c->scc[edge->dst->scc]))
			continue;
		if (c->scc_cluster[edge->dst->scc] ==
		    c->scc_cluster[edge->src->scc])
			continue;

		hull = isl_map_affine_hull(isl_map_copy(edge->map));
		hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
						    isl_mat_copy(src->ctrans));
		hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
						    isl_mat_copy(dst->ctrans));
		hull = isl_basic_map_project_out(hull,
						isl_dim_in, 0, src->rank);
		hull = isl_basic_map_project_out(hull,
						isl_dim_out, 0, dst->rank);
		hull = isl_basic_map_remove_divs(hull);
		n_in = isl_basic_map_dim(hull, isl_dim_in);
		n_out = isl_basic_map_dim(hull, isl_dim_out);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_in, 0, n_in);
		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
							isl_dim_out, 0, n_out);
		if (!hull)
			return isl_stat_error;
		edge->weight = hull->n_eq;
		isl_basic_map_free(hull);

		if (edge->weight > graph->max_weight)
			graph->max_weight = edge->weight;
	}

	return isl_stat_ok;
}
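
/* Illustrative example (not from the isl sources) of the weight computed
 * above, assuming no directions have been fixed by outer schedule bands yet
 * (so that nothing is projected out): for a proximity edge with map
 * { S[i, j] -> T[i, j] }, the affine hull contains the equalities
 * o0 = i0 and o1 = i1, so the edge gets weight 2, while for
 * { S[i] -> T[i, j] : 0 <= j < 10 } only o0 = i0 survives the affine hull
 * and the weight is 1.
 */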
/* Call compute_schedule_finish_band on each of the clusters in "c"
 * in their topological order.  This order is determined by the scc
 * fields of the nodes in "graph".
 * Combine the results in a sequence expressing the topological order.
 *
 * If there is only one cluster left, then there is no need to introduce
 * a sequence node.  Also, in this case, the cluster necessarily contains
 * the SCC at position 0 in the original graph and is therefore also
 * stored in the first cluster of "c".
 */
static __isl_give isl_schedule_node *finish_bands_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	struct isl_clustering *c)
{
	int i;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (graph->scc == 1)
		return compute_schedule_finish_band(node, &c->cluster[0], 0);

	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	node = isl_schedule_node_insert_sequence(node, filters);

	for (i = 0; i < graph->scc; ++i) {
		int j = c->scc_cluster[i];
		node = isl_schedule_node_child(node, i);
		node = isl_schedule_node_child(node, 0);
		node = compute_schedule_finish_band(node, &c->cluster[j], 0);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}
/* Compute a schedule for a connected dependence graph by first considering
 * each strongly connected component (SCC) in the graph separately and then
 * incrementally combining them into clusters.
 * Return the updated schedule node.
 *
 * Initially, each cluster consists of a single SCC, each with its
 * own band schedule.  The algorithm then tries to merge pairs
 * of clusters along a proximity edge until no more suitable
 * proximity edges can be found.  During this merging, the schedule
 * is maintained in the individual SCCs.
 * After the merging is completed, the full resulting clusters
 * are extracted and in finish_bands_clustering,
 * compute_schedule_finish_band is called on each of them to integrate
 * the band into "node" and to continue the computation.
 *
 * compute_weights initializes the weights that are used by find_proximity.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;
	struct isl_clustering c;
	int i;

	ctx = isl_schedule_node_get_ctx(node);

	if (clustering_init(ctx, &c, graph) < 0)
		goto error;

	if (compute_weights(graph, &c) < 0)
		goto error;

	for (;;) {
		i = find_proximity(graph, &c);
		if (i < 0)
			goto error;
		if (i >= graph->n_edge)
			break;
		if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
			goto error;
	}

	if (extract_clusters(ctx, graph, &c) < 0)
		goto error;

	node = finish_bands_clustering(node, graph, &c);

	clustering_free(ctx, &c);
	return node;
error:
	clustering_free(ctx, &c);
	return isl_schedule_node_free(node);
}
/* Compute a schedule for a connected dependence graph and return
 * the updated schedule node.
 *
 * If Feautrier's algorithm is selected, we first recursively try to satisfy
 * as many validity dependences as possible.  When all validity dependences
 * are satisfied we extend the schedule to a full-dimensional schedule.
 *
 * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
 * depending on whether the user has selected the option to try and
 * compute a schedule for the entire (weakly connected) component first.
 * If there is only a single strongly connected component (SCC), then
 * there is no point in trying to combine SCCs
 * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
 * is called instead.
 */
static __isl_give isl_schedule_node *compute_schedule_wcc(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (detect_sccs(ctx, graph) < 0)
		return isl_schedule_node_free(node);

	if (compute_maxvar(graph) < 0)
		return isl_schedule_node_free(node);

	if (need_feautrier_step(ctx, graph))
		return compute_schedule_wcc_feautrier(node, graph);

	if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
		return compute_schedule_wcc_whole(node, graph);

	return compute_schedule_wcc_clustering(node, graph);
}
/* Compute a schedule for each group of nodes identified by node->scc
 * separately and then combine them in a sequence node (or as a set node
 * if graph->weak is set) inserted at position "node" of the schedule tree.
 * Return the updated schedule node.
 *
 * If "wcc" is set then each of the groups belongs to a single
 * weakly connected component in the dependence graph so that
 * there is no need for compute_sub_schedule to look for weakly
 * connected components.
 */
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc)
{
	int component;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;
	ctx = isl_schedule_node_get_ctx(node);

	filters = extract_sccs(ctx, graph);
	if (graph->weak)
		node = isl_schedule_node_insert_set(node, filters);
	else
		node = isl_schedule_node_insert_sequence(node, filters);

	for (component = 0; component < graph->scc; ++component) {
		node = isl_schedule_node_child(node, component);
		node = isl_schedule_node_child(node, 0);
		node = compute_sub_schedule(node, ctx, graph,
					    &node_scc_exactly,
					    &edge_scc_exactly, component, wcc);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}
/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	} else {
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	}

	if (graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
}
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	if (isl_union_set_n_set(domain) == 0) {
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);
	}

	if (graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);
	if (graph.n > 0)
		node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);

	return sched;
}
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
{
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}
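
/* Illustrative usage sketch (not part of isl itself): build schedule
 * constraints from a domain and a dependence relation and compute
 * a schedule.  The string descriptions are made-up examples.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ A[i] : 0 <= i < 100; B[i] : 0 <= i < 100 }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"{ A[i] -> B[i] }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 *	isl_schedule *schedule =
 *		isl_schedule_constraints_compute_schedule(sc);
 *	...
 *	isl_schedule_free(schedule);
 *	isl_ctx_free(ctx);
 */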