/*
 * Copyright 2011 INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl_aff_private.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl_schedule_private.h>
#include <isl_schedule_tree.h>
#include <isl_schedule_node_private.h>
#include <isl_band_private.h>
/* Return a schedule encapsulating the given schedule tree.
 *
 * We currently only allow schedule trees with a domain as root.
 *
 * The leaf field is initialized as a leaf node so that it can be
 * used to represent leaves in the constructed schedule.
 * The reference count is set to -1 since the isl_schedule_tree
 * should never be freed.  It is up to the (internal) users of
 * these leaves to ensure that they are only used while the schedule
 * is still alive.
 */
__isl_give isl_schedule *isl_schedule_from_schedule_tree(isl_ctx *ctx,
	__isl_take isl_schedule_tree *tree)
{
	isl_schedule *schedule;

	if (!tree)
		return NULL;
	if (isl_schedule_tree_get_type(tree) != isl_schedule_node_domain)
		isl_die(isl_schedule_tree_get_ctx(tree), isl_error_unsupported,
			"root of schedule tree should be a domain",
			goto error);

	schedule = isl_calloc_type(ctx, isl_schedule);
	if (!schedule)
		goto error;

	schedule->leaf.ctx = ctx;
	isl_ctx_ref(ctx);
	schedule->ref = 1;
	schedule->root = tree;
	schedule->leaf.ref = -1;
	schedule->leaf.type = isl_schedule_node_leaf;

	return schedule;
error:
	isl_schedule_tree_free(tree);
	return NULL;
}
/* Return a pointer to a schedule with as single node
 * a domain node with the given domain.
 */
__isl_give isl_schedule *isl_schedule_from_domain(
	__isl_take isl_union_set *domain)
{
	isl_ctx *ctx;
	isl_schedule_tree *tree;

	ctx = isl_union_set_get_ctx(domain);
	tree = isl_schedule_tree_from_domain(domain);
	return isl_schedule_from_schedule_tree(ctx, tree);
}
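
/* A minimal usage sketch (not part of the original source): construct a
 * trivial schedule for a hypothetical one-dimensional domain and release
 * it again.  The statement name "S" and the bounds are assumptions made
 * purely for illustration.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *uset =
 *		isl_union_set_read_from_str(ctx, "{ S[i] : 0 <= i < 10 }");
 *	isl_schedule *sched = isl_schedule_from_domain(uset);
 *	isl_schedule_free(sched);
 *	isl_ctx_free(ctx);
 */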
/* Return a pointer to a schedule with as single node
 * a domain node with an empty domain.
 */
__isl_give isl_schedule *isl_schedule_empty(__isl_take isl_space *space)
{
	return isl_schedule_from_domain(isl_union_set_empty(space));
}
/* Return a new reference to "sched".
 */
__isl_give isl_schedule *isl_schedule_copy(__isl_keep isl_schedule *sched)
{
	if (!sched)
		return NULL;

	sched->ref++;
	return sched;
}
/* Return an isl_schedule that is equal to "schedule" and that has only
 * a single reference.
 *
 * We only need and support this function when the schedule is represented
 * as a schedule tree.
 */
__isl_give isl_schedule *isl_schedule_cow(__isl_take isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_tree *tree;

	if (!schedule)
		return NULL;
	if (schedule->ref == 1)
		return schedule;

	ctx = isl_schedule_get_ctx(schedule);
	if (!schedule->root)
		isl_die(ctx, isl_error_internal,
			"only for schedule tree based schedules",
			return isl_schedule_free(schedule));

	schedule->ref--;
	tree = isl_schedule_tree_copy(schedule->root);
	return isl_schedule_from_schedule_tree(ctx, tree);
}
__isl_null isl_schedule *isl_schedule_free(__isl_take isl_schedule *sched)
{
	if (!sched)
		return NULL;

	if (--sched->ref > 0)
		return NULL;

	isl_band_list_free(sched->band_forest);
	isl_schedule_tree_free(sched->root);
	isl_ctx_deref(sched->leaf.ctx);
	free(sched);
	return NULL;
}
/* Replace the root of "schedule" by "tree".
 */
__isl_give isl_schedule *isl_schedule_set_root(
	__isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree)
{
	if (!schedule || !tree)
		goto error;
	if (schedule->root == tree) {
		isl_schedule_tree_free(tree);
		return schedule;
	}

	schedule = isl_schedule_cow(schedule);
	if (!schedule)
		goto error;
	isl_schedule_tree_free(schedule->root);
	schedule->root = tree;

	return schedule;
error:
	isl_schedule_free(schedule);
	isl_schedule_tree_free(tree);
	return NULL;
}
isl_ctx *isl_schedule_get_ctx(__isl_keep isl_schedule *schedule)
{
	return schedule ? schedule->leaf.ctx : NULL;
}
/* Return a pointer to the leaf of "schedule".
 *
 * Even though these leaves are not reference counted, we still
 * indicate that this function does not return a copy.
 */
__isl_keep isl_schedule_tree *isl_schedule_peek_leaf(
	__isl_keep isl_schedule *schedule)
{
	return schedule ? &schedule->leaf : NULL;
}
/* Return the (parameter) space of the schedule, i.e., the space
 * of the root domain.
 */
__isl_give isl_space *isl_schedule_get_space(
	__isl_keep isl_schedule *schedule)
{
	enum isl_schedule_node_type type;
	isl_space *space;
	isl_union_set *domain;

	if (!schedule)
		return NULL;
	if (!schedule->root)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"schedule tree representation not available",
			return NULL);
	type = isl_schedule_tree_get_type(schedule->root);
	if (type != isl_schedule_node_domain)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_internal,
			"root node not a domain node", return NULL);

	domain = isl_schedule_tree_domain_get_domain(schedule->root);
	space = isl_union_set_get_space(domain);
	isl_union_set_free(domain);

	return space;
}
/* Return a pointer to the root of "schedule".
 */
__isl_give isl_schedule_node *isl_schedule_get_root(
	__isl_keep isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_tree *tree;
	isl_schedule_tree_list *ancestors;

	if (!schedule)
		return NULL;

	if (!schedule->root)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"schedule tree representation not available",
			return NULL);

	ctx = isl_schedule_get_ctx(schedule);
	tree = isl_schedule_tree_copy(schedule->root);
	schedule = isl_schedule_copy(schedule);
	ancestors = isl_schedule_tree_list_alloc(ctx, 0);
	return isl_schedule_node_alloc(schedule, tree, ancestors, NULL);
}
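
/* Illustrative sketch (not part of the original source): a caller holding a
 * schedule can obtain the root node and check that it is a domain node.
 *
 *	isl_schedule_node *root = isl_schedule_get_root(schedule);
 *	if (isl_schedule_node_get_type(root) == isl_schedule_node_domain)
 *		...
 *	isl_schedule_node_free(root);
 */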
/* Set max_out to the maximal number of output dimensions over
 * all maps.
 */
static int update_max_out(__isl_take isl_map *map, void *user)
{
	int *max_out = user;
	int n_out = isl_map_dim(map, isl_dim_out);

	if (n_out > *max_out)
		*max_out = n_out;

	isl_map_free(map);
	return 0;
}
/* Internal data structure for map_pad_range.
 *
 * "max_out" is the maximal schedule dimension.
 * "res" collects the results.
 */
struct isl_pad_schedule_map_data {
	int max_out;
	isl_union_map *res;
};
/* Pad the range of the given map with zeros to data->max_out and
 * then add the result to data->res.
 */
static int map_pad_range(__isl_take isl_map *map, void *user)
{
	struct isl_pad_schedule_map_data *data = user;
	int i;
	int n_out = isl_map_dim(map, isl_dim_out);

	map = isl_map_add_dims(map, isl_dim_out, data->max_out - n_out);
	for (i = n_out; i < data->max_out; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, 0);

	data->res = isl_union_map_add_map(data->res, map);
	if (!data->res)
		return -1;

	return 0;
}
/* Pad the ranges of the maps in the union map with zeros such that
 * they all have the same dimension.
 */
static __isl_give isl_union_map *pad_schedule_map(
	__isl_take isl_union_map *umap)
{
	struct isl_pad_schedule_map_data data;

	if (!umap)
		return NULL;
	if (isl_union_map_n_map(umap) <= 1)
		return umap;

	data.max_out = 0;
	if (isl_union_map_foreach_map(umap, &update_max_out, &data.max_out) < 0)
		return isl_union_map_free(umap);

	data.res = isl_union_map_empty(isl_union_map_get_space(umap));
	if (isl_union_map_foreach_map(umap, &map_pad_range, &data) < 0)
		data.res = isl_union_map_free(data.res);

	isl_union_map_free(umap);

	return data.res;
}
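
/* Example of the padding performed above (illustration only, not part of
 * the original source): a union map such as
 *
 *	{ A[i] -> [i, i]; B[i] -> [i] }
 *
 * has ranges of dimension 2 and 1; pad_schedule_map turns it into
 *
 *	{ A[i] -> [i, i]; B[i] -> [i, 0] }
 *
 * so that all maps share the same (maximal) output dimension.
 */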
/* Return the domain of the root domain node of "schedule".
 */
__isl_give isl_union_set *isl_schedule_get_domain(
	__isl_keep isl_schedule *schedule)
{
	if (!schedule)
		return NULL;
	if (!schedule->root)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"schedule tree representation not available",
			return NULL);
	return isl_schedule_tree_domain_get_domain(schedule->root);
}
/* Traverse all nodes of "sched" in depth first preorder.
 *
 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
 * If "fn" returns 0 on any of the nodes, then the subtree rooted
 * at that node is skipped.
 *
 * Return 0 on success and -1 on failure.
 */
int isl_schedule_foreach_schedule_node(__isl_keep isl_schedule *sched,
	int (*fn)(__isl_keep isl_schedule_node *node, void *user), void *user)
{
	isl_schedule_node *node;
	int r;

	if (!sched)
		return -1;

	node = isl_schedule_get_root(sched);
	r = isl_schedule_node_foreach_descendant(node, fn, user);
	isl_schedule_node_free(node);

	return r;
}
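
/* Sketch of a callback for isl_schedule_foreach_schedule_node (illustration
 * only; "count_bands" is a hypothetical helper, not part of this file):
 *
 *	static int count_bands(__isl_keep isl_schedule_node *node, void *user)
 *	{
 *		int *n = user;
 *
 *		if (isl_schedule_node_get_type(node) == isl_schedule_node_band)
 *			++*n;
 *		return 1;
 *	}
 *
 *	int n_band = 0;
 *	isl_schedule_foreach_schedule_node(sched, &count_bands, &n_band);
 *
 * A positive return value continues the traversal into the subtree,
 * 0 skips the subtree and -1 aborts, as documented above.
 */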
/* Traverse the nodes of "schedule" in depth first postorder,
 * allowing the user to modify the visited node.
 * The traversal continues from the node returned by the callback function.
 * It is the responsibility of the user to ensure that this does not
 * lead to an infinite loop.  It is safest to always return a pointer
 * to the same position (same ancestors and child positions) as the input node.
 */
__isl_give isl_schedule *isl_schedule_map_schedule_node(
	__isl_take isl_schedule *schedule,
	__isl_give isl_schedule_node *(*fn)(
		__isl_take isl_schedule_node *node, void *user), void *user)
{
	isl_schedule_node *node;

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);

	node = isl_schedule_node_map_descendant(node, fn, user);
	schedule = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	return schedule;
}
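
/* Sketch of the simplest safe callback for isl_schedule_map_schedule_node
 * (illustration only; "keep_node" is a hypothetical helper, not part of
 * this file): it returns the node at the same position, so the traversal
 * trivially terminates.
 *
 *	static __isl_give isl_schedule_node *keep_node(
 *		__isl_take isl_schedule_node *node, void *user)
 *	{
 *		return node;
 *	}
 *
 *	schedule = isl_schedule_map_schedule_node(schedule, &keep_node, NULL);
 */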
/* Return an isl_union_map representation of the schedule.
 *
 * If we still have access to the schedule tree, then we return
 * an isl_union_map corresponding to the subtree schedule of the child
 * of the root domain node.  That is, we do not intersect the domain
 * of the returned isl_union_map with the domain constraints.
 * Otherwise, we must have removed it because we created a band forest.
 * If so, we extract the isl_union_map from the forest.
 * This reconstructed schedule map
 * then needs to be padded with zeros to unify the schedule space
 * since the result of isl_band_list_get_suffix_schedule may not have
 * a unified schedule space.
 */
__isl_give isl_union_map *isl_schedule_get_map(__isl_keep isl_schedule *sched)
{
	enum isl_schedule_node_type type;
	isl_schedule_node *node;
	isl_union_map *umap;

	if (!sched)
		return NULL;

	if (sched->root) {
		type = isl_schedule_tree_get_type(sched->root);
		if (type != isl_schedule_node_domain)
			isl_die(isl_schedule_get_ctx(sched), isl_error_internal,
				"root node not a domain node", return NULL);

		node = isl_schedule_get_root(sched);
		node = isl_schedule_node_child(node, 0);
		umap = isl_schedule_node_get_subtree_schedule_union_map(node);
		isl_schedule_node_free(node);

		return umap;
	}

	umap = isl_band_list_get_suffix_schedule(sched->band_forest);
	return pad_schedule_map(umap);
}
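
/* For illustration (not part of the original source): for a schedule tree
 * with domain { S[i] : 0 <= i < 10 } and a band with partial schedule
 * [{ S[i] -> [i] }], the call
 *
 *	isl_union_map *umap = isl_schedule_get_map(sched);
 *
 * returns a map of the form { S[i] -> [i] }, without the domain constraints
 * 0 <= i < 10, as explained in the comment above.
 */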
static __isl_give isl_band_list *construct_band_list(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent);
/* Construct an isl_band structure from the given schedule tree node,
 * which may be either a band node or a leaf node.
 * In the latter case, construct a zero-dimensional band.
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band constructed
 * by this function.
 *
 * In case of a band node, we copy the properties (except tilability,
 * which is implicit in an isl_band) to the isl_band.
 * We assume that the band node is not zero-dimensional.
 * If the child of the band node is not a leaf node,
 * then we extract the children of the isl_band from this child.
 */
static __isl_give isl_band *construct_band(__isl_take isl_schedule_node *node,
	__isl_take isl_union_set *domain, __isl_keep isl_band *parent)
{
	int i;
	isl_ctx *ctx;
	isl_band *band = NULL;
	isl_multi_union_pw_aff *mupa;

	if (!node || !domain)
		goto error;

	ctx = isl_schedule_node_get_ctx(node);
	band = isl_band_alloc(ctx);
	if (!band)
		goto error;

	band->schedule = node->schedule;
	band->parent = parent;

	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		band->n = 0;
		band->pma = isl_union_pw_multi_aff_from_domain(domain);
		isl_schedule_node_free(node);
		return band;
	}

	band->n = isl_schedule_node_band_n_member(node);
	if (band->n == 0)
		isl_die(ctx, isl_error_unsupported,
			"zero-dimensional band nodes not supported",
			goto error);
	band->coincident = isl_alloc_array(ctx, int, band->n);
	if (band->n && !band->coincident)
		goto error;
	for (i = 0; i < band->n; ++i)
		band->coincident[i] =
			isl_schedule_node_band_member_get_coincident(node, i);
	mupa = isl_schedule_node_band_get_partial_schedule(node);
	band->pma = isl_union_pw_multi_aff_from_multi_union_pw_aff(mupa);
	if (!band->pma)
		goto error;

	node = isl_schedule_node_child(node, 0);
	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		isl_schedule_node_free(node);
		isl_union_set_free(domain);
		return band;
	}

	band->children = construct_band_list(node, domain, band);
	if (!band->children)
		return isl_band_free(band);

	return band;
error:
	isl_union_set_free(domain);
	isl_schedule_node_free(node);
	isl_band_free(band);
	return NULL;
}
/* Construct a list of isl_band structures from the children of "node".
 * "node" itself is a sequence or set node, so that each of the child nodes
 * is a filter node and the list returned by construct_band_list
 * consists of a single element.
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band structures constructed
 * by this function.
 */
static __isl_give isl_band_list *construct_band_list_from_children(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent)
{
	int i, n;
	isl_ctx *ctx;
	isl_band_list *list;

	n = isl_schedule_node_n_children(node);

	ctx = isl_schedule_node_get_ctx(node);
	list = isl_band_list_alloc(ctx, 0);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_band_list *list_i;

		child = isl_schedule_node_get_child(node, i);
		list_i = construct_band_list(child, isl_union_set_copy(domain),
						parent);
		list = isl_band_list_concat(list, list_i);
	}

	isl_union_set_free(domain);
	isl_schedule_node_free(node);

	return list;
}
/* Construct an isl_band structure from the given sequence node
 * (or set node that is treated as a sequence node).
 * A single-dimensional band is created with as schedule for each of
 * the filters of the children, the corresponding child position.
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band constructed
 * by this function.
 */
static __isl_give isl_band_list *construct_band_list_sequence(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent)
{
	int i, n;
	isl_ctx *ctx;
	isl_band *band = NULL;
	isl_space *space;
	isl_union_pw_multi_aff *upma;

	if (!node || !domain)
		goto error;

	ctx = isl_schedule_node_get_ctx(node);
	band = isl_band_alloc(ctx);
	if (!band)
		goto error;

	band->schedule = node->schedule;
	band->parent = parent;
	band->n = 1;
	band->coincident = isl_calloc_array(ctx, int, band->n);
	if (!band->coincident)
		goto error;

	n = isl_schedule_node_n_children(node);
	space = isl_union_set_get_space(domain);
	upma = isl_union_pw_multi_aff_empty(isl_space_copy(space));

	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, 1);

	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_union_set *filter;
		isl_val *v;
		isl_val_list *vl;
		isl_multi_val *mv;
		isl_union_pw_multi_aff *upma_i;

		child = isl_schedule_node_get_child(node, i);
		filter = isl_schedule_node_filter_get_filter(child);
		isl_schedule_node_free(child);
		filter = isl_union_set_intersect(filter,
						isl_union_set_copy(domain));
		v = isl_val_int_from_si(ctx, i);
		vl = isl_val_list_from_val(v);
		mv = isl_multi_val_from_val_list(isl_space_copy(space), vl);
		upma_i = isl_union_pw_multi_aff_multi_val_on_domain(filter, mv);
		upma = isl_union_pw_multi_aff_union_add(upma, upma_i);
	}

	isl_space_free(space);

	band->pma = upma;
	if (!band->pma)
		goto error;

	band->children = construct_band_list_from_children(node, domain, band);
	if (!band->children)
		band = isl_band_free(band);
	return isl_band_list_from_band(band);
error:
	isl_union_set_free(domain);
	isl_schedule_node_free(node);
	isl_band_free(band);
	return NULL;
}
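
/* Illustration of the encoding above (not part of the original source):
 * for a sequence node with filters { A[i] } and { B[i] } reached by the
 * domain { A[i]; B[i] }, the single-dimensional band constructed here has
 * the piecewise schedule
 *
 *	{ A[i] -> [0]; B[i] -> [1] }
 *
 * i.e., each filter is mapped to its child position.
 */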
/* Construct a list of isl_band structures from "node" depending
 * on the type of "node".
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band structures constructed
 * by this function.
 *
 * If schedule_separate_components is set then set nodes are treated
 * as sequence nodes.  Otherwise, we directly extract an (implicitly
 * parallel) list of isl_band structures.
 *
 * If "node" is a filter, then "domain" is updated by the filter.
 */
static __isl_give isl_band_list *construct_band_list(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent)
{
	enum isl_schedule_node_type type;
	isl_ctx *ctx;
	isl_band *band;
	isl_band_list *list;
	isl_union_set *filter;

	if (!node || !domain)
		goto error;

	type = isl_schedule_node_get_type(node);
	switch (type) {
	case isl_schedule_node_error:
		goto error;
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"internal domain nodes not allowed", goto error);
	case isl_schedule_node_filter:
		filter = isl_schedule_node_filter_get_filter(node);
		domain = isl_union_set_intersect(domain, filter);
		node = isl_schedule_node_child(node, 0);
		return construct_band_list(node, domain, parent);
	case isl_schedule_node_set:
		ctx = isl_schedule_node_get_ctx(node);
		if (isl_options_get_schedule_separate_components(ctx))
			return construct_band_list_sequence(node, domain,
								parent);
		else
			return construct_band_list_from_children(node, domain,
								parent);
	case isl_schedule_node_sequence:
		return construct_band_list_sequence(node, domain, parent);
	case isl_schedule_node_leaf:
	case isl_schedule_node_band:
		band = construct_band(node, domain, parent);
		list = isl_band_list_from_band(band);
		break;
	}

	return list;
error:
	isl_union_set_free(domain);
	isl_schedule_node_free(node);
	return NULL;
}
/* Return the roots of a band forest representation of the schedule.
 * The band forest is constructed from the schedule tree,
 * but once such a band forest is
 * constructed, we forget about the original schedule tree since
 * the user may modify the schedule through the band forest.
 */
__isl_give isl_band_list *isl_schedule_get_band_forest(
	__isl_keep isl_schedule *schedule)
{
	isl_schedule_node *node;
	isl_union_set *domain;

	if (!schedule)
		return NULL;
	if (schedule->root) {
		node = isl_schedule_get_root(schedule);
		domain = isl_schedule_node_domain_get_domain(node);
		domain = isl_union_set_universe(domain);
		node = isl_schedule_node_child(node, 0);

		schedule->band_forest = construct_band_list(node, domain, NULL);
		schedule->root = isl_schedule_tree_free(schedule->root);
	}
	return isl_band_list_dup(schedule->band_forest);
}
/* Call "fn" on each band in the schedule in depth-first post-order.
 */
int isl_schedule_foreach_band(__isl_keep isl_schedule *sched,
	int (*fn)(__isl_keep isl_band *band, void *user), void *user)
{
	int r;
	isl_band_list *forest;

	if (!sched)
		return -1;

	forest = isl_schedule_get_band_forest(sched);
	r = isl_band_list_foreach_band(forest, fn, user);
	isl_band_list_free(forest);

	return r;
}
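
/* Sketch of a callback for isl_schedule_foreach_band (illustration only;
 * "count_members" is a hypothetical helper, not part of this file):
 *
 *	static int count_members(__isl_keep isl_band *band, void *user)
 *	{
 *		int *n = user;
 *
 *		*n += isl_band_n_member(band);
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	isl_schedule_foreach_band(sched, &count_members, &n);
 */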
static __isl_give isl_printer *print_band_list(__isl_take isl_printer *p,
	__isl_keep isl_band_list *list);

static __isl_give isl_printer *print_band(__isl_take isl_printer *p,
	__isl_keep isl_band *band)
{
	isl_band_list *children;

	p = isl_printer_start_line(p);
	p = isl_printer_print_union_pw_multi_aff(p, band->pma);
	p = isl_printer_end_line(p);

	if (!isl_band_has_children(band))
		return p;

	children = isl_band_get_children(band);

	p = isl_printer_indent(p, 4);
	p = print_band_list(p, children);
	p = isl_printer_indent(p, -4);

	isl_band_list_free(children);

	return p;
}
static __isl_give isl_printer *print_band_list(__isl_take isl_printer *p,
	__isl_keep isl_band_list *list)
{
	int i, n;

	n = isl_band_list_n_band(list);
	for (i = 0; i < n; ++i) {
		isl_band *band;

		band = isl_band_list_get_band(list, i);
		p = print_band(p, band);
		isl_band_free(band);
	}

	return p;
}
/* Print "schedule" to "p".
 *
 * If "schedule" was created from a schedule tree, then we print
 * the schedule tree representation.  Otherwise, we print
 * the band forest representation.
 */
__isl_give isl_printer *isl_printer_print_schedule(__isl_take isl_printer *p,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *forest;

	if (!schedule)
		return isl_printer_free(p);

	if (schedule->root)
		return isl_printer_print_schedule_tree(p, schedule->root);

	forest = isl_schedule_get_band_forest(schedule);

	p = print_band_list(p, forest);

	isl_band_list_free(forest);

	return p;
}
void isl_schedule_dump(__isl_keep isl_schedule *schedule)
{
	isl_printer *printer;

	if (!schedule)
		return;

	printer = isl_printer_to_file(isl_schedule_get_ctx(schedule), stderr);
	printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
	printer = isl_printer_print_schedule(printer, schedule);

	isl_printer_free(printer);
}
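
/* Printing to a string instead of stderr (illustration only, not part of
 * the original source) follows the same pattern as isl_schedule_dump:
 *
 *	isl_printer *p = isl_printer_to_str(isl_schedule_get_ctx(schedule));
 *	p = isl_printer_set_yaml_style(p, ISL_YAML_STYLE_BLOCK);
 *	p = isl_printer_print_schedule(p, schedule);
 *	char *str = isl_printer_get_str(p);
 *	isl_printer_free(p);
 */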