/*
 * Copyright 2011      INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl_aff_private.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl_schedule_private.h>
#include <isl_schedule_tree.h>
#include <isl_schedule_node_private.h>
#include <isl_band_private.h>
/* Return a schedule encapsulating the given schedule tree.
 *
 * We currently only allow schedule trees with a domain or extension as root.
 *
 * The leaf field is initialized as a leaf node so that it can be
 * used to represent leaves in the constructed schedule.
 * The reference count is set to -1 since the isl_schedule_tree
 * should never be freed.  It is up to the (internal) users of
 * these leaves to ensure that they are only used while the schedule
 * is still alive.
 */
__isl_give isl_schedule *isl_schedule_from_schedule_tree(isl_ctx *ctx,
	__isl_take isl_schedule_tree *tree)
{
	enum isl_schedule_node_type type;
	isl_schedule *schedule;

	if (!tree)
		return NULL;

	type = isl_schedule_tree_get_type(tree);
	if (type != isl_schedule_node_domain &&
	    type != isl_schedule_node_extension)
		isl_die(isl_schedule_tree_get_ctx(tree), isl_error_unsupported,
			"root of schedule tree should be a domain or extension",
			goto error);

	schedule = isl_calloc_type(ctx, isl_schedule);
	if (!schedule)
		goto error;

	schedule->leaf.ctx = ctx;
	isl_ctx_ref(ctx);
	schedule->ref = 1;
	schedule->root = tree;
	schedule->leaf.ref = -1;
	schedule->leaf.type = isl_schedule_node_leaf;

	return schedule;
error:
	isl_schedule_tree_free(tree);
	return NULL;
}
/* Return a pointer to a schedule with as single node
 * a domain node with the given domain.
 */
__isl_give isl_schedule *isl_schedule_from_domain(
	__isl_take isl_union_set *domain)
{
	isl_ctx *ctx;
	isl_schedule_tree *tree;

	ctx = isl_union_set_get_ctx(domain);
	tree = isl_schedule_tree_from_domain(domain);
	return isl_schedule_from_schedule_tree(ctx, tree);
}
/* Return a pointer to a schedule with as single node
 * a domain node with an empty domain.
 */
__isl_give isl_schedule *isl_schedule_empty(__isl_take isl_space *space)
{
	return isl_schedule_from_domain(isl_union_set_empty(space));
}
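
/* Illustrative sketch of how a caller could build such a single-node
 * schedule; the context setup and the statement name "S" are made up
 * for the example and are not part of this file.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain =
 *		isl_union_set_read_from_str(ctx, "{ S[i] : 0 <= i < 10 }");
 *	isl_schedule *schedule = isl_schedule_from_domain(domain);
 *	...
 *	isl_schedule_free(schedule);
 *	isl_ctx_free(ctx);
 */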
/* Return a new reference to "sched".
 */
__isl_give isl_schedule *isl_schedule_copy(__isl_keep isl_schedule *sched)
{
	if (!sched)
		return NULL;

	sched->ref++;
	return sched;
}
/* Return an isl_schedule that is equal to "schedule" and that has only
 * a single reference.
 *
 * We only need and support this function when the schedule is represented
 * as a schedule tree.
 */
__isl_give isl_schedule *isl_schedule_cow(__isl_take isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_tree *tree;

	if (!schedule)
		return NULL;
	if (schedule->ref == 1)
		return schedule;

	ctx = isl_schedule_get_ctx(schedule);
	if (!schedule->root)
		isl_die(ctx, isl_error_internal,
			"only for schedule tree based schedules",
			return isl_schedule_free(schedule));

	schedule->ref--;
	tree = isl_schedule_tree_copy(schedule->root);
	return isl_schedule_from_schedule_tree(ctx, tree);
}
__isl_null isl_schedule *isl_schedule_free(__isl_take isl_schedule *sched)
{
	if (!sched)
		return NULL;

	if (--sched->ref > 0)
		return NULL;

	isl_band_list_free(sched->band_forest);
	isl_schedule_tree_free(sched->root);
	isl_ctx_deref(sched->leaf.ctx);
	free(sched);
	return NULL;
}
/* Replace the root of "schedule" by "tree".
 */
__isl_give isl_schedule *isl_schedule_set_root(
	__isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree)
{
	if (!schedule || !tree)
		goto error;
	if (schedule->root == tree) {
		isl_schedule_tree_free(tree);
		return schedule;
	}

	schedule = isl_schedule_cow(schedule);
	if (!schedule)
		goto error;
	isl_schedule_tree_free(schedule->root);
	schedule->root = tree;

	return schedule;
error:
	isl_schedule_free(schedule);
	isl_schedule_tree_free(tree);
	return NULL;
}
isl_ctx *isl_schedule_get_ctx(__isl_keep isl_schedule *schedule)
{
	return schedule ? schedule->leaf.ctx : NULL;
}
/* Return a pointer to the leaf of "schedule".
 *
 * Even though these leaves are not reference counted, we still
 * indicate that this function does not return a copy.
 */
__isl_keep isl_schedule_tree *isl_schedule_peek_leaf(
	__isl_keep isl_schedule *schedule)
{
	return schedule ? &schedule->leaf : NULL;
}
/* Are "schedule1" and "schedule2" obviously equal to each other?
 */
isl_bool isl_schedule_plain_is_equal(__isl_keep isl_schedule *schedule1,
	__isl_keep isl_schedule *schedule2)
{
	if (!schedule1 || !schedule2)
		return isl_bool_error;
	if (schedule1 == schedule2)
		return isl_bool_true;
	return isl_schedule_tree_plain_is_equal(schedule1->root,
						schedule2->root);
}
/* Return the (parameter) space of the schedule, i.e., the space
 * of the root domain.
 */
__isl_give isl_space *isl_schedule_get_space(
	__isl_keep isl_schedule *schedule)
{
	enum isl_schedule_node_type type;
	isl_space *space;
	isl_union_set *domain;

	if (!schedule)
		return NULL;
	if (!schedule->root)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"schedule tree representation not available",
			return NULL);
	type = isl_schedule_tree_get_type(schedule->root);
	if (type != isl_schedule_node_domain)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_internal,
			"root node not a domain node", return NULL);

	domain = isl_schedule_tree_domain_get_domain(schedule->root);
	space = isl_union_set_get_space(domain);
	isl_union_set_free(domain);

	return space;
}
/* Return a pointer to the root of "schedule".
 */
__isl_give isl_schedule_node *isl_schedule_get_root(
	__isl_keep isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_tree *tree;
	isl_schedule_tree_list *ancestors;

	if (!schedule)
		return NULL;
	if (!schedule->root)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"schedule tree representation not available",
			return NULL);

	ctx = isl_schedule_get_ctx(schedule);
	tree = isl_schedule_tree_copy(schedule->root);
	schedule = isl_schedule_copy(schedule);
	ancestors = isl_schedule_tree_list_alloc(ctx, 0);
	return isl_schedule_node_alloc(schedule, tree, ancestors, NULL);
}
/* Set max_out to the maximal number of output dimensions over
 * all maps.
 */
static isl_stat update_max_out(__isl_take isl_map *map, void *user)
{
	int *max_out = user;
	int n_out = isl_map_dim(map, isl_dim_out);

	if (n_out > *max_out)
		*max_out = n_out;

	isl_map_free(map);
	return isl_stat_ok;
}
/* Internal data structure for map_pad_range.
 *
 * "max_out" is the maximal schedule dimension.
 * "res" collects the results.
 */
struct isl_pad_schedule_map_data {
	int max_out;
	isl_union_map *res;
};
/* Pad the range of the given map with zeros to data->max_out and
 * then add the result to data->res.
 */
static isl_stat map_pad_range(__isl_take isl_map *map, void *user)
{
	struct isl_pad_schedule_map_data *data = user;
	int i;
	int n_out = isl_map_dim(map, isl_dim_out);

	map = isl_map_add_dims(map, isl_dim_out, data->max_out - n_out);
	for (i = n_out; i < data->max_out; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, 0);

	data->res = isl_union_map_add_map(data->res, map);
	if (!data->res)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Pad the ranges of the maps in the union map with zeros such that
 * they all have the same dimension.
 */
static __isl_give isl_union_map *pad_schedule_map(
	__isl_take isl_union_map *umap)
{
	struct isl_pad_schedule_map_data data;

	if (!umap)
		return NULL;
	if (isl_union_map_n_map(umap) <= 1)
		return umap;

	data.max_out = 0;
	if (isl_union_map_foreach_map(umap, &update_max_out, &data.max_out) < 0)
		return isl_union_map_free(umap);

	data.res = isl_union_map_empty(isl_union_map_get_space(umap));
	if (isl_union_map_foreach_map(umap, &map_pad_range, &data) < 0)
		data.res = isl_union_map_free(data.res);

	isl_union_map_free(umap);
	return data.res;
}
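
/* Illustrative sketch of the padding (the maps and statement names are
 * made up): on input
 *
 *	{ A[i] -> [i]; B[i] -> [i, i] }
 *
 * the maximal number of output dimensions is 2, so the range of the
 * first map is padded with a trailing zero, giving
 *
 *	{ A[i] -> [i, 0]; B[i] -> [i, i] }
 */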
/* Return the domain of the root domain node of "schedule".
 */
__isl_give isl_union_set *isl_schedule_get_domain(
	__isl_keep isl_schedule *schedule)
{
	if (!schedule)
		return NULL;
	if (!schedule->root)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"schedule tree representation not available",
			return NULL);
	return isl_schedule_tree_domain_get_domain(schedule->root);
}
/* Traverse all nodes of "sched" in depth first preorder.
 *
 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
 * If "fn" returns 0 on any of the nodes, then the subtree rooted
 * at that node is skipped.
 *
 * Return 0 on success and -1 on failure.
 */
isl_stat isl_schedule_foreach_schedule_node_top_down(
	__isl_keep isl_schedule *sched,
	isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user),
	void *user)
{
	isl_schedule_node *node;
	isl_stat r;

	if (!sched)
		return isl_stat_error;

	node = isl_schedule_get_root(sched);
	r = isl_schedule_node_foreach_descendant_top_down(node, fn, user);
	isl_schedule_node_free(node);

	return r;
}
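
/* A minimal sketch of a callback for the traversal above; the node
 * counting is hypothetical and only meant to show the expected
 * signature and return values.
 *
 *	static isl_bool count_node(__isl_keep isl_schedule_node *node,
 *		void *user)
 *	{
 *		int *n = user;
 *
 *		(*n)++;
 *		return isl_bool_true;
 *	}
 *
 * Returning isl_bool_true continues into the children of "node",
 * isl_bool_false skips the subtree rooted at "node" and
 * isl_bool_error aborts the traversal.
 */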
/* Traverse the nodes of "schedule" in depth first postorder,
 * allowing the user to modify the visited node.
 * The traversal continues from the node returned by the callback function.
 * It is the responsibility of the user to ensure that this does not
 * lead to an infinite loop.  It is safest to always return a pointer
 * to the same position (same ancestors and child positions) as the input node.
 */
__isl_give isl_schedule *isl_schedule_map_schedule_node_bottom_up(
	__isl_take isl_schedule *schedule,
	__isl_give isl_schedule_node *(*fn)(
		__isl_take isl_schedule_node *node, void *user), void *user)
{
	isl_schedule_node *node;

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);

	node = isl_schedule_node_map_descendant_bottom_up(node, fn, user);
	schedule = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	return schedule;
}
/* Wrapper around isl_schedule_node_reset_user for use as
 * an isl_schedule_map_schedule_node_bottom_up callback.
 */
static __isl_give isl_schedule_node *reset_user(
	__isl_take isl_schedule_node *node, void *user)
{
	return isl_schedule_node_reset_user(node);
}
/* Reset the user pointer on all identifiers of parameters and tuples
 * in the schedule "schedule".
 */
__isl_give isl_schedule *isl_schedule_reset_user(
	__isl_take isl_schedule *schedule)
{
	return isl_schedule_map_schedule_node_bottom_up(schedule, &reset_user,
							NULL);
}
/* Wrapper around isl_schedule_node_align_params for use as
 * an isl_schedule_map_schedule_node_bottom_up callback.
 */
static __isl_give isl_schedule_node *align_params(
	__isl_take isl_schedule_node *node, void *user)
{
	isl_space *space = user;

	return isl_schedule_node_align_params(node, isl_space_copy(space));
}
/* Align the parameters of all nodes in schedule "schedule"
 * to those of "space".
 */
__isl_give isl_schedule *isl_schedule_align_params(
	__isl_take isl_schedule *schedule, __isl_take isl_space *space)
{
	schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
						&align_params, space);
	isl_space_free(space);

	return schedule;
}
/* Wrapper around isl_schedule_node_pullback_union_pw_multi_aff for use as
 * an isl_schedule_map_schedule_node_bottom_up callback.
 */
static __isl_give isl_schedule_node *pullback_upma(
	__isl_take isl_schedule_node *node, void *user)
{
	isl_union_pw_multi_aff *upma = user;

	return isl_schedule_node_pullback_union_pw_multi_aff(node,
					isl_union_pw_multi_aff_copy(upma));
}
/* Compute the pullback of "schedule" by the function represented by "upma".
 * In other words, plug in "upma" in the iteration domains of "schedule".
 *
 * The schedule tree is not allowed to contain any expansion nodes.
 */
__isl_give isl_schedule *isl_schedule_pullback_union_pw_multi_aff(
	__isl_take isl_schedule *schedule,
	__isl_take isl_union_pw_multi_aff *upma)
{
	schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
						&pullback_upma, upma);
	isl_union_pw_multi_aff_free(upma);

	return schedule;
}
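
/* Illustrative sketch of the effect (statement names are made up):
 * if the schedule is defined over domain elements { S[i] } and
 * "upma" is { T[i, j] -> S[i + j] }, then the resulting schedule
 * is expressed in terms of { T[i, j] }, with each T[i, j] scheduled
 * like the S[i + j] it maps to.
 */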
/* Intersect the domain of the schedule "schedule" with "domain".
 * The root of "schedule" is required to be a domain node.
 */
__isl_give isl_schedule *isl_schedule_intersect_domain(
	__isl_take isl_schedule *schedule, __isl_take isl_union_set *domain)
{
	enum isl_schedule_node_type root_type;
	isl_schedule_node *node;

	if (!schedule || !domain)
		goto error;

	root_type = isl_schedule_tree_get_type(schedule->root);
	if (root_type != isl_schedule_node_domain)
		isl_die(isl_schedule_get_ctx(schedule), isl_error_invalid,
			"root node must be a domain node", goto error);

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);
	node = isl_schedule_node_domain_intersect_domain(node, domain);
	schedule = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	return schedule;
error:
	isl_schedule_free(schedule);
	isl_union_set_free(domain);
	return NULL;
}
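
/* Illustrative sketch (sets are made up): if the domain of the root node
 * is { S[i] : 0 <= i < 10 } and "domain" is { S[i] : i < 5 }, then the
 * resulting schedule only covers { S[i] : 0 <= i < 5 }.
 */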
/* Return an isl_union_map representation of the schedule.
 * If we still have access to the schedule tree, then we return
 * an isl_union_map corresponding to the subtree schedule of the child
 * of the root domain node.  That is, we do not intersect the domain
 * of the returned isl_union_map with the domain constraints.
 * Otherwise, we must have removed it because we created a band forest.
 * If so, we extract the isl_union_map from the forest.
 * This reconstructed schedule map
 * then needs to be padded with zeros to unify the schedule space
 * since the result of isl_band_list_get_suffix_schedule may not have
 * a unified schedule space.
 */
__isl_give isl_union_map *isl_schedule_get_map(__isl_keep isl_schedule *sched)
{
	enum isl_schedule_node_type type;
	isl_schedule_node *node;
	isl_union_map *umap;

	if (!sched)
		return NULL;

	if (sched->root) {
		type = isl_schedule_tree_get_type(sched->root);
		if (type != isl_schedule_node_domain)
			isl_die(isl_schedule_get_ctx(sched), isl_error_internal,
				"root node not a domain node", return NULL);

		node = isl_schedule_get_root(sched);
		node = isl_schedule_node_child(node, 0);
		umap = isl_schedule_node_get_subtree_schedule_union_map(node);
		isl_schedule_node_free(node);

		return umap;
	}

	umap = isl_band_list_get_suffix_schedule(sched->band_forest);
	return pad_schedule_map(umap);
}
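
/* Illustrative sketch (names are made up): for a schedule with domain
 * { S[i] : 0 <= i < 10 } and a single band with partial schedule
 * [{ S[i] -> [(i)] }], the returned union map is { S[i] -> [i] }.
 * Note that, as explained above, the domain constraints (0 <= i < 10)
 * are not intersected with this map.
 */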
static __isl_give isl_band_list *construct_band_list(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent);
/* Construct an isl_band structure from the given schedule tree node,
 * which may be either a band node or a leaf node.
 * In the latter case, construct a zero-dimensional band.
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band constructed
 * by this function.
 *
 * In case of a band node, we copy the properties (except tilability,
 * which is implicit in an isl_band) to the isl_band.
 * We assume that the band node is not zero-dimensional.
 * If the child of the band node is not a leaf node,
 * then we extract the children of the isl_band from this child.
 */
static __isl_give isl_band *construct_band(__isl_take isl_schedule_node *node,
	__isl_take isl_union_set *domain, __isl_keep isl_band *parent)
{
	int i;
	isl_ctx *ctx;
	isl_band *band = NULL;
	isl_multi_union_pw_aff *mupa;

	if (!node || !domain)
		goto error;

	ctx = isl_schedule_node_get_ctx(node);
	band = isl_band_alloc(ctx);
	if (!band)
		goto error;

	band->schedule = node->schedule;
	band->parent = parent;

	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		band->n = 0;
		band->pma = isl_union_pw_multi_aff_from_domain(domain);
		isl_schedule_node_free(node);
		return band;
	}

	band->n = isl_schedule_node_band_n_member(node);
	if (band->n == 0)
		isl_die(ctx, isl_error_unsupported,
			"zero-dimensional band nodes not supported",
			goto error);
	band->coincident = isl_alloc_array(ctx, int, band->n);
	if (band->n && !band->coincident)
		goto error;
	for (i = 0; i < band->n; ++i)
		band->coincident[i] =
			isl_schedule_node_band_member_get_coincident(node, i);
	mupa = isl_schedule_node_band_get_partial_schedule(node);
	band->pma = isl_union_pw_multi_aff_from_multi_union_pw_aff(mupa);
	if (!band->pma)
		goto error;

	node = isl_schedule_node_child(node, 0);
	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		isl_schedule_node_free(node);
		isl_union_set_free(domain);
		return band;
	}

	band->children = construct_band_list(node, domain, band);
	if (!band->children)
		return isl_band_free(band);

	return band;
error:
	isl_union_set_free(domain);
	isl_schedule_node_free(node);
	isl_band_free(band);
	return NULL;
}
/* Construct a list of isl_band structures from the children of "node".
 * "node" itself is a sequence or set node, so that each of the child nodes
 * is a filter node and the list returned by construct_band_list
 * consists of a single element.
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band structures constructed
 * by this function.
 */
static __isl_give isl_band_list *construct_band_list_from_children(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent)
{
	int i, n;
	isl_ctx *ctx;
	isl_band_list *list;

	n = isl_schedule_node_n_children(node);

	ctx = isl_schedule_node_get_ctx(node);
	list = isl_band_list_alloc(ctx, 0);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_band_list *list_i;

		child = isl_schedule_node_get_child(node, i);
		list_i = construct_band_list(child, isl_union_set_copy(domain),
						parent);
		list = isl_band_list_concat(list, list_i);
	}

	isl_union_set_free(domain);
	isl_schedule_node_free(node);

	return list;
}
/* Construct an isl_band structure from the given sequence node
 * (or set node that is treated as a sequence node).
 * A single-dimensional band is created with, as schedule for each of
 * the filters of the children, the corresponding child position.
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band constructed
 * by this function.
 */
static __isl_give isl_band_list *construct_band_list_sequence(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent)
{
	int i, n;
	isl_ctx *ctx;
	isl_band *band = NULL;
	isl_space *space;
	isl_union_pw_multi_aff *upma;

	if (!node || !domain)
		goto error;

	ctx = isl_schedule_node_get_ctx(node);
	band = isl_band_alloc(ctx);
	if (!band)
		goto error;

	band->schedule = node->schedule;
	band->parent = parent;
	band->n = 1;
	band->coincident = isl_calloc_array(ctx, int, band->n);
	if (!band->coincident)
		goto error;

	n = isl_schedule_node_n_children(node);
	space = isl_union_set_get_space(domain);
	upma = isl_union_pw_multi_aff_empty(isl_space_copy(space));

	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, 1);

	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_union_set *filter;
		isl_val *v;
		isl_val_list *vl;
		isl_multi_val *mv;
		isl_union_pw_multi_aff *upma_i;

		child = isl_schedule_node_get_child(node, i);
		filter = isl_schedule_node_filter_get_filter(child);
		isl_schedule_node_free(child);
		filter = isl_union_set_intersect(filter,
						isl_union_set_copy(domain));
		v = isl_val_int_from_si(ctx, i);
		vl = isl_val_list_from_val(v);
		mv = isl_multi_val_from_val_list(isl_space_copy(space), vl);
		upma_i = isl_union_pw_multi_aff_multi_val_on_domain(filter, mv);
		upma = isl_union_pw_multi_aff_union_add(upma, upma_i);
	}

	isl_space_free(space);

	band->pma = upma;
	if (!band->pma)
		goto error;

	band->children = construct_band_list_from_children(node, domain, band);
	if (!band->children)
		band = isl_band_free(band);
	return isl_band_list_from_band(band);
error:
	isl_union_set_free(domain);
	isl_schedule_node_free(node);
	isl_band_free(band);
	return NULL;
}
/* Construct a list of isl_band structures from "node" depending
 * on the type of "node".
 * "domain" is the universe set of the domain elements that reach "node".
 * "parent" is the parent isl_band of the isl_band structures constructed
 * by this function.
 *
 * If schedule_separate_components is set then set nodes are treated
 * as sequence nodes.  Otherwise, we directly extract an (implicitly
 * parallel) list of isl_band structures.
 *
 * If "node" is a filter, then "domain" is updated by the filter.
 */
static __isl_give isl_band_list *construct_band_list(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *domain,
	__isl_keep isl_band *parent)
{
	enum isl_schedule_node_type type;
	isl_ctx *ctx;
	isl_band *band;
	isl_band_list *list;
	isl_union_set *filter;

	if (!node || !domain)
		goto error;

	type = isl_schedule_node_get_type(node);
	switch (type) {
	case isl_schedule_node_error:
		goto error;
	case isl_schedule_node_context:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"context nodes not supported", goto error);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"internal domain nodes not allowed", goto error);
	case isl_schedule_node_expansion:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"expansion nodes not supported", goto error);
	case isl_schedule_node_extension:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"extension nodes not supported", goto error);
	case isl_schedule_node_filter:
		filter = isl_schedule_node_filter_get_filter(node);
		domain = isl_union_set_intersect(domain, filter);
		node = isl_schedule_node_child(node, 0);
		return construct_band_list(node, domain, parent);
	case isl_schedule_node_guard:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"guard nodes not supported", goto error);
	case isl_schedule_node_mark:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"mark nodes not supported", goto error);
	case isl_schedule_node_set:
		ctx = isl_schedule_node_get_ctx(node);
		if (isl_options_get_schedule_separate_components(ctx))
			return construct_band_list_sequence(node, domain,
								parent);
		return construct_band_list_from_children(node, domain,
								parent);
	case isl_schedule_node_sequence:
		return construct_band_list_sequence(node, domain, parent);
	case isl_schedule_node_leaf:
	case isl_schedule_node_band:
		band = construct_band(node, domain, parent);
		list = isl_band_list_from_band(band);
		break;
	}

	return list;
error:
	isl_union_set_free(domain);
	isl_schedule_node_free(node);
	return NULL;
}
/* Return the roots of a band forest representation of the schedule.
 * The band forest is constructed from the schedule tree,
 * but once such a band forest has been constructed,
 * we forget about the original schedule tree since
 * the user may modify the schedule through the band forest.
 */
__isl_give isl_band_list *isl_schedule_get_band_forest(
	__isl_keep isl_schedule *schedule)
{
	isl_schedule_node *node;
	isl_union_set *domain;

	if (!schedule)
		return NULL;
	if (schedule->root) {
		node = isl_schedule_get_root(schedule);
		domain = isl_schedule_node_domain_get_domain(node);
		domain = isl_union_set_universe(domain);
		node = isl_schedule_node_child(node, 0);

		schedule->band_forest = construct_band_list(node, domain, NULL);
		schedule->root = isl_schedule_tree_free(schedule->root);
	}
	return isl_band_list_dup(schedule->band_forest);
}
/* Call "fn" on each band in the schedule in depth-first post-order.
 */
int isl_schedule_foreach_band(__isl_keep isl_schedule *sched,
	int (*fn)(__isl_keep isl_band *band, void *user), void *user)
{
	int r;
	isl_band_list *forest;

	if (!sched)
		return -1;

	forest = isl_schedule_get_band_forest(sched);
	r = isl_band_list_foreach_band(forest, fn, user);
	isl_band_list_free(forest);

	return r;
}
static __isl_give isl_printer *print_band_list(__isl_take isl_printer *p,
	__isl_keep isl_band_list *list);
static __isl_give isl_printer *print_band(__isl_take isl_printer *p,
	__isl_keep isl_band *band)
{
	isl_band_list *children;

	p = isl_printer_start_line(p);
	p = isl_printer_print_union_pw_multi_aff(p, band->pma);
	p = isl_printer_end_line(p);

	if (!isl_band_has_children(band))
		return p;

	children = isl_band_get_children(band);

	p = isl_printer_indent(p, 4);
	p = print_band_list(p, children);
	p = isl_printer_indent(p, -4);

	isl_band_list_free(children);

	return p;
}
static __isl_give isl_printer *print_band_list(__isl_take isl_printer *p,
	__isl_keep isl_band_list *list)
{
	int i, n;

	n = isl_band_list_n_band(list);
	for (i = 0; i < n; ++i) {
		isl_band *band;
		band = isl_band_list_get_band(list, i);
		p = print_band(p, band);
		isl_band_free(band);
	}

	return p;
}
/* Insert a band node with partial schedule "partial" between the domain
 * root node of "schedule" and its single child.
 * Return a pointer to the updated schedule.
 *
 * If any of the nodes in the tree depend on the set of outer band nodes
 * then we refuse to insert the band node.
 */
__isl_give isl_schedule *isl_schedule_insert_partial_schedule(
	__isl_take isl_schedule *schedule,
	__isl_take isl_multi_union_pw_aff *partial)
{
	isl_schedule_node *node;
	int anchored;

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);
	if (!node)
		goto error;
	if (isl_schedule_node_get_type(node) != isl_schedule_node_domain)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"root node not a domain node", goto error);

	node = isl_schedule_node_child(node, 0);
	anchored = isl_schedule_node_is_subtree_anchored(node);
	if (anchored < 0)
		goto error;
	if (anchored)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"cannot insert band node in anchored subtree",
			goto error);
	node = isl_schedule_node_insert_partial_schedule(node, partial);

	schedule = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	return schedule;
error:
	isl_schedule_node_free(node);
	isl_multi_union_pw_aff_free(partial);
	return NULL;
}
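
/* Illustrative usage sketch; the context, schedule and string are made up
 * and the string is only meant to suggest a one-dimensional schedule [i]
 * defined on domain elements S[i].
 *
 *	isl_multi_union_pw_aff *mupa =
 *		isl_multi_union_pw_aff_read_from_str(ctx,
 *			"[{ S[i] -> [(i)] }]");
 *	schedule = isl_schedule_insert_partial_schedule(schedule, mupa);
 */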
/* Insert a context node with constraints "context" between the domain
 * root node of "schedule" and its single child.
 * Return a pointer to the updated schedule.
 */
__isl_give isl_schedule *isl_schedule_insert_context(
	__isl_take isl_schedule *schedule, __isl_take isl_set *context)
{
	isl_schedule_node *node;

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);
	node = isl_schedule_node_child(node, 0);
	node = isl_schedule_node_insert_context(node, context);
	schedule = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	return schedule;
}
/* Insert a guard node with constraints "guard" between the domain
 * root node of "schedule" and its single child.
 * Return a pointer to the updated schedule.
 */
__isl_give isl_schedule *isl_schedule_insert_guard(
	__isl_take isl_schedule *schedule, __isl_take isl_set *guard)
{
	isl_schedule_node *node;

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);
	node = isl_schedule_node_child(node, 0);
	node = isl_schedule_node_insert_guard(node, guard);
	schedule = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	return schedule;
}
/* Return a tree with as top-level node a filter corresponding to "filter" and
 * as child, the (single) child of "tree".
 * However, if this single child is of type "type", then the filter is inserted
 * in the children of this single child instead.
 */
static __isl_give isl_schedule_tree *insert_filter_in_child_of_type(
	__isl_take isl_schedule_tree *tree, __isl_take isl_union_set *filter,
	enum isl_schedule_node_type type)
{
	if (!isl_schedule_tree_has_children(tree)) {
		isl_schedule_tree_free(tree);
		return isl_schedule_tree_from_filter(filter);
	}
	tree = isl_schedule_tree_child(tree, 0);

	if (isl_schedule_tree_get_type(tree) == type)
		tree = isl_schedule_tree_children_insert_filter(tree, filter);
	else
		tree = isl_schedule_tree_insert_filter(tree, filter);

	return tree;
}
/* Construct a schedule that combines the schedules "schedule1" and "schedule2"
 * with a top-level node (underneath the domain node) of type "type",
 * either isl_schedule_node_sequence or isl_schedule_node_set.
 * The domains of the two schedules are assumed to be disjoint.
 *
 * The new schedule has as domain the union of the domains of the two
 * schedules.  The child of the domain node is a node of type "type"
 * with two filters corresponding to the domains of the input schedules.
 * If one (or both) of the top-level nodes of the two schedules is itself
 * of type "type", then the filter is pushed into the children of that
 * node and the sequence or set is flattened.
 */
__isl_give isl_schedule *isl_schedule_pair(enum isl_schedule_node_type type,
	__isl_take isl_schedule *schedule1, __isl_take isl_schedule *schedule2)
{
	int disjoint;
	isl_ctx *ctx;
	enum isl_schedule_node_type root_type;
	isl_schedule_tree *tree1, *tree2;
	isl_union_set *filter1, *filter2, *domain;

	if (!schedule1 || !schedule2)
		goto error;

	root_type = isl_schedule_tree_get_type(schedule1->root);
	if (root_type != isl_schedule_node_domain)
		isl_die(isl_schedule_get_ctx(schedule1), isl_error_internal,
			"root node not a domain node", goto error);
	root_type = isl_schedule_tree_get_type(schedule2->root);
	if (root_type != isl_schedule_node_domain)
		isl_die(isl_schedule_get_ctx(schedule1), isl_error_internal,
			"root node not a domain node", goto error);

	ctx = isl_schedule_get_ctx(schedule1);
	tree1 = isl_schedule_tree_copy(schedule1->root);
	filter1 = isl_schedule_tree_domain_get_domain(tree1);
	tree2 = isl_schedule_tree_copy(schedule2->root);
	filter2 = isl_schedule_tree_domain_get_domain(tree2);

	isl_schedule_free(schedule1);
	isl_schedule_free(schedule2);

	disjoint = isl_union_set_is_disjoint(filter1, filter2);
	if (disjoint < 0)
		filter1 = isl_union_set_free(filter1);
	if (!disjoint)
		isl_die(ctx, isl_error_invalid,
			"schedule domains not disjoint",
			filter1 = isl_union_set_free(filter1));

	domain = isl_union_set_union(isl_union_set_copy(filter1),
					isl_union_set_copy(filter2));
	filter1 = isl_union_set_gist(filter1, isl_union_set_copy(domain));
	filter2 = isl_union_set_gist(filter2, isl_union_set_copy(domain));

	tree1 = insert_filter_in_child_of_type(tree1, filter1, type);
	tree2 = insert_filter_in_child_of_type(tree2, filter2, type);

	tree1 = isl_schedule_tree_from_pair(type, tree1, tree2);
	tree1 = isl_schedule_tree_insert_domain(tree1, domain);

	return isl_schedule_from_schedule_tree(ctx, tree1);
error:
	isl_schedule_free(schedule1);
	isl_schedule_free(schedule2);
	return NULL;
}
/* Construct a schedule that combines the schedules "schedule1" and "schedule2"
 * through a sequence node.
 * The domains of the input schedules are assumed to be disjoint.
 */
__isl_give isl_schedule *isl_schedule_sequence(
	__isl_take isl_schedule *schedule1, __isl_take isl_schedule *schedule2)
{
	return isl_schedule_pair(isl_schedule_node_sequence,
				schedule1, schedule2);
}
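
/* Illustrative sketch (the context and domains are made up and assumed
 * to be disjoint):
 *
 *	isl_schedule *s1 = isl_schedule_from_domain(
 *		isl_union_set_read_from_str(ctx, "{ A[i] : 0 <= i < 10 }"));
 *	isl_schedule *s2 = isl_schedule_from_domain(
 *		isl_union_set_read_from_str(ctx, "{ B[i] : 0 <= i < 10 }"));
 *	isl_schedule *seq = isl_schedule_sequence(s1, s2);
 *
 * The result has a domain node over the union of the two domains and,
 * underneath it, a sequence node with one filter per input schedule.
 */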
/* Construct a schedule that combines the schedules "schedule1" and "schedule2"
 * through a set node.
 * The domains of the input schedules are assumed to be disjoint.
 */
__isl_give isl_schedule *isl_schedule_set(
	__isl_take isl_schedule *schedule1, __isl_take isl_schedule *schedule2)
{
	return isl_schedule_pair(isl_schedule_node_set, schedule1, schedule2);
}
/* Print "schedule" to "p".
 *
 * If "schedule" was created from a schedule tree, then we print
 * the schedule tree representation.  Otherwise, we print
 * the band forest representation.
 */
__isl_give isl_printer *isl_printer_print_schedule(__isl_take isl_printer *p,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *forest;

	if (!schedule)
		return isl_printer_free(p);

	if (schedule->root)
		return isl_printer_print_schedule_tree(p, schedule->root);

	forest = isl_schedule_get_band_forest(schedule);

	p = print_band_list(p, forest);

	isl_band_list_free(forest);

	return p;
}
void isl_schedule_dump(__isl_keep isl_schedule *schedule)
{
	isl_printer *printer;

	if (!schedule)
		return;

	printer = isl_printer_to_file(isl_schedule_get_ctx(schedule), stderr);
	printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
	printer = isl_printer_print_schedule(printer, schedule);

	isl_printer_free(printer);
}