/*
 * Copyright 2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
11 #include <isl_schedule_band.h>
12 #include <isl_schedule_private.h>
13 #include <isl_schedule_node_private.h>
15 /* Create a new schedule node in the given schedule, point at the given
16 * tree with given ancestors and child positions.
17 * "child_pos" may be NULL if there are no ancestors.
19 __isl_give isl_schedule_node
*isl_schedule_node_alloc(
20 __isl_take isl_schedule
*schedule
, __isl_take isl_schedule_tree
*tree
,
21 __isl_take isl_schedule_tree_list
*ancestors
, int *child_pos
)
24 isl_schedule_node
*node
;
27 if (!schedule
|| !tree
|| !ancestors
)
29 n
= isl_schedule_tree_list_n_schedule_tree(ancestors
);
30 if (n
> 0 && !child_pos
)
32 ctx
= isl_schedule_get_ctx(schedule
);
33 node
= isl_calloc_type(ctx
, isl_schedule_node
);
37 node
->schedule
= schedule
;
39 node
->ancestors
= ancestors
;
40 node
->child_pos
= isl_alloc_array(ctx
, int, n
);
41 if (n
&& !node
->child_pos
)
42 return isl_schedule_node_free(node
);
43 for (i
= 0; i
< n
; ++i
)
44 node
->child_pos
[i
] = child_pos
[i
];
48 isl_schedule_free(schedule
);
49 isl_schedule_tree_free(tree
);
50 isl_schedule_tree_list_free(ancestors
);
54 /* Return a pointer to the root of a schedule tree with as single
55 * node a domain node with the given domain.
57 __isl_give isl_schedule_node
*isl_schedule_node_from_domain(
58 __isl_take isl_union_set
*domain
)
60 isl_schedule
*schedule
;
61 isl_schedule_node
*node
;
63 schedule
= isl_schedule_from_domain(domain
);
64 node
= isl_schedule_get_root(schedule
);
65 isl_schedule_free(schedule
);
70 /* Return the isl_ctx to which "node" belongs.
72 isl_ctx
*isl_schedule_node_get_ctx(__isl_keep isl_schedule_node
*node
)
74 return node
? isl_schedule_get_ctx(node
->schedule
) : NULL
;
77 /* Return a pointer to the leaf of the schedule into which "node" points.
79 * Even though these leaves are not reference counted, we still
80 * indicate that this function does not return a copy.
82 __isl_keep isl_schedule_tree
*isl_schedule_node_peek_leaf(
83 __isl_keep isl_schedule_node
*node
)
85 return node
? isl_schedule_peek_leaf(node
->schedule
) : NULL
;
88 /* Return a pointer to the leaf of the schedule into which "node" points.
90 * Even though these leaves are not reference counted, we still
91 * return a "copy" of the leaf here such that it can still be "freed"
94 __isl_give isl_schedule_tree
*isl_schedule_node_get_leaf(
95 __isl_keep isl_schedule_node
*node
)
97 return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node
));
100 /* Return the type of the node or isl_schedule_node_error on error.
102 enum isl_schedule_node_type
isl_schedule_node_get_type(
103 __isl_keep isl_schedule_node
*node
)
105 return node
? isl_schedule_tree_get_type(node
->tree
)
106 : isl_schedule_node_error
;
109 /* Return the type of the parent of "node" or isl_schedule_node_error on error.
111 enum isl_schedule_node_type
isl_schedule_node_get_parent_type(
112 __isl_keep isl_schedule_node
*node
)
116 isl_schedule_tree
*parent
;
117 enum isl_schedule_node_type type
;
120 return isl_schedule_node_error
;
121 has_parent
= isl_schedule_node_has_parent(node
);
123 return isl_schedule_node_error
;
125 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
126 "node has no parent", return isl_schedule_node_error
);
128 pos
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
) - 1;
129 parent
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
, pos
);
130 type
= isl_schedule_tree_get_type(parent
);
131 isl_schedule_tree_free(parent
);
136 /* Return a copy of the subtree that this node points to.
138 __isl_give isl_schedule_tree
*isl_schedule_node_get_tree(
139 __isl_keep isl_schedule_node
*node
)
144 return isl_schedule_tree_copy(node
->tree
);
147 /* Return a copy of the schedule into which "node" points.
149 __isl_give isl_schedule
*isl_schedule_node_get_schedule(
150 __isl_keep isl_schedule_node
*node
)
154 return isl_schedule_copy(node
->schedule
);
157 /* Return a fresh copy of "node".
159 __isl_take isl_schedule_node
*isl_schedule_node_dup(
160 __isl_keep isl_schedule_node
*node
)
165 return isl_schedule_node_alloc(isl_schedule_copy(node
->schedule
),
166 isl_schedule_tree_copy(node
->tree
),
167 isl_schedule_tree_list_copy(node
->ancestors
),
171 /* Return an isl_schedule_node that is equal to "node" and that has only
172 * a single reference.
174 __isl_give isl_schedule_node
*isl_schedule_node_cow(
175 __isl_take isl_schedule_node
*node
)
183 return isl_schedule_node_dup(node
);
186 /* Return a new reference to "node".
188 __isl_give isl_schedule_node
*isl_schedule_node_copy(
189 __isl_keep isl_schedule_node
*node
)
198 /* Free "node" and return NULL.
200 * Since the node may point to a leaf of its schedule, which
201 * point to a field inside the schedule, we need to make sure
202 * we free the tree before freeing the schedule.
204 __isl_null isl_schedule_node
*isl_schedule_node_free(
205 __isl_take isl_schedule_node
*node
)
212 isl_schedule_tree_list_free(node
->ancestors
);
213 free(node
->child_pos
);
214 isl_schedule_tree_free(node
->tree
);
215 isl_schedule_free(node
->schedule
);
221 /* Do "node1" and "node2" point to the same position in the same
224 int isl_schedule_node_is_equal(__isl_keep isl_schedule_node
*node1
,
225 __isl_keep isl_schedule_node
*node2
)
229 if (!node1
|| !node2
)
233 if (node1
->schedule
!= node2
->schedule
)
236 n1
= isl_schedule_node_get_tree_depth(node1
);
237 n2
= isl_schedule_node_get_tree_depth(node2
);
240 for (i
= 0; i
< n1
; ++i
)
241 if (node1
->child_pos
[i
] != node2
->child_pos
[i
])
247 /* Return the number of outer schedule dimensions of "node"
248 * in its schedule tree.
250 * Return -1 on error.
252 int isl_schedule_node_get_schedule_depth(__isl_keep isl_schedule_node
*node
)
260 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
261 for (i
= n
- 1; i
>= 0; --i
) {
262 isl_schedule_tree
*tree
;
264 tree
= isl_schedule_tree_list_get_schedule_tree(
268 if (tree
->type
== isl_schedule_node_band
)
269 depth
+= isl_schedule_tree_band_n_member(tree
);
270 isl_schedule_tree_free(tree
);
276 /* Internal data structure for
277 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
279 * "initialized" is set if the filter field has been initialized.
280 * "universe_filter" is set if we are only collecting the universes of filters
281 * "collect_prefix" is set if we are collecting prefixes.
282 * "filter" collects all outer filters and is NULL until "initialized" is set.
283 * "prefix" collects all outer band partial schedules (if "collect_prefix"
284 * is set). If it is used, then it is initialized by the caller
285 * of collect_filter_prefix to a zero-dimensional function.
287 struct isl_schedule_node_get_filter_prefix_data
{
291 isl_union_set
*filter
;
292 isl_multi_union_pw_aff
*prefix
;
295 /* Update "data" based on the tree node "tree" in case "data" has
296 * not been initialized yet.
298 * Return 0 on success and -1 on error.
300 * If "tree" is a filter, then we set data->filter to this filter
302 * If "tree" is a domain, then this means we have reached the root
303 * of the schedule tree without being able to extract any information.
304 * We therefore initialize data->filter to the universe of the domain.
305 * If "tree" is a band with at least one member, then we set data->filter
306 * to the universe of the schedule domain and replace the zero-dimensional
307 * data->prefix by the band schedule (if data->collect_prefix is set).
309 static int collect_filter_prefix_init(__isl_keep isl_schedule_tree
*tree
,
310 struct isl_schedule_node_get_filter_prefix_data
*data
)
312 enum isl_schedule_node_type type
;
313 isl_multi_union_pw_aff
*mupa
;
314 isl_union_set
*filter
;
316 type
= isl_schedule_tree_get_type(tree
);
318 case isl_schedule_node_error
:
320 case isl_schedule_node_leaf
:
321 case isl_schedule_node_sequence
:
322 case isl_schedule_node_set
:
324 case isl_schedule_node_domain
:
325 filter
= isl_schedule_tree_domain_get_domain(tree
);
326 filter
= isl_union_set_universe(filter
);
327 data
->filter
= filter
;
329 case isl_schedule_node_band
:
330 if (isl_schedule_tree_band_n_member(tree
) == 0)
332 mupa
= isl_schedule_tree_band_get_partial_schedule(tree
);
333 if (data
->collect_prefix
) {
334 isl_multi_union_pw_aff_free(data
->prefix
);
335 mupa
= isl_multi_union_pw_aff_reset_tuple_id(mupa
,
337 data
->prefix
= isl_multi_union_pw_aff_copy(mupa
);
339 filter
= isl_multi_union_pw_aff_domain(mupa
);
340 filter
= isl_union_set_universe(filter
);
341 data
->filter
= filter
;
343 case isl_schedule_node_filter
:
344 filter
= isl_schedule_tree_filter_get_filter(tree
);
345 if (data
->universe_filter
)
346 filter
= isl_union_set_universe(filter
);
347 data
->filter
= filter
;
351 if ((data
->collect_prefix
&& !data
->prefix
) || !data
->filter
)
354 data
->initialized
= 1;
359 /* Update "data" based on the tree node "tree" in case "data" has
360 * already been initialized.
362 * Return 0 on success and -1 on error.
364 * If "tree" is a filter, then we intersect data->filter with this filter
366 * If "tree" is a band with at least one member and data->collect_prefix
367 * is set, then we extend data->prefix with the band schedule.
369 static int collect_filter_prefix_update(__isl_keep isl_schedule_tree
*tree
,
370 struct isl_schedule_node_get_filter_prefix_data
*data
)
372 enum isl_schedule_node_type type
;
373 isl_multi_union_pw_aff
*mupa
;
374 isl_union_set
*filter
;
376 type
= isl_schedule_tree_get_type(tree
);
378 case isl_schedule_node_error
:
380 case isl_schedule_node_domain
:
381 case isl_schedule_node_leaf
:
382 case isl_schedule_node_sequence
:
383 case isl_schedule_node_set
:
385 case isl_schedule_node_band
:
386 if (isl_schedule_tree_band_n_member(tree
) == 0)
388 if (!data
->collect_prefix
)
390 mupa
= isl_schedule_tree_band_get_partial_schedule(tree
);
391 data
->prefix
= isl_multi_union_pw_aff_flat_range_product(mupa
,
396 case isl_schedule_node_filter
:
397 filter
= isl_schedule_tree_filter_get_filter(tree
);
398 if (data
->universe_filter
)
399 filter
= isl_union_set_universe(filter
);
400 data
->filter
= isl_union_set_intersect(data
->filter
, filter
);
409 /* Collect filter and/or prefix information from the elements
410 * in "list" (which represent the ancestors of a node).
411 * Store the results in "data".
413 * Return 0 on success and -1 on error.
415 * We traverse the list from innermost ancestor (last element)
416 * to outermost ancestor (first element), calling collect_filter_prefix_init
417 * on each node as long as we have not been able to extract any information
418 * yet and collect_filter_prefix_update afterwards.
419 * On successful return, data->initialized will be set since the outermost
420 * ancestor is a domain node, which always results in an initialization.
422 static int collect_filter_prefix(__isl_keep isl_schedule_tree_list
*list
,
423 struct isl_schedule_node_get_filter_prefix_data
*data
)
427 data
->initialized
= 0;
433 n
= isl_schedule_tree_list_n_schedule_tree(list
);
434 for (i
= n
- 1; i
>= 0; --i
) {
435 isl_schedule_tree
*tree
;
438 tree
= isl_schedule_tree_list_get_schedule_tree(list
, i
);
441 if (!data
->initialized
)
442 r
= collect_filter_prefix_init(tree
, data
);
444 r
= collect_filter_prefix_update(tree
, data
);
445 isl_schedule_tree_free(tree
);
453 /* Return the concatenation of the partial schedules of all outer band
454 * nodes of "node" interesected with all outer filters
455 * as an isl_union_pw_multi_aff.
457 * If "node" is pointing at the root of the schedule tree, then
458 * there are no domain elements reaching the current node, so
459 * we return an empty result.
461 * We collect all the filters and partial schedules in collect_filter_prefix.
462 * The partial schedules are collected as an isl_multi_union_pw_aff.
463 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
464 * contain any domain information, so we construct the isl_union_pw_multi_aff
465 * result as a zero-dimensional function on the collected filter.
466 * Otherwise, we convert the isl_multi_union_pw_aff to
467 * an isl_multi_union_pw_aff and intersect the domain with the filter.
469 __isl_give isl_union_pw_multi_aff
*
470 isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
471 __isl_keep isl_schedule_node
*node
)
474 isl_union_pw_multi_aff
*prefix
;
475 struct isl_schedule_node_get_filter_prefix_data data
;
480 space
= isl_schedule_get_space(node
->schedule
);
481 if (node
->tree
== node
->schedule
->root
)
482 return isl_union_pw_multi_aff_empty(space
);
484 space
= isl_space_set_from_params(space
);
485 data
.universe_filter
= 0;
486 data
.collect_prefix
= 1;
487 data
.prefix
= isl_multi_union_pw_aff_zero(space
);
489 if (collect_filter_prefix(node
->ancestors
, &data
) < 0)
490 data
.prefix
= isl_multi_union_pw_aff_free(data
.prefix
);
493 isl_multi_union_pw_aff_dim(data
.prefix
, isl_dim_set
) == 0) {
494 isl_multi_union_pw_aff_free(data
.prefix
);
495 prefix
= isl_union_pw_multi_aff_from_domain(data
.filter
);
498 isl_union_pw_multi_aff_from_multi_union_pw_aff(data
.prefix
);
499 prefix
= isl_union_pw_multi_aff_intersect_domain(prefix
,
506 /* Return the concatenation of the partial schedules of all outer band
507 * nodes of "node" interesected with all outer filters
508 * as an isl_union_map.
510 __isl_give isl_union_map
*isl_schedule_node_get_prefix_schedule_union_map(
511 __isl_keep isl_schedule_node
*node
)
513 isl_union_pw_multi_aff
*upma
;
515 upma
= isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node
);
516 return isl_union_map_from_union_pw_multi_aff(upma
);
519 /* Return the union of universe sets of the domain elements that reach "node".
521 * If "node" is pointing at the root of the schedule tree, then
522 * there are no domain elements reaching the current node, so
523 * we return an empty result.
525 * Otherwise, we collect the universes of all filters reaching the node
526 * in collect_filter_prefix.
528 __isl_give isl_union_set
*isl_schedule_node_get_universe_domain(
529 __isl_keep isl_schedule_node
*node
)
531 struct isl_schedule_node_get_filter_prefix_data data
;
536 if (node
->tree
== node
->schedule
->root
) {
539 space
= isl_schedule_get_space(node
->schedule
);
540 return isl_union_set_empty(space
);
543 data
.universe_filter
= 1;
544 data
.collect_prefix
= 0;
547 if (collect_filter_prefix(node
->ancestors
, &data
) < 0)
548 data
.filter
= isl_union_set_free(data
.filter
);
553 /* Return the subtree schedule of "node".
555 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
556 * trees that do not contain any schedule information, we first
557 * move down to the first relevant descendant and handle leaves ourselves.
559 __isl_give isl_union_map
*isl_schedule_node_get_subtree_schedule_union_map(
560 __isl_keep isl_schedule_node
*node
)
562 isl_schedule_tree
*tree
, *leaf
;
565 tree
= isl_schedule_node_get_tree(node
);
566 leaf
= isl_schedule_node_peek_leaf(node
);
567 tree
= isl_schedule_tree_first_schedule_descendant(tree
, leaf
);
571 isl_union_set
*domain
;
572 domain
= isl_schedule_node_get_universe_domain(node
);
573 isl_schedule_tree_free(tree
);
574 return isl_union_map_from_domain(domain
);
577 umap
= isl_schedule_tree_get_subtree_schedule_union_map(tree
);
578 isl_schedule_tree_free(tree
);
582 /* Return the number of ancestors of "node" in its schedule tree.
584 int isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node
*node
)
588 return isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
591 /* Does "node" have a parent?
593 * That is, does it point to any node of the schedule other than the root?
595 int isl_schedule_node_has_parent(__isl_keep isl_schedule_node
*node
)
599 if (!node
->ancestors
)
602 return isl_schedule_tree_list_n_schedule_tree(node
->ancestors
) != 0;
605 /* Return the position of "node" among the children of its parent.
607 int isl_schedule_node_get_child_position(__isl_keep isl_schedule_node
*node
)
614 has_parent
= isl_schedule_node_has_parent(node
);
618 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
619 "node has no parent", return -1);
621 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
622 return node
->child_pos
[n
- 1];
625 /* Does the parent (if any) of "node" have any children with a smaller child
626 * position than this one?
628 int isl_schedule_node_has_previous_sibling(__isl_keep isl_schedule_node
*node
)
635 has_parent
= isl_schedule_node_has_parent(node
);
636 if (has_parent
< 0 || !has_parent
)
639 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
641 return node
->child_pos
[n
- 1] > 0;
644 /* Does the parent (if any) of "node" have any children with a greater child
645 * position than this one?
647 int isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node
*node
)
651 isl_schedule_tree
*tree
;
655 has_parent
= isl_schedule_node_has_parent(node
);
656 if (has_parent
< 0 || !has_parent
)
659 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
660 tree
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
, n
- 1);
663 n_child
= isl_schedule_tree_list_n_schedule_tree(tree
->children
);
664 isl_schedule_tree_free(tree
);
666 return node
->child_pos
[n
- 1] + 1 < n_child
;
669 /* Does "node" have any children?
671 * Any node other than the leaf nodes is considered to have at least
672 * one child, even if the corresponding isl_schedule_tree does not
675 int isl_schedule_node_has_children(__isl_keep isl_schedule_node
*node
)
679 return !isl_schedule_tree_is_leaf(node
->tree
);
682 /* Return the number of children of "node"?
684 * Any node other than the leaf nodes is considered to have at least
685 * one child, even if the corresponding isl_schedule_tree does not
686 * have any children. That is, the number of children of "node" is
687 * only zero if its tree is the explicit empty tree. Otherwise,
688 * if the isl_schedule_tree has any children, then it is equal
689 * to the number of children of "node". If it has zero children,
690 * then "node" still has a leaf node as child.
692 int isl_schedule_node_n_children(__isl_keep isl_schedule_node
*node
)
699 if (isl_schedule_tree_is_leaf(node
->tree
))
702 n
= isl_schedule_tree_n_children(node
->tree
);
709 /* Move the "node" pointer to the ancestor of the given generation
710 * of the node it currently points to, where generation 0 is the node
711 * itself and generation 1 is its parent.
713 __isl_give isl_schedule_node
*isl_schedule_node_ancestor(
714 __isl_take isl_schedule_node
*node
, int generation
)
717 isl_schedule_tree
*tree
;
723 n
= isl_schedule_node_get_tree_depth(node
);
725 return isl_schedule_node_free(node
);
726 if (generation
< 0 || generation
> n
)
727 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
728 "generation out of bounds",
729 return isl_schedule_node_free(node
));
730 node
= isl_schedule_node_cow(node
);
734 tree
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
,
736 isl_schedule_tree_free(node
->tree
);
738 node
->ancestors
= isl_schedule_tree_list_drop(node
->ancestors
,
739 n
- generation
, generation
);
740 if (!node
->ancestors
|| !node
->tree
)
741 return isl_schedule_node_free(node
);
746 /* Move the "node" pointer to the parent of the node it currently points to.
748 __isl_give isl_schedule_node
*isl_schedule_node_parent(
749 __isl_take isl_schedule_node
*node
)
753 if (!isl_schedule_node_has_parent(node
))
754 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
755 "node has no parent",
756 return isl_schedule_node_free(node
));
757 return isl_schedule_node_ancestor(node
, 1);
760 /* Move the "node" pointer to the root of its schedule tree.
762 __isl_give isl_schedule_node
*isl_schedule_node_root(
763 __isl_take isl_schedule_node
*node
)
769 n
= isl_schedule_node_get_tree_depth(node
);
771 return isl_schedule_node_free(node
);
772 return isl_schedule_node_ancestor(node
, n
);
775 /* Move the "node" pointer to the child at position "pos" of the node
776 * it currently points to.
778 __isl_give isl_schedule_node
*isl_schedule_node_child(
779 __isl_take isl_schedule_node
*node
, int pos
)
783 isl_schedule_tree
*tree
;
786 node
= isl_schedule_node_cow(node
);
789 if (!isl_schedule_node_has_children(node
))
790 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
791 "node has no children",
792 return isl_schedule_node_free(node
));
794 ctx
= isl_schedule_node_get_ctx(node
);
795 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
796 child_pos
= isl_realloc_array(ctx
, node
->child_pos
, int, n
+ 1);
798 return isl_schedule_node_free(node
);
799 node
->child_pos
= child_pos
;
800 node
->child_pos
[n
] = pos
;
802 node
->ancestors
= isl_schedule_tree_list_add(node
->ancestors
,
803 isl_schedule_tree_copy(node
->tree
));
805 if (isl_schedule_tree_has_children(tree
))
806 tree
= isl_schedule_tree_get_child(tree
, pos
);
808 tree
= isl_schedule_node_get_leaf(node
);
809 isl_schedule_tree_free(node
->tree
);
812 if (!node
->tree
|| !node
->ancestors
)
813 return isl_schedule_node_free(node
);
818 /* Move the "node" pointer to the first child of the node
819 * it currently points to.
821 __isl_give isl_schedule_node
*isl_schedule_node_first_child(
822 __isl_take isl_schedule_node
*node
)
824 return isl_schedule_node_child(node
, 0);
827 /* Move the "node" pointer to the child of this node's parent in
828 * the previous child position.
830 __isl_give isl_schedule_node
*isl_schedule_node_previous_sibling(
831 __isl_take isl_schedule_node
*node
)
834 isl_schedule_tree
*parent
, *tree
;
836 node
= isl_schedule_node_cow(node
);
839 if (!isl_schedule_node_has_previous_sibling(node
))
840 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
841 "node has no previous sibling",
842 return isl_schedule_node_free(node
));
844 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
845 parent
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
,
848 return isl_schedule_node_free(node
);
849 node
->child_pos
[n
- 1]--;
850 tree
= isl_schedule_tree_list_get_schedule_tree(parent
->children
,
851 node
->child_pos
[n
- 1]);
852 isl_schedule_tree_free(parent
);
854 return isl_schedule_node_free(node
);
855 isl_schedule_tree_free(node
->tree
);
861 /* Move the "node" pointer to the child of this node's parent in
862 * the next child position.
864 __isl_give isl_schedule_node
*isl_schedule_node_next_sibling(
865 __isl_take isl_schedule_node
*node
)
868 isl_schedule_tree
*parent
, *tree
;
870 node
= isl_schedule_node_cow(node
);
873 if (!isl_schedule_node_has_next_sibling(node
))
874 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
875 "node has no next sibling",
876 return isl_schedule_node_free(node
));
878 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
879 parent
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
,
882 return isl_schedule_node_free(node
);
883 node
->child_pos
[n
- 1]++;
884 tree
= isl_schedule_tree_list_get_schedule_tree(parent
->children
,
885 node
->child_pos
[n
- 1]);
886 isl_schedule_tree_free(parent
);
888 return isl_schedule_node_free(node
);
889 isl_schedule_tree_free(node
->tree
);
895 /* Return a copy to the child at position "pos" of "node".
897 __isl_give isl_schedule_node
*isl_schedule_node_get_child(
898 __isl_keep isl_schedule_node
*node
, int pos
)
900 return isl_schedule_node_child(isl_schedule_node_copy(node
), pos
);
903 /* Traverse the descendant of "node" in depth-first order, including
904 * "node" itself. Call "enter" whenever a node is entered and "leave"
905 * whenever a node is left. The callback "enter" is responsible
906 * for moving to the deepest initial subtree of its argument that
907 * should be traversed.
909 static __isl_give isl_schedule_node
*traverse(
910 __isl_take isl_schedule_node
*node
,
911 __isl_give isl_schedule_node
*(*enter
)(
912 __isl_take isl_schedule_node
*node
, void *user
),
913 __isl_give isl_schedule_node
*(*leave
)(
914 __isl_take isl_schedule_node
*node
, void *user
),
922 depth
= isl_schedule_node_get_tree_depth(node
);
924 node
= enter(node
, user
);
925 node
= leave(node
, user
);
926 while (node
&& isl_schedule_node_get_tree_depth(node
) > depth
&&
927 !isl_schedule_node_has_next_sibling(node
)) {
928 node
= isl_schedule_node_parent(node
);
929 node
= leave(node
, user
);
931 if (node
&& isl_schedule_node_get_tree_depth(node
) > depth
)
932 node
= isl_schedule_node_next_sibling(node
);
933 } while (node
&& isl_schedule_node_get_tree_depth(node
) > depth
);
938 /* Internal data structure for isl_schedule_node_foreach_descendant.
940 * "fn" is the user-specified callback function.
941 * "user" is the user-specified argument for the callback.
943 struct isl_schedule_node_preorder_data
{
944 int (*fn
)(__isl_keep isl_schedule_node
*node
, void *user
);
948 /* Callback for "traverse" to enter a node and to move
949 * to the deepest initial subtree that should be traversed
950 * for use in a preorder visit.
952 * If the user callback returns a negative value, then we abort
953 * the traversal. If this callback returns zero, then we skip
954 * the subtree rooted at the current node. Otherwise, we move
955 * down to the first child and repeat the process until a leaf
958 static __isl_give isl_schedule_node
*preorder_enter(
959 __isl_take isl_schedule_node
*node
, void *user
)
961 struct isl_schedule_node_preorder_data
*data
= user
;
969 r
= data
->fn(node
, data
->user
);
971 return isl_schedule_node_free(node
);
974 } while (isl_schedule_node_has_children(node
) &&
975 (node
= isl_schedule_node_first_child(node
)) != NULL
);
980 /* Callback for "traverse" to leave a node
981 * for use in a preorder visit.
982 * Since we already visited the node when we entered it,
983 * we do not need to do anything here.
985 static __isl_give isl_schedule_node
*preorder_leave(
986 __isl_take isl_schedule_node
*node
, void *user
)
991 /* Traverse the descendants of "node" (including the node itself)
992 * in depth first preorder.
994 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
995 * If "fn" returns 0 on any of the nodes, then the subtree rooted
996 * at that node is skipped.
998 * Return 0 on success and -1 on failure.
1000 int isl_schedule_node_foreach_descendant(__isl_keep isl_schedule_node
*node
,
1001 int (*fn
)(__isl_keep isl_schedule_node
*node
, void *user
), void *user
)
1003 struct isl_schedule_node_preorder_data data
= { fn
, user
};
1005 node
= isl_schedule_node_copy(node
);
1006 node
= traverse(node
, &preorder_enter
, &preorder_leave
, &data
);
1007 isl_schedule_node_free(node
);
1009 return node
? 0 : -1;
1012 /* Internal data structure for isl_schedule_node_map_descendant.
1014 * "fn" is the user-specified callback function.
1015 * "user" is the user-specified argument for the callback.
1017 struct isl_schedule_node_postorder_data
{
1018 __isl_give isl_schedule_node
*(*fn
)(__isl_take isl_schedule_node
*node
,
1023 /* Callback for "traverse" to enter a node and to move
1024 * to the deepest initial subtree that should be traversed
1025 * for use in a postorder visit.
1027 * Since we are performing a postorder visit, we only need
1028 * to move to the deepest initial leaf here.
1030 static __isl_give isl_schedule_node
*postorder_enter(
1031 __isl_take isl_schedule_node
*node
, void *user
)
1033 while (node
&& isl_schedule_node_has_children(node
))
1034 node
= isl_schedule_node_first_child(node
);
1039 /* Callback for "traverse" to leave a node
1040 * for use in a postorder visit.
1042 * Since we are performing a postorder visit, we need
1043 * to call the user callback here.
1045 static __isl_give isl_schedule_node
*postorder_leave(
1046 __isl_take isl_schedule_node
*node
, void *user
)
1048 struct isl_schedule_node_postorder_data
*data
= user
;
1050 return data
->fn(node
, data
->user
);
1053 /* Traverse the descendants of "node" (including the node itself)
1054 * in depth first postorder, allowing the user to modify the visited node.
1055 * The traversal continues from the node returned by the callback function.
1056 * It is the responsibility of the user to ensure that this does not
1057 * lead to an infinite loop. It is safest to always return a pointer
1058 * to the same position (same ancestors and child positions) as the input node.
1060 __isl_give isl_schedule_node
*isl_schedule_node_map_descendant(
1061 __isl_take isl_schedule_node
*node
,
1062 __isl_give isl_schedule_node
*(*fn
)(__isl_take isl_schedule_node
*node
,
1063 void *user
), void *user
)
1065 struct isl_schedule_node_postorder_data data
= { fn
, user
};
1067 return traverse(node
, &postorder_enter
, &postorder_leave
, &data
);
1070 /* Return the number of members in the given band node.
1072 unsigned isl_schedule_node_band_n_member(__isl_keep isl_schedule_node
*node
)
1074 return node
? isl_schedule_tree_band_n_member(node
->tree
) : 0;
1077 /* Is the band member at position "pos" of the band node "node"
1078 * marked coincident?
1080 int isl_schedule_node_band_member_get_coincident(
1081 __isl_keep isl_schedule_node
*node
, int pos
)
1085 return isl_schedule_tree_band_member_get_coincident(node
->tree
, pos
);
1088 /* Mark the band member at position "pos" the band node "node"
1089 * as being coincident or not according to "coincident".
1091 __isl_give isl_schedule_node
*isl_schedule_node_band_member_set_coincident(
1092 __isl_take isl_schedule_node
*node
, int pos
, int coincident
)
1095 isl_schedule_tree
*tree
;
1099 c
= isl_schedule_node_band_member_get_coincident(node
, pos
);
1100 if (c
== coincident
)
1103 tree
= isl_schedule_tree_copy(node
->tree
);
1104 tree
= isl_schedule_tree_band_member_set_coincident(tree
, pos
,
1106 node
= isl_schedule_node_graft_tree(node
, tree
);
1111 /* Is the band node "node" marked permutable?
1113 int isl_schedule_node_band_get_permutable(__isl_keep isl_schedule_node
*node
)
1118 return isl_schedule_tree_band_get_permutable(node
->tree
);
1121 /* Mark the band node "node" permutable or not according to "permutable"?
1123 __isl_give isl_schedule_node
*isl_schedule_node_band_set_permutable(
1124 __isl_take isl_schedule_node
*node
, int permutable
)
1126 isl_schedule_tree
*tree
;
1130 if (isl_schedule_node_band_get_permutable(node
) == permutable
)
1133 tree
= isl_schedule_tree_copy(node
->tree
);
1134 tree
= isl_schedule_tree_band_set_permutable(tree
, permutable
);
1135 node
= isl_schedule_node_graft_tree(node
, tree
);
1140 /* Return the schedule space of the band node.
1142 __isl_give isl_space
*isl_schedule_node_band_get_space(
1143 __isl_keep isl_schedule_node
*node
)
1148 return isl_schedule_tree_band_get_space(node
->tree
);
1151 /* Return the schedule of the band node in isolation.
1153 __isl_give isl_multi_union_pw_aff
*isl_schedule_node_band_get_partial_schedule(
1154 __isl_keep isl_schedule_node
*node
)
1159 return isl_schedule_tree_band_get_partial_schedule(node
->tree
);
1162 /* Return the schedule of the band node in isolation in the form of
1165 * If the band does not have any members, then we construct a universe map
1166 * with the universe of the domain elements reaching the node as domain.
1167 * Otherwise, we extract an isl_multi_union_pw_aff representation and
1168 * convert that to an isl_union_map.
1170 __isl_give isl_union_map
*isl_schedule_node_band_get_partial_schedule_union_map(
1171 __isl_keep isl_schedule_node
*node
)
1173 isl_multi_union_pw_aff
*mupa
;
1178 if (isl_schedule_node_get_type(node
) != isl_schedule_node_band
)
1179 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1180 "not a band node", return NULL
);
1181 if (isl_schedule_node_band_n_member(node
) == 0) {
1182 isl_union_set
*domain
;
1184 domain
= isl_schedule_node_get_universe_domain(node
);
1185 return isl_union_map_from_domain(domain
);
1188 mupa
= isl_schedule_node_band_get_partial_schedule(node
);
1189 return isl_union_map_from_multi_union_pw_aff(mupa
);
1192 /* Make sure that that spaces of "node" and "mv" are the same.
1193 * Return -1 on error, reporting the error to the user.
1195 static int check_space_multi_val(__isl_keep isl_schedule_node
*node
,
1196 __isl_keep isl_multi_val
*mv
)
1198 isl_space
*node_space
, *mv_space
;
1201 node_space
= isl_schedule_node_band_get_space(node
);
1202 mv_space
= isl_multi_val_get_space(mv
);
1203 equal
= isl_space_tuple_is_equal(node_space
, isl_dim_set
,
1204 mv_space
, isl_dim_set
);
1205 isl_space_free(mv_space
);
1206 isl_space_free(node_space
);
1210 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1211 "spaces don't match", return -1);
1216 /* Multiply the partial schedule of the band node "node"
1217 * with the factors in "mv".
1219 __isl_give isl_schedule_node
*isl_schedule_node_band_scale(
1220 __isl_take isl_schedule_node
*node
, __isl_take isl_multi_val
*mv
)
1222 isl_schedule_tree
*tree
;
1226 if (check_space_multi_val(node
, mv
) < 0)
1229 tree
= isl_schedule_node_get_tree(node
);
1230 tree
= isl_schedule_tree_band_scale(tree
, mv
);
1231 return isl_schedule_node_graft_tree(node
, tree
);
1233 isl_multi_val_free(mv
);
1234 isl_schedule_node_free(node
);
1238 /* Divide the partial schedule of the band node "node"
1239 * by the factors in "mv".
1241 __isl_give isl_schedule_node
*isl_schedule_node_band_scale_down(
1242 __isl_take isl_schedule_node
*node
, __isl_take isl_multi_val
*mv
)
1244 isl_schedule_tree
*tree
;
1248 if (check_space_multi_val(node
, mv
) < 0)
1251 tree
= isl_schedule_node_get_tree(node
);
1252 tree
= isl_schedule_tree_band_scale_down(tree
, mv
);
1253 return isl_schedule_node_graft_tree(node
, tree
);
1255 isl_multi_val_free(mv
);
1256 isl_schedule_node_free(node
);
1260 /* Tile "node" with tile sizes "sizes".
1262 * The current node is replaced by two nested nodes corresponding
1263 * to the tile dimensions and the point dimensions.
1265 * Return a pointer to the outer (tile) node.
1267 * If the scale tile loops option is set, then the tile loops
1268 * are scaled by the tile sizes. If the shift point loops option is set,
1269 * then the point loops are shifted to start at zero.
1270 * In particular, these options affect the tile and point loop schedules
1273 * scale shift original tile point
1275 * 0 0 i floor(i/s) i
1276 * 1 0 i s * floor(i/s) i
1277 * 0 1 i floor(i/s) i - s * floor(i/s)
1278 * 1 1 i s * floor(i/s) i - s * floor(i/s)
1280 __isl_give isl_schedule_node
*isl_schedule_node_band_tile(
1281 __isl_take isl_schedule_node
*node
, __isl_take isl_multi_val
*sizes
)
1283 isl_schedule_tree
*tree
;
1285 if (!node
|| !sizes
)
1288 if (check_space_multi_val(node
, sizes
) < 0)
1291 tree
= isl_schedule_node_get_tree(node
);
1292 tree
= isl_schedule_tree_band_tile(tree
, sizes
);
1293 return isl_schedule_node_graft_tree(node
, tree
);
1295 isl_multi_val_free(sizes
);
1296 isl_schedule_node_free(node
);
1300 /* Move the band node "node" down to all the leaves in the subtree
1302 * Return a pointer to the node in the resulting tree that is in the same
1303 * position as the node pointed to by "node" in the original tree.
1305 * If the node only has a leaf child, then nothing needs to be done.
1306 * Otherwise, the child of the node is removed and the result is
1307 * appended to all the leaves in the subtree rooted at the original child.
1308 * The original node is then replaced by the result of this operation.
1310 __isl_give isl_schedule_node
*isl_schedule_node_band_sink(
1311 __isl_take isl_schedule_node
*node
)
1313 enum isl_schedule_node_type type
;
1314 isl_schedule_tree
*tree
, *child
;
1319 type
= isl_schedule_node_get_type(node
);
1320 if (type
!= isl_schedule_node_band
)
1321 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1322 "not a band node", isl_schedule_node_free(node
));
1323 if (isl_schedule_tree_n_children(node
->tree
) == 0)
1326 tree
= isl_schedule_node_get_tree(node
);
1327 child
= isl_schedule_tree_get_child(tree
, 0);
1328 tree
= isl_schedule_tree_reset_children(tree
);
1329 tree
= isl_schedule_tree_append_to_leaves(child
, tree
);
1331 return isl_schedule_node_graft_tree(node
, tree
);
1334 /* Split "node" into two nested band nodes, one with the first "pos"
1335 * dimensions and one with the remaining dimensions.
1336 * The schedules of the two band nodes live in anonymous spaces.
1338 __isl_give isl_schedule_node
*isl_schedule_node_band_split(
1339 __isl_take isl_schedule_node
*node
, int pos
)
1341 isl_schedule_tree
*tree
;
1343 tree
= isl_schedule_node_get_tree(node
);
1344 tree
= isl_schedule_tree_band_split(tree
, pos
);
1345 return isl_schedule_node_graft_tree(node
, tree
);
1348 /* Return the domain of the domain node "node".
1350 __isl_give isl_union_set
*isl_schedule_node_domain_get_domain(
1351 __isl_keep isl_schedule_node
*node
)
1356 return isl_schedule_tree_domain_get_domain(node
->tree
);
1359 /* Return the filter of the filter node "node".
1361 __isl_give isl_union_set
*isl_schedule_node_filter_get_filter(
1362 __isl_keep isl_schedule_node
*node
)
1367 return isl_schedule_tree_filter_get_filter(node
->tree
);
1370 /* Replace the filter of filter node "node" by "filter".
1372 __isl_give isl_schedule_node
*isl_schedule_node_filter_set_filter(
1373 __isl_take isl_schedule_node
*node
, __isl_take isl_union_set
*filter
)
1375 isl_schedule_tree
*tree
;
1377 if (!node
|| !filter
)
1380 tree
= isl_schedule_tree_copy(node
->tree
);
1381 tree
= isl_schedule_tree_filter_set_filter(tree
, filter
);
1382 return isl_schedule_node_graft_tree(node
, tree
);
1384 isl_schedule_node_free(node
);
1385 isl_union_set_free(filter
);
1389 /* Update the ancestors of "node" to point to the tree that "node"
1391 * That is, replace the child in the original parent that corresponds
1392 * to the current tree position by node->tree and continue updating
1393 * the ancestors in the same way until the root is reached.
1395 * If "node" originally points to a leaf of the schedule tree, then make sure
1396 * that in the end it points to a leaf in the updated schedule tree.
1398 static __isl_give isl_schedule_node
*update_ancestors(
1399 __isl_take isl_schedule_node
*node
)
1404 isl_schedule_tree
*tree
;
1406 node
= isl_schedule_node_cow(node
);
1410 ctx
= isl_schedule_node_get_ctx(node
);
1411 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
1412 tree
= isl_schedule_tree_copy(node
->tree
);
1414 for (i
= n
- 1; i
>= 0; --i
) {
1415 isl_schedule_tree
*parent
;
1417 parent
= isl_schedule_tree_list_get_schedule_tree(
1418 node
->ancestors
, i
);
1419 parent
= isl_schedule_tree_replace_child(parent
,
1420 node
->child_pos
[i
], tree
);
1421 node
->ancestors
= isl_schedule_tree_list_set_schedule_tree(
1422 node
->ancestors
, i
, isl_schedule_tree_copy(parent
));
1427 is_leaf
= isl_schedule_tree_is_leaf(node
->tree
);
1428 node
->schedule
= isl_schedule_set_root(node
->schedule
, tree
);
1430 isl_schedule_tree_free(node
->tree
);
1431 node
->tree
= isl_schedule_node_get_leaf(node
);
1434 if (!node
->schedule
|| !node
->ancestors
)
1435 return isl_schedule_node_free(node
);
1440 /* Replace the subtree that "pos" points to by "tree", updating
1441 * the ancestors to maintain a consistent state.
1443 __isl_give isl_schedule_node
*isl_schedule_node_graft_tree(
1444 __isl_take isl_schedule_node
*pos
, __isl_take isl_schedule_tree
*tree
)
1448 if (pos
->tree
== tree
) {
1449 isl_schedule_tree_free(tree
);
1453 pos
= isl_schedule_node_cow(pos
);
1457 isl_schedule_tree_free(pos
->tree
);
1460 return update_ancestors(pos
);
1462 isl_schedule_node_free(pos
);
1463 isl_schedule_tree_free(tree
);
1467 /* Make sure we can insert a node between "node" and its parent.
1468 * Return -1 on error, reporting the reason why we cannot insert a node.
1470 static int check_insert(__isl_keep isl_schedule_node
*node
)
1473 enum isl_schedule_node_type type
;
1475 has_parent
= isl_schedule_node_has_parent(node
);
1479 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1480 "cannot insert node outside of root", return -1);
1482 type
= isl_schedule_node_get_parent_type(node
);
1483 if (type
== isl_schedule_node_error
)
1485 if (type
== isl_schedule_node_set
|| type
== isl_schedule_node_sequence
)
1486 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1487 "cannot insert node between set or sequence node "
1488 "and its filter children", return -1);
1493 /* Insert a band node with partial schedule "mupa" between "node" and
1495 * Return a pointer to the new band node.
1497 __isl_give isl_schedule_node
*isl_schedule_node_insert_partial_schedule(
1498 __isl_take isl_schedule_node
*node
,
1499 __isl_take isl_multi_union_pw_aff
*mupa
)
1501 isl_schedule_band
*band
;
1502 isl_schedule_tree
*tree
;
1504 if (check_insert(node
) < 0)
1505 node
= isl_schedule_node_free(node
);
1507 tree
= isl_schedule_node_get_tree(node
);
1508 band
= isl_schedule_band_from_multi_union_pw_aff(mupa
);
1509 tree
= isl_schedule_tree_insert_band(tree
, band
);
1510 node
= isl_schedule_node_graft_tree(node
, tree
);
1515 /* Insert a filter node with filter "filter" between "node" and its parent.
1516 * Return a pointer to the new filter node.
1518 __isl_give isl_schedule_node
*isl_schedule_node_insert_filter(
1519 __isl_take isl_schedule_node
*node
, __isl_take isl_union_set
*filter
)
1521 isl_schedule_tree
*tree
;
1523 if (check_insert(node
) < 0)
1524 node
= isl_schedule_node_free(node
);
1526 tree
= isl_schedule_node_get_tree(node
);
1527 tree
= isl_schedule_tree_insert_filter(tree
, filter
);
1528 node
= isl_schedule_node_graft_tree(node
, tree
);
1533 /* Attach the current subtree of "node" to a sequence of filter tree nodes
1534 * with filters described by "filters", attach this sequence
1535 * of filter tree nodes as children to a new tree of type "type" and
1536 * replace the original subtree of "node" by this new tree.
1538 static __isl_give isl_schedule_node
*isl_schedule_node_insert_children(
1539 __isl_take isl_schedule_node
*node
,
1540 enum isl_schedule_node_type type
,
1541 __isl_take isl_union_set_list
*filters
)
1545 isl_schedule_tree
*tree
;
1546 isl_schedule_tree_list
*list
;
1548 if (check_insert(node
) < 0)
1549 node
= isl_schedule_node_free(node
);
1551 if (!node
|| !filters
)
1554 ctx
= isl_schedule_node_get_ctx(node
);
1555 n
= isl_union_set_list_n_union_set(filters
);
1556 list
= isl_schedule_tree_list_alloc(ctx
, n
);
1557 for (i
= 0; i
< n
; ++i
) {
1558 isl_schedule_tree
*tree
;
1559 isl_union_set
*filter
;
1561 tree
= isl_schedule_node_get_tree(node
);
1562 filter
= isl_union_set_list_get_union_set(filters
, i
);
1563 tree
= isl_schedule_tree_insert_filter(tree
, filter
);
1564 list
= isl_schedule_tree_list_add(list
, tree
);
1566 tree
= isl_schedule_tree_from_children(type
, list
);
1567 node
= isl_schedule_node_graft_tree(node
, tree
);
1569 isl_union_set_list_free(filters
);
1572 isl_union_set_list_free(filters
);
1573 isl_schedule_node_free(node
);
1577 /* Insert a sequence node with child filters "filters" between "node" and
1578 * its parent. That is, the tree that "node" points to is attached
1579 * to each of the child nodes of the filter nodes.
1580 * Return a pointer to the new sequence node.
1582 __isl_give isl_schedule_node
*isl_schedule_node_insert_sequence(
1583 __isl_take isl_schedule_node
*node
,
1584 __isl_take isl_union_set_list
*filters
)
1586 return isl_schedule_node_insert_children(node
,
1587 isl_schedule_node_sequence
, filters
);
1590 /* Insert a set node with child filters "filters" between "node" and
1591 * its parent. That is, the tree that "node" points to is attached
1592 * to each of the child nodes of the filter nodes.
1593 * Return a pointer to the new set node.
1595 __isl_give isl_schedule_node
*isl_schedule_node_insert_set(
1596 __isl_take isl_schedule_node
*node
,
1597 __isl_take isl_union_set_list
*filters
)
1599 return isl_schedule_node_insert_children(node
,
1600 isl_schedule_node_set
, filters
);
1603 /* Reset the user pointer on all identifiers of parameters and tuples
1604 * in the schedule node "node".
1606 __isl_give isl_schedule_node
*isl_schedule_node_reset_user(
1607 __isl_take isl_schedule_node
*node
)
1609 isl_schedule_tree
*tree
;
1611 tree
= isl_schedule_node_get_tree(node
);
1612 tree
= isl_schedule_tree_reset_user(tree
);
1613 node
= isl_schedule_node_graft_tree(node
, tree
);
1618 /* Align the parameters of the schedule node "node" to those of "space".
1620 __isl_give isl_schedule_node
*isl_schedule_node_align_params(
1621 __isl_take isl_schedule_node
*node
, __isl_take isl_space
*space
)
1623 isl_schedule_tree
*tree
;
1625 tree
= isl_schedule_node_get_tree(node
);
1626 tree
= isl_schedule_tree_align_params(tree
, space
);
1627 node
= isl_schedule_node_graft_tree(node
, tree
);
1632 /* Return the position of the subtree containing "node" among the children
1633 * of "ancestor". "node" is assumed to be a descendant of "ancestor".
1634 * In particular, both nodes should point to the same schedule tree.
1636 * Return -1 on error.
1638 int isl_schedule_node_get_ancestor_child_position(
1639 __isl_keep isl_schedule_node
*node
,
1640 __isl_keep isl_schedule_node
*ancestor
)
1643 isl_schedule_tree
*tree
;
1645 if (!node
|| !ancestor
)
1648 if (node
->schedule
!= ancestor
->schedule
)
1649 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1650 "not a descendant", return -1);
1652 n1
= isl_schedule_node_get_tree_depth(ancestor
);
1653 n2
= isl_schedule_node_get_tree_depth(node
);
1656 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1657 "not a descendant", return -1);
1658 tree
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
, n1
);
1659 isl_schedule_tree_free(tree
);
1660 if (tree
!= ancestor
->tree
)
1661 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1662 "not a descendant", return -1);
1664 return node
->child_pos
[n1
];
1667 /* Print "node" to "p".
1669 __isl_give isl_printer
*isl_printer_print_schedule_node(
1670 __isl_take isl_printer
*p
, __isl_keep isl_schedule_node
*node
)
1673 return isl_printer_free(p
);
1674 return isl_printer_print_schedule_tree_mark(p
, node
->schedule
->root
,
1675 isl_schedule_tree_list_n_schedule_tree(node
->ancestors
),
1679 void isl_schedule_node_dump(__isl_keep isl_schedule_node
*node
)
1682 isl_printer
*printer
;
1687 ctx
= isl_schedule_node_get_ctx(node
);
1688 printer
= isl_printer_to_file(ctx
, stderr
);
1689 printer
= isl_printer_set_yaml_style(printer
, ISL_YAML_STYLE_BLOCK
);
1690 printer
= isl_printer_print_schedule_node(printer
, node
);
1692 isl_printer_free(printer
);