/*
 * Copyright 2013-2014 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl_schedule_band.h>
#include <isl_schedule_private.h>
#include <isl_schedule_node_private.h>
15 /* Create a new schedule node in the given schedule, point at the given
16 * tree with given ancestors and child positions.
17 * "child_pos" may be NULL if there are no ancestors.
19 __isl_give isl_schedule_node
*isl_schedule_node_alloc(
20 __isl_take isl_schedule
*schedule
, __isl_take isl_schedule_tree
*tree
,
21 __isl_take isl_schedule_tree_list
*ancestors
, int *child_pos
)
24 isl_schedule_node
*node
;
27 if (!schedule
|| !tree
|| !ancestors
)
29 n
= isl_schedule_tree_list_n_schedule_tree(ancestors
);
30 if (n
> 0 && !child_pos
)
32 ctx
= isl_schedule_get_ctx(schedule
);
33 node
= isl_calloc_type(ctx
, isl_schedule_node
);
37 node
->schedule
= schedule
;
39 node
->ancestors
= ancestors
;
40 node
->child_pos
= isl_alloc_array(ctx
, int, n
);
41 if (n
&& !node
->child_pos
)
42 return isl_schedule_node_free(node
);
43 for (i
= 0; i
< n
; ++i
)
44 node
->child_pos
[i
] = child_pos
[i
];
48 isl_schedule_free(schedule
);
49 isl_schedule_tree_free(tree
);
50 isl_schedule_tree_list_free(ancestors
);
54 /* Return a pointer to the root of a schedule tree with as single
55 * node a domain node with the given domain.
57 __isl_give isl_schedule_node
*isl_schedule_node_from_domain(
58 __isl_take isl_union_set
*domain
)
60 isl_schedule
*schedule
;
61 isl_schedule_node
*node
;
63 schedule
= isl_schedule_from_domain(domain
);
64 node
= isl_schedule_get_root(schedule
);
65 isl_schedule_free(schedule
);
70 /* Return the isl_ctx to which "node" belongs.
72 isl_ctx
*isl_schedule_node_get_ctx(__isl_keep isl_schedule_node
*node
)
74 return node
? isl_schedule_get_ctx(node
->schedule
) : NULL
;
77 /* Return a pointer to the leaf of the schedule into which "node" points.
79 * Even though these leaves are not reference counted, we still
80 * indicate that this function does not return a copy.
82 __isl_keep isl_schedule_tree
*isl_schedule_node_peek_leaf(
83 __isl_keep isl_schedule_node
*node
)
85 return node
? isl_schedule_peek_leaf(node
->schedule
) : NULL
;
88 /* Return a pointer to the leaf of the schedule into which "node" points.
90 * Even though these leaves are not reference counted, we still
91 * return a "copy" of the leaf here such that it can still be "freed"
94 __isl_give isl_schedule_tree
*isl_schedule_node_get_leaf(
95 __isl_keep isl_schedule_node
*node
)
97 return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node
));
100 /* Return the type of the node or isl_schedule_node_error on error.
102 enum isl_schedule_node_type
isl_schedule_node_get_type(
103 __isl_keep isl_schedule_node
*node
)
105 return node
? isl_schedule_tree_get_type(node
->tree
)
106 : isl_schedule_node_error
;
109 /* Return the type of the parent of "node" or isl_schedule_node_error on error.
111 enum isl_schedule_node_type
isl_schedule_node_get_parent_type(
112 __isl_keep isl_schedule_node
*node
)
116 isl_schedule_tree
*parent
;
117 enum isl_schedule_node_type type
;
120 return isl_schedule_node_error
;
121 has_parent
= isl_schedule_node_has_parent(node
);
123 return isl_schedule_node_error
;
125 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
126 "node has no parent", return isl_schedule_node_error
);
128 pos
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
) - 1;
129 parent
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
, pos
);
130 type
= isl_schedule_tree_get_type(parent
);
131 isl_schedule_tree_free(parent
);
136 /* Return a copy of the subtree that this node points to.
138 __isl_give isl_schedule_tree
*isl_schedule_node_get_tree(
139 __isl_keep isl_schedule_node
*node
)
144 return isl_schedule_tree_copy(node
->tree
);
147 /* Return a copy of the schedule into which "node" points.
149 __isl_give isl_schedule
*isl_schedule_node_get_schedule(
150 __isl_keep isl_schedule_node
*node
)
154 return isl_schedule_copy(node
->schedule
);
157 /* Return a fresh copy of "node".
159 __isl_take isl_schedule_node
*isl_schedule_node_dup(
160 __isl_keep isl_schedule_node
*node
)
165 return isl_schedule_node_alloc(isl_schedule_copy(node
->schedule
),
166 isl_schedule_tree_copy(node
->tree
),
167 isl_schedule_tree_list_copy(node
->ancestors
),
171 /* Return an isl_schedule_node that is equal to "node" and that has only
172 * a single reference.
174 __isl_give isl_schedule_node
*isl_schedule_node_cow(
175 __isl_take isl_schedule_node
*node
)
183 return isl_schedule_node_dup(node
);
186 /* Return a new reference to "node".
188 __isl_give isl_schedule_node
*isl_schedule_node_copy(
189 __isl_keep isl_schedule_node
*node
)
198 /* Free "node" and return NULL.
200 * Since the node may point to a leaf of its schedule, which
201 * point to a field inside the schedule, we need to make sure
202 * we free the tree before freeing the schedule.
204 __isl_null isl_schedule_node
*isl_schedule_node_free(
205 __isl_take isl_schedule_node
*node
)
212 isl_schedule_tree_list_free(node
->ancestors
);
213 free(node
->child_pos
);
214 isl_schedule_tree_free(node
->tree
);
215 isl_schedule_free(node
->schedule
);
221 /* Do "node1" and "node2" point to the same position in the same
224 int isl_schedule_node_is_equal(__isl_keep isl_schedule_node
*node1
,
225 __isl_keep isl_schedule_node
*node2
)
229 if (!node1
|| !node2
)
233 if (node1
->schedule
!= node2
->schedule
)
236 n1
= isl_schedule_node_get_tree_depth(node1
);
237 n2
= isl_schedule_node_get_tree_depth(node2
);
240 for (i
= 0; i
< n1
; ++i
)
241 if (node1
->child_pos
[i
] != node2
->child_pos
[i
])
247 /* Return the number of outer schedule dimensions of "node"
248 * in its schedule tree.
250 * Return -1 on error.
252 int isl_schedule_node_get_schedule_depth(__isl_keep isl_schedule_node
*node
)
260 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
261 for (i
= n
- 1; i
>= 0; --i
) {
262 isl_schedule_tree
*tree
;
264 tree
= isl_schedule_tree_list_get_schedule_tree(
268 if (tree
->type
== isl_schedule_node_band
)
269 depth
+= isl_schedule_tree_band_n_member(tree
);
270 isl_schedule_tree_free(tree
);
276 /* Internal data structure for
277 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
279 * "initialized" is set if the filter field has been initialized.
280 * "universe_filter" is set if we are only collecting the universes of filters
281 * "collect_prefix" is set if we are collecting prefixes.
282 * "filter" collects all outer filters and is NULL until "initialized" is set.
283 * "prefix" collects all outer band partial schedules (if "collect_prefix"
284 * is set). If it is used, then it is initialized by the caller
285 * of collect_filter_prefix to a zero-dimensional function.
287 struct isl_schedule_node_get_filter_prefix_data
{
291 isl_union_set
*filter
;
292 isl_multi_union_pw_aff
*prefix
;
295 /* Update "data" based on the tree node "tree" in case "data" has
296 * not been initialized yet.
298 * Return 0 on success and -1 on error.
300 * If "tree" is a filter, then we set data->filter to this filter
302 * If "tree" is a domain, then this means we have reached the root
303 * of the schedule tree without being able to extract any information.
304 * We therefore initialize data->filter to the universe of the domain.
305 * If "tree" is a band with at least one member, then we set data->filter
306 * to the universe of the schedule domain and replace the zero-dimensional
307 * data->prefix by the band schedule (if data->collect_prefix is set).
309 static int collect_filter_prefix_init(__isl_keep isl_schedule_tree
*tree
,
310 struct isl_schedule_node_get_filter_prefix_data
*data
)
312 enum isl_schedule_node_type type
;
313 isl_multi_union_pw_aff
*mupa
;
314 isl_union_set
*filter
;
316 type
= isl_schedule_tree_get_type(tree
);
318 case isl_schedule_node_error
:
320 case isl_schedule_node_leaf
:
321 case isl_schedule_node_sequence
:
322 case isl_schedule_node_set
:
324 case isl_schedule_node_domain
:
325 filter
= isl_schedule_tree_domain_get_domain(tree
);
326 filter
= isl_union_set_universe(filter
);
327 data
->filter
= filter
;
329 case isl_schedule_node_band
:
330 if (isl_schedule_tree_band_n_member(tree
) == 0)
332 mupa
= isl_schedule_tree_band_get_partial_schedule(tree
);
333 if (data
->collect_prefix
) {
334 isl_multi_union_pw_aff_free(data
->prefix
);
335 mupa
= isl_multi_union_pw_aff_reset_tuple_id(mupa
,
337 data
->prefix
= isl_multi_union_pw_aff_copy(mupa
);
339 filter
= isl_multi_union_pw_aff_domain(mupa
);
340 filter
= isl_union_set_universe(filter
);
341 data
->filter
= filter
;
343 case isl_schedule_node_filter
:
344 filter
= isl_schedule_tree_filter_get_filter(tree
);
345 if (data
->universe_filter
)
346 filter
= isl_union_set_universe(filter
);
347 data
->filter
= filter
;
351 if ((data
->collect_prefix
&& !data
->prefix
) || !data
->filter
)
354 data
->initialized
= 1;
359 /* Update "data" based on the tree node "tree" in case "data" has
360 * already been initialized.
362 * Return 0 on success and -1 on error.
364 * If "tree" is a filter, then we intersect data->filter with this filter
366 * If "tree" is a band with at least one member and data->collect_prefix
367 * is set, then we extend data->prefix with the band schedule.
369 static int collect_filter_prefix_update(__isl_keep isl_schedule_tree
*tree
,
370 struct isl_schedule_node_get_filter_prefix_data
*data
)
372 enum isl_schedule_node_type type
;
373 isl_multi_union_pw_aff
*mupa
;
374 isl_union_set
*filter
;
376 type
= isl_schedule_tree_get_type(tree
);
378 case isl_schedule_node_error
:
380 case isl_schedule_node_domain
:
381 case isl_schedule_node_leaf
:
382 case isl_schedule_node_sequence
:
383 case isl_schedule_node_set
:
385 case isl_schedule_node_band
:
386 if (isl_schedule_tree_band_n_member(tree
) == 0)
388 if (!data
->collect_prefix
)
390 mupa
= isl_schedule_tree_band_get_partial_schedule(tree
);
391 data
->prefix
= isl_multi_union_pw_aff_flat_range_product(mupa
,
396 case isl_schedule_node_filter
:
397 filter
= isl_schedule_tree_filter_get_filter(tree
);
398 if (data
->universe_filter
)
399 filter
= isl_union_set_universe(filter
);
400 data
->filter
= isl_union_set_intersect(data
->filter
, filter
);
409 /* Collect filter and/or prefix information from the elements
410 * in "list" (which represent the ancestors of a node).
411 * Store the results in "data".
413 * Return 0 on success and -1 on error.
415 * We traverse the list from innermost ancestor (last element)
416 * to outermost ancestor (first element), calling collect_filter_prefix_init
417 * on each node as long as we have not been able to extract any information
418 * yet and collect_filter_prefix_update afterwards.
419 * On successful return, data->initialized will be set since the outermost
420 * ancestor is a domain node, which always results in an initialization.
422 static int collect_filter_prefix(__isl_keep isl_schedule_tree_list
*list
,
423 struct isl_schedule_node_get_filter_prefix_data
*data
)
427 data
->initialized
= 0;
433 n
= isl_schedule_tree_list_n_schedule_tree(list
);
434 for (i
= n
- 1; i
>= 0; --i
) {
435 isl_schedule_tree
*tree
;
438 tree
= isl_schedule_tree_list_get_schedule_tree(list
, i
);
441 if (!data
->initialized
)
442 r
= collect_filter_prefix_init(tree
, data
);
444 r
= collect_filter_prefix_update(tree
, data
);
445 isl_schedule_tree_free(tree
);
453 /* Return the concatenation of the partial schedules of all outer band
454 * nodes of "node" interesected with all outer filters
455 * as an isl_union_pw_multi_aff.
457 * If "node" is pointing at the root of the schedule tree, then
458 * there are no domain elements reaching the current node, so
459 * we return an empty result.
461 * We collect all the filters and partial schedules in collect_filter_prefix.
462 * The partial schedules are collected as an isl_multi_union_pw_aff.
463 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
464 * contain any domain information, so we construct the isl_union_pw_multi_aff
465 * result as a zero-dimensional function on the collected filter.
466 * Otherwise, we convert the isl_multi_union_pw_aff to
467 * an isl_multi_union_pw_aff and intersect the domain with the filter.
469 __isl_give isl_union_pw_multi_aff
*
470 isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
471 __isl_keep isl_schedule_node
*node
)
474 isl_union_pw_multi_aff
*prefix
;
475 struct isl_schedule_node_get_filter_prefix_data data
;
480 space
= isl_schedule_get_space(node
->schedule
);
481 if (node
->tree
== node
->schedule
->root
)
482 return isl_union_pw_multi_aff_empty(space
);
484 space
= isl_space_set_from_params(space
);
485 data
.universe_filter
= 0;
486 data
.collect_prefix
= 1;
487 data
.prefix
= isl_multi_union_pw_aff_zero(space
);
489 if (collect_filter_prefix(node
->ancestors
, &data
) < 0)
490 data
.prefix
= isl_multi_union_pw_aff_free(data
.prefix
);
493 isl_multi_union_pw_aff_dim(data
.prefix
, isl_dim_set
) == 0) {
494 isl_multi_union_pw_aff_free(data
.prefix
);
495 prefix
= isl_union_pw_multi_aff_from_domain(data
.filter
);
498 isl_union_pw_multi_aff_from_multi_union_pw_aff(data
.prefix
);
499 prefix
= isl_union_pw_multi_aff_intersect_domain(prefix
,
506 /* Return the concatenation of the partial schedules of all outer band
507 * nodes of "node" interesected with all outer filters
508 * as an isl_union_map.
510 __isl_give isl_union_map
*isl_schedule_node_get_prefix_schedule_union_map(
511 __isl_keep isl_schedule_node
*node
)
513 isl_union_pw_multi_aff
*upma
;
515 upma
= isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node
);
516 return isl_union_map_from_union_pw_multi_aff(upma
);
519 /* Return the union of universe sets of the domain elements that reach "node".
521 * If "node" is pointing at the root of the schedule tree, then
522 * there are no domain elements reaching the current node, so
523 * we return an empty result.
525 * Otherwise, we collect the universes of all filters reaching the node
526 * in collect_filter_prefix.
528 __isl_give isl_union_set
*isl_schedule_node_get_universe_domain(
529 __isl_keep isl_schedule_node
*node
)
531 struct isl_schedule_node_get_filter_prefix_data data
;
536 if (node
->tree
== node
->schedule
->root
) {
539 space
= isl_schedule_get_space(node
->schedule
);
540 return isl_union_set_empty(space
);
543 data
.universe_filter
= 1;
544 data
.collect_prefix
= 0;
547 if (collect_filter_prefix(node
->ancestors
, &data
) < 0)
548 data
.filter
= isl_union_set_free(data
.filter
);
553 /* Return the subtree schedule of "node".
555 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
556 * trees that do not contain any schedule information, we first
557 * move down to the first relevant descendant and handle leaves ourselves.
559 __isl_give isl_union_map
*isl_schedule_node_get_subtree_schedule_union_map(
560 __isl_keep isl_schedule_node
*node
)
562 isl_schedule_tree
*tree
, *leaf
;
565 tree
= isl_schedule_node_get_tree(node
);
566 leaf
= isl_schedule_node_peek_leaf(node
);
567 tree
= isl_schedule_tree_first_schedule_descendant(tree
, leaf
);
571 isl_union_set
*domain
;
572 domain
= isl_schedule_node_get_universe_domain(node
);
573 isl_schedule_tree_free(tree
);
574 return isl_union_map_from_domain(domain
);
577 umap
= isl_schedule_tree_get_subtree_schedule_union_map(tree
);
578 isl_schedule_tree_free(tree
);
582 /* Return the number of ancestors of "node" in its schedule tree.
584 int isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node
*node
)
588 return isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
591 /* Does "node" have a parent?
593 * That is, does it point to any node of the schedule other than the root?
595 int isl_schedule_node_has_parent(__isl_keep isl_schedule_node
*node
)
599 if (!node
->ancestors
)
602 return isl_schedule_tree_list_n_schedule_tree(node
->ancestors
) != 0;
605 /* Return the position of "node" among the children of its parent.
607 int isl_schedule_node_get_child_position(__isl_keep isl_schedule_node
*node
)
614 has_parent
= isl_schedule_node_has_parent(node
);
618 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
619 "node has no parent", return -1);
621 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
622 return node
->child_pos
[n
- 1];
625 /* Does the parent (if any) of "node" have any children with a smaller child
626 * position than this one?
628 int isl_schedule_node_has_previous_sibling(__isl_keep isl_schedule_node
*node
)
635 has_parent
= isl_schedule_node_has_parent(node
);
636 if (has_parent
< 0 || !has_parent
)
639 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
641 return node
->child_pos
[n
- 1] > 0;
644 /* Does the parent (if any) of "node" have any children with a greater child
645 * position than this one?
647 int isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node
*node
)
651 isl_schedule_tree
*tree
;
655 has_parent
= isl_schedule_node_has_parent(node
);
656 if (has_parent
< 0 || !has_parent
)
659 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
660 tree
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
, n
- 1);
663 n_child
= isl_schedule_tree_list_n_schedule_tree(tree
->children
);
664 isl_schedule_tree_free(tree
);
666 return node
->child_pos
[n
- 1] + 1 < n_child
;
669 /* Does "node" have any children?
671 * Any node other than the leaf nodes is considered to have at least
672 * one child, even if the corresponding isl_schedule_tree does not
675 int isl_schedule_node_has_children(__isl_keep isl_schedule_node
*node
)
679 return !isl_schedule_tree_is_leaf(node
->tree
);
682 /* Return the number of children of "node"?
684 * Any node other than the leaf nodes is considered to have at least
685 * one child, even if the corresponding isl_schedule_tree does not
686 * have any children. That is, the number of children of "node" is
687 * only zero if its tree is the explicit empty tree. Otherwise,
688 * if the isl_schedule_tree has any children, then it is equal
689 * to the number of children of "node". If it has zero children,
690 * then "node" still has a leaf node as child.
692 int isl_schedule_node_n_children(__isl_keep isl_schedule_node
*node
)
699 if (isl_schedule_tree_is_leaf(node
->tree
))
702 n
= isl_schedule_tree_n_children(node
->tree
);
709 /* Move the "node" pointer to the ancestor of the given generation
710 * of the node it currently points to, where generation 0 is the node
711 * itself and generation 1 is its parent.
713 __isl_give isl_schedule_node
*isl_schedule_node_ancestor(
714 __isl_take isl_schedule_node
*node
, int generation
)
717 isl_schedule_tree
*tree
;
723 n
= isl_schedule_node_get_tree_depth(node
);
725 return isl_schedule_node_free(node
);
726 if (generation
< 0 || generation
> n
)
727 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
728 "generation out of bounds",
729 return isl_schedule_node_free(node
));
730 node
= isl_schedule_node_cow(node
);
734 tree
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
,
736 isl_schedule_tree_free(node
->tree
);
738 node
->ancestors
= isl_schedule_tree_list_drop(node
->ancestors
,
739 n
- generation
, generation
);
740 if (!node
->ancestors
|| !node
->tree
)
741 return isl_schedule_node_free(node
);
746 /* Move the "node" pointer to the parent of the node it currently points to.
748 __isl_give isl_schedule_node
*isl_schedule_node_parent(
749 __isl_take isl_schedule_node
*node
)
753 if (!isl_schedule_node_has_parent(node
))
754 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
755 "node has no parent",
756 return isl_schedule_node_free(node
));
757 return isl_schedule_node_ancestor(node
, 1);
760 /* Move the "node" pointer to the root of its schedule tree.
762 __isl_give isl_schedule_node
*isl_schedule_node_root(
763 __isl_take isl_schedule_node
*node
)
769 n
= isl_schedule_node_get_tree_depth(node
);
771 return isl_schedule_node_free(node
);
772 return isl_schedule_node_ancestor(node
, n
);
775 /* Move the "node" pointer to the child at position "pos" of the node
776 * it currently points to.
778 __isl_give isl_schedule_node
*isl_schedule_node_child(
779 __isl_take isl_schedule_node
*node
, int pos
)
783 isl_schedule_tree
*tree
;
786 node
= isl_schedule_node_cow(node
);
789 if (!isl_schedule_node_has_children(node
))
790 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
791 "node has no children",
792 return isl_schedule_node_free(node
));
794 ctx
= isl_schedule_node_get_ctx(node
);
795 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
796 child_pos
= isl_realloc_array(ctx
, node
->child_pos
, int, n
+ 1);
798 return isl_schedule_node_free(node
);
799 node
->child_pos
= child_pos
;
800 node
->child_pos
[n
] = pos
;
802 node
->ancestors
= isl_schedule_tree_list_add(node
->ancestors
,
803 isl_schedule_tree_copy(node
->tree
));
805 if (isl_schedule_tree_has_children(tree
))
806 tree
= isl_schedule_tree_get_child(tree
, pos
);
808 tree
= isl_schedule_node_get_leaf(node
);
809 isl_schedule_tree_free(node
->tree
);
812 if (!node
->tree
|| !node
->ancestors
)
813 return isl_schedule_node_free(node
);
818 /* Move the "node" pointer to the first child of the node
819 * it currently points to.
821 __isl_give isl_schedule_node
*isl_schedule_node_first_child(
822 __isl_take isl_schedule_node
*node
)
824 return isl_schedule_node_child(node
, 0);
827 /* Move the "node" pointer to the child of this node's parent in
828 * the previous child position.
830 __isl_give isl_schedule_node
*isl_schedule_node_previous_sibling(
831 __isl_take isl_schedule_node
*node
)
834 isl_schedule_tree
*parent
, *tree
;
836 node
= isl_schedule_node_cow(node
);
839 if (!isl_schedule_node_has_previous_sibling(node
))
840 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
841 "node has no previous sibling",
842 return isl_schedule_node_free(node
));
844 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
845 parent
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
,
848 return isl_schedule_node_free(node
);
849 node
->child_pos
[n
- 1]--;
850 tree
= isl_schedule_tree_list_get_schedule_tree(parent
->children
,
851 node
->child_pos
[n
- 1]);
852 isl_schedule_tree_free(parent
);
854 return isl_schedule_node_free(node
);
855 isl_schedule_tree_free(node
->tree
);
861 /* Move the "node" pointer to the child of this node's parent in
862 * the next child position.
864 __isl_give isl_schedule_node
*isl_schedule_node_next_sibling(
865 __isl_take isl_schedule_node
*node
)
868 isl_schedule_tree
*parent
, *tree
;
870 node
= isl_schedule_node_cow(node
);
873 if (!isl_schedule_node_has_next_sibling(node
))
874 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
875 "node has no next sibling",
876 return isl_schedule_node_free(node
));
878 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
879 parent
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
,
882 return isl_schedule_node_free(node
);
883 node
->child_pos
[n
- 1]++;
884 tree
= isl_schedule_tree_list_get_schedule_tree(parent
->children
,
885 node
->child_pos
[n
- 1]);
886 isl_schedule_tree_free(parent
);
888 return isl_schedule_node_free(node
);
889 isl_schedule_tree_free(node
->tree
);
895 /* Return a copy to the child at position "pos" of "node".
897 __isl_give isl_schedule_node
*isl_schedule_node_get_child(
898 __isl_keep isl_schedule_node
*node
, int pos
)
900 return isl_schedule_node_child(isl_schedule_node_copy(node
), pos
);
903 /* Traverse the descendant of "node" in depth-first order, including
904 * "node" itself. Call "enter" whenever a node is entered and "leave"
905 * whenever a node is left. The callback "enter" is responsible
906 * for moving to the deepest initial subtree of its argument that
907 * should be traversed.
909 static __isl_give isl_schedule_node
*traverse(
910 __isl_take isl_schedule_node
*node
,
911 __isl_give isl_schedule_node
*(*enter
)(
912 __isl_take isl_schedule_node
*node
, void *user
),
913 __isl_give isl_schedule_node
*(*leave
)(
914 __isl_take isl_schedule_node
*node
, void *user
),
922 depth
= isl_schedule_node_get_tree_depth(node
);
924 node
= enter(node
, user
);
925 node
= leave(node
, user
);
926 while (node
&& isl_schedule_node_get_tree_depth(node
) > depth
&&
927 !isl_schedule_node_has_next_sibling(node
)) {
928 node
= isl_schedule_node_parent(node
);
929 node
= leave(node
, user
);
931 if (node
&& isl_schedule_node_get_tree_depth(node
) > depth
)
932 node
= isl_schedule_node_next_sibling(node
);
933 } while (node
&& isl_schedule_node_get_tree_depth(node
) > depth
);
938 /* Internal data structure for isl_schedule_node_foreach_descendant.
940 * "fn" is the user-specified callback function.
941 * "user" is the user-specified argument for the callback.
943 struct isl_schedule_node_preorder_data
{
944 int (*fn
)(__isl_keep isl_schedule_node
*node
, void *user
);
948 /* Callback for "traverse" to enter a node and to move
949 * to the deepest initial subtree that should be traversed
950 * for use in a preorder visit.
952 * If the user callback returns a negative value, then we abort
953 * the traversal. If this callback returns zero, then we skip
954 * the subtree rooted at the current node. Otherwise, we move
955 * down to the first child and repeat the process until a leaf
958 static __isl_give isl_schedule_node
*preorder_enter(
959 __isl_take isl_schedule_node
*node
, void *user
)
961 struct isl_schedule_node_preorder_data
*data
= user
;
969 r
= data
->fn(node
, data
->user
);
971 return isl_schedule_node_free(node
);
974 } while (isl_schedule_node_has_children(node
) &&
975 (node
= isl_schedule_node_first_child(node
)) != NULL
);
980 /* Callback for "traverse" to leave a node
981 * for use in a preorder visit.
982 * Since we already visited the node when we entered it,
983 * we do not need to do anything here.
985 static __isl_give isl_schedule_node
*preorder_leave(
986 __isl_take isl_schedule_node
*node
, void *user
)
991 /* Traverse the descendants of "node" (including the node itself)
992 * in depth first preorder.
994 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
995 * If "fn" returns 0 on any of the nodes, then the subtree rooted
996 * at that node is skipped.
998 * Return 0 on success and -1 on failure.
1000 int isl_schedule_node_foreach_descendant(__isl_keep isl_schedule_node
*node
,
1001 int (*fn
)(__isl_keep isl_schedule_node
*node
, void *user
), void *user
)
1003 struct isl_schedule_node_preorder_data data
= { fn
, user
};
1005 node
= isl_schedule_node_copy(node
);
1006 node
= traverse(node
, &preorder_enter
, &preorder_leave
, &data
);
1007 isl_schedule_node_free(node
);
1009 return node
? 0 : -1;
1012 /* Internal data structure for isl_schedule_node_map_descendant.
1014 * "fn" is the user-specified callback function.
1015 * "user" is the user-specified argument for the callback.
1017 struct isl_schedule_node_postorder_data
{
1018 __isl_give isl_schedule_node
*(*fn
)(__isl_take isl_schedule_node
*node
,
1023 /* Callback for "traverse" to enter a node and to move
1024 * to the deepest initial subtree that should be traversed
1025 * for use in a postorder visit.
1027 * Since we are performing a postorder visit, we only need
1028 * to move to the deepest initial leaf here.
1030 static __isl_give isl_schedule_node
*postorder_enter(
1031 __isl_take isl_schedule_node
*node
, void *user
)
1033 while (node
&& isl_schedule_node_has_children(node
))
1034 node
= isl_schedule_node_first_child(node
);
1039 /* Callback for "traverse" to leave a node
1040 * for use in a postorder visit.
1042 * Since we are performing a postorder visit, we need
1043 * to call the user callback here.
1045 static __isl_give isl_schedule_node
*postorder_leave(
1046 __isl_take isl_schedule_node
*node
, void *user
)
1048 struct isl_schedule_node_postorder_data
*data
= user
;
1050 return data
->fn(node
, data
->user
);
1053 /* Traverse the descendants of "node" (including the node itself)
1054 * in depth first postorder, allowing the user to modify the visited node.
1055 * The traversal continues from the node returned by the callback function.
1056 * It is the responsibility of the user to ensure that this does not
1057 * lead to an infinite loop. It is safest to always return a pointer
1058 * to the same position (same ancestors and child positions) as the input node.
1060 __isl_give isl_schedule_node
*isl_schedule_node_map_descendant(
1061 __isl_take isl_schedule_node
*node
,
1062 __isl_give isl_schedule_node
*(*fn
)(__isl_take isl_schedule_node
*node
,
1063 void *user
), void *user
)
1065 struct isl_schedule_node_postorder_data data
= { fn
, user
};
1067 return traverse(node
, &postorder_enter
, &postorder_leave
, &data
);
1070 /* Traverse the ancestors of "node" from the root down to and including
1071 * the parent of "node", calling "fn" on each of them.
1073 * If "fn" returns -1 on any of the nodes, then the traversal is aborted.
1075 * Return 0 on success and -1 on failure.
1077 int isl_schedule_node_foreach_ancestor_top_down(
1078 __isl_keep isl_schedule_node
*node
,
1079 int (*fn
)(__isl_keep isl_schedule_node
*node
, void *user
), void *user
)
1086 n
= isl_schedule_node_get_tree_depth(node
);
1087 for (i
= 0; i
< n
; ++i
) {
1088 isl_schedule_node
*ancestor
;
1091 ancestor
= isl_schedule_node_copy(node
);
1092 ancestor
= isl_schedule_node_ancestor(ancestor
, n
- i
);
1093 r
= fn(ancestor
, user
);
1094 isl_schedule_node_free(ancestor
);
1102 /* Return the number of members in the given band node.
1104 unsigned isl_schedule_node_band_n_member(__isl_keep isl_schedule_node
*node
)
1106 return node
? isl_schedule_tree_band_n_member(node
->tree
) : 0;
1109 /* Is the band member at position "pos" of the band node "node"
1110 * marked coincident?
1112 int isl_schedule_node_band_member_get_coincident(
1113 __isl_keep isl_schedule_node
*node
, int pos
)
1117 return isl_schedule_tree_band_member_get_coincident(node
->tree
, pos
);
1120 /* Mark the band member at position "pos" the band node "node"
1121 * as being coincident or not according to "coincident".
1123 __isl_give isl_schedule_node
*isl_schedule_node_band_member_set_coincident(
1124 __isl_take isl_schedule_node
*node
, int pos
, int coincident
)
1127 isl_schedule_tree
*tree
;
1131 c
= isl_schedule_node_band_member_get_coincident(node
, pos
);
1132 if (c
== coincident
)
1135 tree
= isl_schedule_tree_copy(node
->tree
);
1136 tree
= isl_schedule_tree_band_member_set_coincident(tree
, pos
,
1138 node
= isl_schedule_node_graft_tree(node
, tree
);
1143 /* Is the band node "node" marked permutable?
1145 int isl_schedule_node_band_get_permutable(__isl_keep isl_schedule_node
*node
)
1150 return isl_schedule_tree_band_get_permutable(node
->tree
);
1153 /* Mark the band node "node" permutable or not according to "permutable"?
1155 __isl_give isl_schedule_node
*isl_schedule_node_band_set_permutable(
1156 __isl_take isl_schedule_node
*node
, int permutable
)
1158 isl_schedule_tree
*tree
;
1162 if (isl_schedule_node_band_get_permutable(node
) == permutable
)
1165 tree
= isl_schedule_tree_copy(node
->tree
);
1166 tree
= isl_schedule_tree_band_set_permutable(tree
, permutable
);
1167 node
= isl_schedule_node_graft_tree(node
, tree
);
1172 /* Return the schedule space of the band node.
1174 __isl_give isl_space
*isl_schedule_node_band_get_space(
1175 __isl_keep isl_schedule_node
*node
)
1180 return isl_schedule_tree_band_get_space(node
->tree
);
1183 /* Return the schedule of the band node in isolation.
1185 __isl_give isl_multi_union_pw_aff
*isl_schedule_node_band_get_partial_schedule(
1186 __isl_keep isl_schedule_node
*node
)
1191 return isl_schedule_tree_band_get_partial_schedule(node
->tree
);
1194 /* Return the schedule of the band node in isolation in the form of
1197 * If the band does not have any members, then we construct a universe map
1198 * with the universe of the domain elements reaching the node as domain.
1199 * Otherwise, we extract an isl_multi_union_pw_aff representation and
1200 * convert that to an isl_union_map.
1202 __isl_give isl_union_map
*isl_schedule_node_band_get_partial_schedule_union_map(
1203 __isl_keep isl_schedule_node
*node
)
1205 isl_multi_union_pw_aff
*mupa
;
1210 if (isl_schedule_node_get_type(node
) != isl_schedule_node_band
)
1211 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1212 "not a band node", return NULL
);
1213 if (isl_schedule_node_band_n_member(node
) == 0) {
1214 isl_union_set
*domain
;
1216 domain
= isl_schedule_node_get_universe_domain(node
);
1217 return isl_union_map_from_domain(domain
);
1220 mupa
= isl_schedule_node_band_get_partial_schedule(node
);
1221 return isl_union_map_from_multi_union_pw_aff(mupa
);
1224 /* Make sure that that spaces of "node" and "mv" are the same.
1225 * Return -1 on error, reporting the error to the user.
1227 static int check_space_multi_val(__isl_keep isl_schedule_node
*node
,
1228 __isl_keep isl_multi_val
*mv
)
1230 isl_space
*node_space
, *mv_space
;
1233 node_space
= isl_schedule_node_band_get_space(node
);
1234 mv_space
= isl_multi_val_get_space(mv
);
1235 equal
= isl_space_tuple_is_equal(node_space
, isl_dim_set
,
1236 mv_space
, isl_dim_set
);
1237 isl_space_free(mv_space
);
1238 isl_space_free(node_space
);
1242 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1243 "spaces don't match", return -1);
1248 /* Multiply the partial schedule of the band node "node"
1249 * with the factors in "mv".
1251 __isl_give isl_schedule_node
*isl_schedule_node_band_scale(
1252 __isl_take isl_schedule_node
*node
, __isl_take isl_multi_val
*mv
)
1254 isl_schedule_tree
*tree
;
1258 if (check_space_multi_val(node
, mv
) < 0)
1261 tree
= isl_schedule_node_get_tree(node
);
1262 tree
= isl_schedule_tree_band_scale(tree
, mv
);
1263 return isl_schedule_node_graft_tree(node
, tree
);
1265 isl_multi_val_free(mv
);
1266 isl_schedule_node_free(node
);
1270 /* Divide the partial schedule of the band node "node"
1271 * by the factors in "mv".
1273 __isl_give isl_schedule_node
*isl_schedule_node_band_scale_down(
1274 __isl_take isl_schedule_node
*node
, __isl_take isl_multi_val
*mv
)
1276 isl_schedule_tree
*tree
;
1280 if (check_space_multi_val(node
, mv
) < 0)
1283 tree
= isl_schedule_node_get_tree(node
);
1284 tree
= isl_schedule_tree_band_scale_down(tree
, mv
);
1285 return isl_schedule_node_graft_tree(node
, tree
);
1287 isl_multi_val_free(mv
);
1288 isl_schedule_node_free(node
);
1292 /* Tile "node" with tile sizes "sizes".
1294 * The current node is replaced by two nested nodes corresponding
1295 * to the tile dimensions and the point dimensions.
1297 * Return a pointer to the outer (tile) node.
1299 * If the scale tile loops option is set, then the tile loops
1300 * are scaled by the tile sizes. If the shift point loops option is set,
1301 * then the point loops are shifted to start at zero.
1302 * In particular, these options affect the tile and point loop schedules
1305 * scale shift original tile point
1307 * 0 0 i floor(i/s) i
1308 * 1 0 i s * floor(i/s) i
1309 * 0 1 i floor(i/s) i - s * floor(i/s)
1310 * 1 1 i s * floor(i/s) i - s * floor(i/s)
1312 __isl_give isl_schedule_node
*isl_schedule_node_band_tile(
1313 __isl_take isl_schedule_node
*node
, __isl_take isl_multi_val
*sizes
)
1315 isl_schedule_tree
*tree
;
1317 if (!node
|| !sizes
)
1320 if (check_space_multi_val(node
, sizes
) < 0)
1323 tree
= isl_schedule_node_get_tree(node
);
1324 tree
= isl_schedule_tree_band_tile(tree
, sizes
);
1325 return isl_schedule_node_graft_tree(node
, tree
);
1327 isl_multi_val_free(sizes
);
1328 isl_schedule_node_free(node
);
1332 /* Move the band node "node" down to all the leaves in the subtree
1334 * Return a pointer to the node in the resulting tree that is in the same
1335 * position as the node pointed to by "node" in the original tree.
1337 * If the node only has a leaf child, then nothing needs to be done.
1338 * Otherwise, the child of the node is removed and the result is
1339 * appended to all the leaves in the subtree rooted at the original child.
1340 * The original node is then replaced by the result of this operation.
1342 __isl_give isl_schedule_node
*isl_schedule_node_band_sink(
1343 __isl_take isl_schedule_node
*node
)
1345 enum isl_schedule_node_type type
;
1346 isl_schedule_tree
*tree
, *child
;
1351 type
= isl_schedule_node_get_type(node
);
1352 if (type
!= isl_schedule_node_band
)
1353 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1354 "not a band node", isl_schedule_node_free(node
));
1355 if (isl_schedule_tree_n_children(node
->tree
) == 0)
1358 tree
= isl_schedule_node_get_tree(node
);
1359 child
= isl_schedule_tree_get_child(tree
, 0);
1360 tree
= isl_schedule_tree_reset_children(tree
);
1361 tree
= isl_schedule_tree_append_to_leaves(child
, tree
);
1363 return isl_schedule_node_graft_tree(node
, tree
);
1366 /* Split "node" into two nested band nodes, one with the first "pos"
1367 * dimensions and one with the remaining dimensions.
1368 * The schedules of the two band nodes live in anonymous spaces.
1370 __isl_give isl_schedule_node
*isl_schedule_node_band_split(
1371 __isl_take isl_schedule_node
*node
, int pos
)
1373 isl_schedule_tree
*tree
;
1375 tree
= isl_schedule_node_get_tree(node
);
1376 tree
= isl_schedule_tree_band_split(tree
, pos
);
1377 return isl_schedule_node_graft_tree(node
, tree
);
1380 /* Return the domain of the domain node "node".
1382 __isl_give isl_union_set
*isl_schedule_node_domain_get_domain(
1383 __isl_keep isl_schedule_node
*node
)
1388 return isl_schedule_tree_domain_get_domain(node
->tree
);
1391 /* Return the filter of the filter node "node".
1393 __isl_give isl_union_set
*isl_schedule_node_filter_get_filter(
1394 __isl_keep isl_schedule_node
*node
)
1399 return isl_schedule_tree_filter_get_filter(node
->tree
);
1402 /* Replace the filter of filter node "node" by "filter".
1404 __isl_give isl_schedule_node
*isl_schedule_node_filter_set_filter(
1405 __isl_take isl_schedule_node
*node
, __isl_take isl_union_set
*filter
)
1407 isl_schedule_tree
*tree
;
1409 if (!node
|| !filter
)
1412 tree
= isl_schedule_tree_copy(node
->tree
);
1413 tree
= isl_schedule_tree_filter_set_filter(tree
, filter
);
1414 return isl_schedule_node_graft_tree(node
, tree
);
1416 isl_schedule_node_free(node
);
1417 isl_union_set_free(filter
);
1421 /* Update the ancestors of "node" to point to the tree that "node"
1423 * That is, replace the child in the original parent that corresponds
1424 * to the current tree position by node->tree and continue updating
1425 * the ancestors in the same way until the root is reached.
1427 * If "node" originally points to a leaf of the schedule tree, then make sure
1428 * that in the end it points to a leaf in the updated schedule tree.
1430 static __isl_give isl_schedule_node
*update_ancestors(
1431 __isl_take isl_schedule_node
*node
)
1436 isl_schedule_tree
*tree
;
1438 node
= isl_schedule_node_cow(node
);
1442 ctx
= isl_schedule_node_get_ctx(node
);
1443 n
= isl_schedule_tree_list_n_schedule_tree(node
->ancestors
);
1444 tree
= isl_schedule_tree_copy(node
->tree
);
1446 for (i
= n
- 1; i
>= 0; --i
) {
1447 isl_schedule_tree
*parent
;
1449 parent
= isl_schedule_tree_list_get_schedule_tree(
1450 node
->ancestors
, i
);
1451 parent
= isl_schedule_tree_replace_child(parent
,
1452 node
->child_pos
[i
], tree
);
1453 node
->ancestors
= isl_schedule_tree_list_set_schedule_tree(
1454 node
->ancestors
, i
, isl_schedule_tree_copy(parent
));
1459 is_leaf
= isl_schedule_tree_is_leaf(node
->tree
);
1460 node
->schedule
= isl_schedule_set_root(node
->schedule
, tree
);
1462 isl_schedule_tree_free(node
->tree
);
1463 node
->tree
= isl_schedule_node_get_leaf(node
);
1466 if (!node
->schedule
|| !node
->ancestors
)
1467 return isl_schedule_node_free(node
);
1472 /* Replace the subtree that "pos" points to by "tree", updating
1473 * the ancestors to maintain a consistent state.
1475 __isl_give isl_schedule_node
*isl_schedule_node_graft_tree(
1476 __isl_take isl_schedule_node
*pos
, __isl_take isl_schedule_tree
*tree
)
1480 if (pos
->tree
== tree
) {
1481 isl_schedule_tree_free(tree
);
1485 pos
= isl_schedule_node_cow(pos
);
1489 isl_schedule_tree_free(pos
->tree
);
1492 return update_ancestors(pos
);
1494 isl_schedule_node_free(pos
);
1495 isl_schedule_tree_free(tree
);
1499 /* Make sure we can insert a node between "node" and its parent.
1500 * Return -1 on error, reporting the reason why we cannot insert a node.
1502 static int check_insert(__isl_keep isl_schedule_node
*node
)
1505 enum isl_schedule_node_type type
;
1507 has_parent
= isl_schedule_node_has_parent(node
);
1511 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1512 "cannot insert node outside of root", return -1);
1514 type
= isl_schedule_node_get_parent_type(node
);
1515 if (type
== isl_schedule_node_error
)
1517 if (type
== isl_schedule_node_set
|| type
== isl_schedule_node_sequence
)
1518 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1519 "cannot insert node between set or sequence node "
1520 "and its filter children", return -1);
1525 /* Insert a band node with partial schedule "mupa" between "node" and
1527 * Return a pointer to the new band node.
1529 __isl_give isl_schedule_node
*isl_schedule_node_insert_partial_schedule(
1530 __isl_take isl_schedule_node
*node
,
1531 __isl_take isl_multi_union_pw_aff
*mupa
)
1533 isl_schedule_band
*band
;
1534 isl_schedule_tree
*tree
;
1536 if (check_insert(node
) < 0)
1537 node
= isl_schedule_node_free(node
);
1539 tree
= isl_schedule_node_get_tree(node
);
1540 band
= isl_schedule_band_from_multi_union_pw_aff(mupa
);
1541 tree
= isl_schedule_tree_insert_band(tree
, band
);
1542 node
= isl_schedule_node_graft_tree(node
, tree
);
1547 /* Insert a filter node with filter "filter" between "node" and its parent.
1548 * Return a pointer to the new filter node.
1550 __isl_give isl_schedule_node
*isl_schedule_node_insert_filter(
1551 __isl_take isl_schedule_node
*node
, __isl_take isl_union_set
*filter
)
1553 isl_schedule_tree
*tree
;
1555 if (check_insert(node
) < 0)
1556 node
= isl_schedule_node_free(node
);
1558 tree
= isl_schedule_node_get_tree(node
);
1559 tree
= isl_schedule_tree_insert_filter(tree
, filter
);
1560 node
= isl_schedule_node_graft_tree(node
, tree
);
1565 /* Attach the current subtree of "node" to a sequence of filter tree nodes
1566 * with filters described by "filters", attach this sequence
1567 * of filter tree nodes as children to a new tree of type "type" and
1568 * replace the original subtree of "node" by this new tree.
1570 static __isl_give isl_schedule_node
*isl_schedule_node_insert_children(
1571 __isl_take isl_schedule_node
*node
,
1572 enum isl_schedule_node_type type
,
1573 __isl_take isl_union_set_list
*filters
)
1577 isl_schedule_tree
*tree
;
1578 isl_schedule_tree_list
*list
;
1580 if (check_insert(node
) < 0)
1581 node
= isl_schedule_node_free(node
);
1583 if (!node
|| !filters
)
1586 ctx
= isl_schedule_node_get_ctx(node
);
1587 n
= isl_union_set_list_n_union_set(filters
);
1588 list
= isl_schedule_tree_list_alloc(ctx
, n
);
1589 for (i
= 0; i
< n
; ++i
) {
1590 isl_schedule_tree
*tree
;
1591 isl_union_set
*filter
;
1593 tree
= isl_schedule_node_get_tree(node
);
1594 filter
= isl_union_set_list_get_union_set(filters
, i
);
1595 tree
= isl_schedule_tree_insert_filter(tree
, filter
);
1596 list
= isl_schedule_tree_list_add(list
, tree
);
1598 tree
= isl_schedule_tree_from_children(type
, list
);
1599 node
= isl_schedule_node_graft_tree(node
, tree
);
1601 isl_union_set_list_free(filters
);
1604 isl_union_set_list_free(filters
);
1605 isl_schedule_node_free(node
);
1609 /* Insert a sequence node with child filters "filters" between "node" and
1610 * its parent. That is, the tree that "node" points to is attached
1611 * to each of the child nodes of the filter nodes.
1612 * Return a pointer to the new sequence node.
1614 __isl_give isl_schedule_node
*isl_schedule_node_insert_sequence(
1615 __isl_take isl_schedule_node
*node
,
1616 __isl_take isl_union_set_list
*filters
)
1618 return isl_schedule_node_insert_children(node
,
1619 isl_schedule_node_sequence
, filters
);
1622 /* Insert a set node with child filters "filters" between "node" and
1623 * its parent. That is, the tree that "node" points to is attached
1624 * to each of the child nodes of the filter nodes.
1625 * Return a pointer to the new set node.
1627 __isl_give isl_schedule_node
*isl_schedule_node_insert_set(
1628 __isl_take isl_schedule_node
*node
,
1629 __isl_take isl_union_set_list
*filters
)
1631 return isl_schedule_node_insert_children(node
,
1632 isl_schedule_node_set
, filters
);
1635 /* Reset the user pointer on all identifiers of parameters and tuples
1636 * in the schedule node "node".
1638 __isl_give isl_schedule_node
*isl_schedule_node_reset_user(
1639 __isl_take isl_schedule_node
*node
)
1641 isl_schedule_tree
*tree
;
1643 tree
= isl_schedule_node_get_tree(node
);
1644 tree
= isl_schedule_tree_reset_user(tree
);
1645 node
= isl_schedule_node_graft_tree(node
, tree
);
1650 /* Align the parameters of the schedule node "node" to those of "space".
1652 __isl_give isl_schedule_node
*isl_schedule_node_align_params(
1653 __isl_take isl_schedule_node
*node
, __isl_take isl_space
*space
)
1655 isl_schedule_tree
*tree
;
1657 tree
= isl_schedule_node_get_tree(node
);
1658 tree
= isl_schedule_tree_align_params(tree
, space
);
1659 node
= isl_schedule_node_graft_tree(node
, tree
);
1664 /* Compute the pullback of schedule node "node"
1665 * by the function represented by "upma".
1666 * In other words, plug in "upma" in the iteration domains
1667 * of schedule node "node".
1669 * Note that this is only a helper function for
1670 * isl_schedule_pullback_union_pw_multi_aff. In order to maintain consistency,
1671 * this function should not be called on a single node without also
1672 * calling it on all the other nodes.
1674 __isl_give isl_schedule_node
*isl_schedule_node_pullback_union_pw_multi_aff(
1675 __isl_take isl_schedule_node
*node
,
1676 __isl_take isl_union_pw_multi_aff
*upma
)
1678 isl_schedule_tree
*tree
;
1680 tree
= isl_schedule_node_get_tree(node
);
1681 tree
= isl_schedule_tree_pullback_union_pw_multi_aff(tree
, upma
);
1682 node
= isl_schedule_node_graft_tree(node
, tree
);
1687 /* Return the position of the subtree containing "node" among the children
1688 * of "ancestor". "node" is assumed to be a descendant of "ancestor".
1689 * In particular, both nodes should point to the same schedule tree.
1691 * Return -1 on error.
1693 int isl_schedule_node_get_ancestor_child_position(
1694 __isl_keep isl_schedule_node
*node
,
1695 __isl_keep isl_schedule_node
*ancestor
)
1698 isl_schedule_tree
*tree
;
1700 if (!node
|| !ancestor
)
1703 if (node
->schedule
!= ancestor
->schedule
)
1704 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1705 "not a descendant", return -1);
1707 n1
= isl_schedule_node_get_tree_depth(ancestor
);
1708 n2
= isl_schedule_node_get_tree_depth(node
);
1711 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1712 "not a descendant", return -1);
1713 tree
= isl_schedule_tree_list_get_schedule_tree(node
->ancestors
, n1
);
1714 isl_schedule_tree_free(tree
);
1715 if (tree
!= ancestor
->tree
)
1716 isl_die(isl_schedule_node_get_ctx(node
), isl_error_invalid
,
1717 "not a descendant", return -1);
1719 return node
->child_pos
[n1
];
1722 /* Given two nodes that point to the same schedule tree, return their
1723 * closest shared ancestor.
1725 * Since the two nodes point to the same schedule, they share at least
1726 * one ancestor, the root of the schedule. We move down from the root
1727 * to the first ancestor where the respective children have a different
1728 * child position. This is the requested ancestor.
1729 * If there is no ancestor where the children have a different position,
1730 * then one node is an ancestor of the other and then this node is
1731 * the requested ancestor.
1733 __isl_give isl_schedule_node
*isl_schedule_node_get_shared_ancestor(
1734 __isl_keep isl_schedule_node
*node1
,
1735 __isl_keep isl_schedule_node
*node2
)
1739 if (!node1
|| !node2
)
1741 if (node1
->schedule
!= node2
->schedule
)
1742 isl_die(isl_schedule_node_get_ctx(node1
), isl_error_invalid
,
1743 "not part of same schedule", return NULL
);
1744 n1
= isl_schedule_node_get_tree_depth(node1
);
1745 n2
= isl_schedule_node_get_tree_depth(node2
);
1747 return isl_schedule_node_get_shared_ancestor(node2
, node1
);
1749 return isl_schedule_node_copy(node1
);
1750 if (isl_schedule_node_is_equal(node1
, node2
))
1751 return isl_schedule_node_copy(node1
);
1753 for (i
= 0; i
< n1
; ++i
)
1754 if (node1
->child_pos
[i
] != node2
->child_pos
[i
])
1757 node1
= isl_schedule_node_copy(node1
);
1758 return isl_schedule_node_ancestor(node1
, n1
- i
);
1761 /* Print "node" to "p".
1763 __isl_give isl_printer
*isl_printer_print_schedule_node(
1764 __isl_take isl_printer
*p
, __isl_keep isl_schedule_node
*node
)
1767 return isl_printer_free(p
);
1768 return isl_printer_print_schedule_tree_mark(p
, node
->schedule
->root
,
1769 isl_schedule_tree_list_n_schedule_tree(node
->ancestors
),
1773 void isl_schedule_node_dump(__isl_keep isl_schedule_node
*node
)
1776 isl_printer
*printer
;
1781 ctx
= isl_schedule_node_get_ctx(node
);
1782 printer
= isl_printer_to_file(ctx
, stderr
);
1783 printer
= isl_printer_set_yaml_style(printer
, ISL_YAML_STYLE_BLOCK
);
1784 printer
= isl_printer_print_schedule_node(printer
, node
);
1786 isl_printer_free(printer
);