add isl_schedule_node_band_scale_down
[isl.git] / isl_schedule_node.c

/*
 * Copyright 2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <isl/set.h>
#include <isl_schedule_band.h>
#include <isl_schedule_private.h>
#include <isl_schedule_node_private.h>

/* Create a new schedule node in the given schedule, pointing at the given
 * tree with given ancestors and child positions.
 * "child_pos" may be NULL if there are no ancestors.
 */
__isl_give isl_schedule_node *isl_schedule_node_alloc(
	__isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree,
	__isl_take isl_schedule_tree_list *ancestors, int *child_pos)
{
	isl_ctx *ctx;
	isl_schedule_node *node;
	int i, n;

	if (!schedule || !tree || !ancestors)
		goto error;
	n = isl_schedule_tree_list_n_schedule_tree(ancestors);
	if (n > 0 && !child_pos)
		goto error;
	ctx = isl_schedule_get_ctx(schedule);
	node = isl_calloc_type(ctx, isl_schedule_node);
	if (!node)
		goto error;
	node->ref = 1;
	node->schedule = schedule;
	node->tree = tree;
	node->ancestors = ancestors;
	node->child_pos = isl_alloc_array(ctx, int, n);
	if (n && !node->child_pos)
		return isl_schedule_node_free(node);
	for (i = 0; i < n; ++i)
		node->child_pos[i] = child_pos[i];

	return node;
error:
	isl_schedule_free(schedule);
	isl_schedule_tree_free(tree);
	isl_schedule_tree_list_free(ancestors);
	return NULL;
}

/* Return a pointer to the root of a schedule tree with as single
 * node a domain node with the given domain.
 */
__isl_give isl_schedule_node *isl_schedule_node_from_domain(
	__isl_take isl_union_set *domain)
{
	isl_schedule *schedule;
	isl_schedule_node *node;

	schedule = isl_schedule_from_domain(domain);
	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);

	return node;
}

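/* A minimal usage sketch (the context "ctx" and the domain string are
 * illustrative, not taken from this file): build a schedule node from
 * a domain and check that it points at a domain node.
 *
 *	isl_union_set *domain;
 *	isl_schedule_node *node;
 *
 *	domain = isl_union_set_read_from_str(ctx, "{ S[i] : 0 <= i < 10 }");
 *	node = isl_schedule_node_from_domain(domain);
 *	// isl_schedule_node_get_type(node) == isl_schedule_node_domain
 *	isl_schedule_node_free(node);
 */
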
/* Return the isl_ctx to which "node" belongs.
 */
isl_ctx *isl_schedule_node_get_ctx(__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_get_ctx(node->schedule) : NULL;
}

/* Return a pointer to the leaf of the schedule into which "node" points.
 *
 * Even though these leaves are not reference counted, we still
 * indicate that this function does not return a copy.
 */
__isl_keep isl_schedule_tree *isl_schedule_node_peek_leaf(
	__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_peek_leaf(node->schedule) : NULL;
}

/* Return a pointer to the leaf of the schedule into which "node" points.
 *
 * Even though these leaves are not reference counted, we still
 * return a "copy" of the leaf here such that it can still be "freed"
 * by the user.
 */
__isl_give isl_schedule_tree *isl_schedule_node_get_leaf(
	__isl_keep isl_schedule_node *node)
{
	return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node));
}

/* Return the type of the node or isl_schedule_node_error on error.
 */
enum isl_schedule_node_type isl_schedule_node_get_type(
	__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_tree_get_type(node->tree)
		    : isl_schedule_node_error;
}

/* Return the type of the parent of "node" or isl_schedule_node_error on error.
 */
enum isl_schedule_node_type isl_schedule_node_get_parent_type(
	__isl_keep isl_schedule_node *node)
{
	int pos;
	int has_parent;
	isl_schedule_tree *parent;
	enum isl_schedule_node_type type;

	if (!node)
		return isl_schedule_node_error;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0)
		return isl_schedule_node_error;
	if (!has_parent)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no parent", return isl_schedule_node_error);

	pos = isl_schedule_tree_list_n_schedule_tree(node->ancestors) - 1;
	parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors, pos);
	type = isl_schedule_tree_get_type(parent);
	isl_schedule_tree_free(parent);

	return type;
}

/* Return a copy of the subtree that this node points to.
 */
__isl_give isl_schedule_tree *isl_schedule_node_get_tree(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_tree_copy(node->tree);
}

/* Return a copy of the schedule into which "node" points.
 */
__isl_give isl_schedule *isl_schedule_node_get_schedule(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;
	return isl_schedule_copy(node->schedule);
}

/* Return a fresh copy of "node".
 */
__isl_take isl_schedule_node *isl_schedule_node_dup(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_node_alloc(isl_schedule_copy(node->schedule),
				isl_schedule_tree_copy(node->tree),
				isl_schedule_tree_list_copy(node->ancestors),
				node->child_pos);
}

/* Return an isl_schedule_node that is equal to "node" and that has only
 * a single reference.
 */
__isl_give isl_schedule_node *isl_schedule_node_cow(
	__isl_take isl_schedule_node *node)
{
	if (!node)
		return NULL;

	if (node->ref == 1)
		return node;
	node->ref--;
	return isl_schedule_node_dup(node);
}

/* Return a new reference to "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_copy(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	node->ref++;
	return node;
}

/* Free "node" and return NULL.
 *
 * Since the node may point to a leaf of its schedule, which
 * points to a field inside the schedule, we need to make sure
 * we free the tree before freeing the schedule.
 */
__isl_null isl_schedule_node *isl_schedule_node_free(
	__isl_take isl_schedule_node *node)
{
	if (!node)
		return NULL;
	if (--node->ref > 0)
		return NULL;

	isl_schedule_tree_list_free(node->ancestors);
	free(node->child_pos);
	isl_schedule_tree_free(node->tree);
	isl_schedule_free(node->schedule);
	free(node);

	return NULL;
}

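/* A sketch of the resulting reference-counting behavior (the band call is
 * only an example of a function that modifies "n1" through
 * isl_schedule_node_cow):
 *
 *	n2 = isl_schedule_node_copy(n1);	// n1 and n2 share one object
 *	n1 = isl_schedule_node_band_set_permutable(n1, 1);
 *						// cow duplicates before writing
 *	isl_schedule_node_free(n2);		// frees the object still held by n2
 */
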
/* Internal data structure for
 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
 *
 * "initialized" is set if the filter field has been initialized.
 * "universe_filter" is set if we are only collecting the universes of filters.
 * "collect_prefix" is set if we are collecting prefixes.
 * "filter" collects all outer filters and is NULL until "initialized" is set.
 * "prefix" collects all outer band partial schedules (if "collect_prefix"
 * is set). If it is used, then it is initialized by the caller
 * of collect_filter_prefix to a zero-dimensional function.
 */
struct isl_schedule_node_get_filter_prefix_data {
	int initialized;
	int universe_filter;
	int collect_prefix;
	isl_union_set *filter;
	isl_multi_union_pw_aff *prefix;
};

/* Update "data" based on the tree node "tree" in case "data" has
 * not been initialized yet.
 *
 * Return 0 on success and -1 on error.
 *
 * If "tree" is a filter, then we set data->filter to this filter
 * (or its universe).
 * If "tree" is a domain, then this means we have reached the root
 * of the schedule tree without being able to extract any information.
 * We therefore initialize data->filter to the universe of the domain.
 * If "tree" is a band with at least one member, then we set data->filter
 * to the universe of the schedule domain and replace the zero-dimensional
 * data->prefix by the band schedule (if data->collect_prefix is set).
 */
static int collect_filter_prefix_init(__isl_keep isl_schedule_tree *tree,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	enum isl_schedule_node_type type;
	isl_multi_union_pw_aff *mupa;
	isl_union_set *filter;

	type = isl_schedule_tree_get_type(tree);
	switch (type) {
	case isl_schedule_node_error:
		return -1;
	case isl_schedule_node_leaf:
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		return 0;
	case isl_schedule_node_domain:
		filter = isl_schedule_tree_domain_get_domain(tree);
		filter = isl_union_set_universe(filter);
		data->filter = filter;
		break;
	case isl_schedule_node_band:
		if (isl_schedule_tree_band_n_member(tree) == 0)
			return 0;
		mupa = isl_schedule_tree_band_get_partial_schedule(tree);
		if (data->collect_prefix) {
			isl_multi_union_pw_aff_free(data->prefix);
			mupa = isl_multi_union_pw_aff_reset_tuple_id(mupa,
								isl_dim_set);
			data->prefix = isl_multi_union_pw_aff_copy(mupa);
		}
		filter = isl_multi_union_pw_aff_domain(mupa);
		filter = isl_union_set_universe(filter);
		data->filter = filter;
		break;
	case isl_schedule_node_filter:
		filter = isl_schedule_tree_filter_get_filter(tree);
		if (data->universe_filter)
			filter = isl_union_set_universe(filter);
		data->filter = filter;
		break;
	}

	if ((data->collect_prefix && !data->prefix) || !data->filter)
		return -1;

	data->initialized = 1;

	return 0;
}

/* Update "data" based on the tree node "tree" in case "data" has
 * already been initialized.
 *
 * Return 0 on success and -1 on error.
 *
 * If "tree" is a filter, then we intersect data->filter with this filter
 * (or its universe).
 * If "tree" is a band with at least one member and data->collect_prefix
 * is set, then we extend data->prefix with the band schedule.
 */
static int collect_filter_prefix_update(__isl_keep isl_schedule_tree *tree,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	enum isl_schedule_node_type type;
	isl_multi_union_pw_aff *mupa;
	isl_union_set *filter;

	type = isl_schedule_tree_get_type(tree);
	switch (type) {
	case isl_schedule_node_error:
		return -1;
	case isl_schedule_node_domain:
	case isl_schedule_node_leaf:
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		break;
	case isl_schedule_node_band:
		if (isl_schedule_tree_band_n_member(tree) == 0)
			break;
		if (!data->collect_prefix)
			break;
		mupa = isl_schedule_tree_band_get_partial_schedule(tree);
		data->prefix = isl_multi_union_pw_aff_flat_range_product(mupa,
								data->prefix);
		if (!data->prefix)
			return -1;
		break;
	case isl_schedule_node_filter:
		filter = isl_schedule_tree_filter_get_filter(tree);
		if (data->universe_filter)
			filter = isl_union_set_universe(filter);
		data->filter = isl_union_set_intersect(data->filter, filter);
		if (!data->filter)
			return -1;
		break;
	}

	return 0;
}

/* Collect filter and/or prefix information from the elements
 * in "list" (which represent the ancestors of a node).
 * Store the results in "data".
 *
 * Return 0 on success and -1 on error.
 *
 * We traverse the list from innermost ancestor (last element)
 * to outermost ancestor (first element), calling collect_filter_prefix_init
 * on each node as long as we have not been able to extract any information
 * yet and collect_filter_prefix_update afterwards.
 * On successful return, data->initialized will be set since the outermost
 * ancestor is a domain node, which always results in an initialization.
 */
static int collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	int i, n;

	data->initialized = 0;
	data->filter = NULL;

	if (!list)
		return -1;

	n = isl_schedule_tree_list_n_schedule_tree(list);
	for (i = n - 1; i >= 0; --i) {
		isl_schedule_tree *tree;
		int r;

		tree = isl_schedule_tree_list_get_schedule_tree(list, i);
		if (!tree)
			return -1;
		if (!data->initialized)
			r = collect_filter_prefix_init(tree, data);
		else
			r = collect_filter_prefix_update(tree, data);
		isl_schedule_tree_free(tree);
		if (r < 0)
			return -1;
	}

	return 0;
}

/* Return the concatenation of the partial schedules of all outer band
 * nodes of "node" intersected with all outer filters
 * as an isl_union_pw_multi_aff.
 *
 * If "node" is pointing at the root of the schedule tree, then
 * there are no domain elements reaching the current node, so
 * we return an empty result.
 *
 * We collect all the filters and partial schedules in collect_filter_prefix.
 * The partial schedules are collected as an isl_multi_union_pw_aff.
 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
 * contain any domain information, so we construct the isl_union_pw_multi_aff
 * result as a zero-dimensional function on the collected filter.
 * Otherwise, we convert the isl_multi_union_pw_aff to
 * an isl_union_pw_multi_aff and intersect the domain with the filter.
 */
__isl_give isl_union_pw_multi_aff *
isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
	__isl_keep isl_schedule_node *node)
{
	isl_space *space;
	isl_union_pw_multi_aff *prefix;
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	space = isl_schedule_get_space(node->schedule);
	if (node->tree == node->schedule->root)
		return isl_union_pw_multi_aff_empty(space);

	space = isl_space_set_from_params(space);
	data.universe_filter = 0;
	data.collect_prefix = 1;
	data.prefix = isl_multi_union_pw_aff_zero(space);

	if (collect_filter_prefix(node->ancestors, &data) < 0)
		data.prefix = isl_multi_union_pw_aff_free(data.prefix);

	if (data.prefix &&
	    isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set) == 0) {
		isl_multi_union_pw_aff_free(data.prefix);
		prefix = isl_union_pw_multi_aff_from_domain(data.filter);
	} else {
		prefix =
		    isl_union_pw_multi_aff_from_multi_union_pw_aff(data.prefix);
		prefix = isl_union_pw_multi_aff_intersect_domain(prefix,
								data.filter);
	}

	return prefix;
}

/* Return the concatenation of the partial schedules of all outer band
 * nodes of "node" intersected with all outer filters
 * as an isl_union_map.
 */
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_union_map(
	__isl_keep isl_schedule_node *node)
{
	isl_union_pw_multi_aff *upma;

	upma = isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node);
	return isl_union_map_from_union_pw_multi_aff(upma);
}

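/* An illustrative sketch: for a node pointing at the leaf below a domain
 * node with domain { S[i] } and a band node with partial schedule
 * { S[i] -> [(i)] }, the call
 *
 *	umap = isl_schedule_node_get_prefix_schedule_union_map(node);
 *
 * is expected to return { S[i] -> [i] }, i.e., the concatenated outer band
 * schedules restricted to the universes of the outer filters.
 */
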
/* Return the union of universe sets of the domain elements that reach "node".
 *
 * If "node" is pointing at the root of the schedule tree, then
 * there are no domain elements reaching the current node, so
 * we return an empty result.
 *
 * Otherwise, we collect the universes of all filters reaching the node
 * in collect_filter_prefix.
 */
__isl_give isl_union_set *isl_schedule_node_get_universe_domain(
	__isl_keep isl_schedule_node *node)
{
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	if (node->tree == node->schedule->root) {
		isl_space *space;

		space = isl_schedule_get_space(node->schedule);
		return isl_union_set_empty(space);
	}

	data.universe_filter = 1;
	data.collect_prefix = 0;
	data.prefix = NULL;

	if (collect_filter_prefix(node->ancestors, &data) < 0)
		data.filter = isl_union_set_free(data.filter);

	return data.filter;
}

/* Return the subtree schedule of "node".
 *
 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
 * trees that do not contain any schedule information, we first
 * move down to the first relevant descendant and handle leaves ourselves.
 */
__isl_give isl_union_map *isl_schedule_node_get_subtree_schedule_union_map(
	__isl_keep isl_schedule_node *node)
{
	isl_schedule_tree *tree, *leaf;
	isl_union_map *umap;

	tree = isl_schedule_node_get_tree(node);
	leaf = isl_schedule_node_peek_leaf(node);
	tree = isl_schedule_tree_first_schedule_descendant(tree, leaf);
	if (!tree)
		return NULL;
	if (tree == leaf) {
		isl_union_set *domain;
		domain = isl_schedule_node_get_universe_domain(node);
		isl_schedule_tree_free(tree);
		return isl_union_map_from_domain(domain);
	}

	umap = isl_schedule_tree_get_subtree_schedule_union_map(tree);
	isl_schedule_tree_free(tree);
	return umap;
}

/* Does "node" have a parent?
 *
 * That is, does it point to any node of the schedule other than the root?
 */
int isl_schedule_node_has_parent(__isl_keep isl_schedule_node *node)
{
	if (!node)
		return -1;
	if (!node->ancestors)
		return -1;

	return isl_schedule_tree_list_n_schedule_tree(node->ancestors) != 0;
}

/* Return the position of "node" among the children of its parent.
 */
int isl_schedule_node_get_child_position(__isl_keep isl_schedule_node *node)
{
	int n;
	int has_parent;

	if (!node)
		return -1;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0)
		return -1;
	if (!has_parent)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no parent", return -1);

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	return node->child_pos[n - 1];
}

/* Does the parent (if any) of "node" have any children with a smaller child
 * position than this one?
 */
int isl_schedule_node_has_previous_sibling(__isl_keep isl_schedule_node *node)
{
	int n;
	int has_parent;

	if (!node)
		return -1;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0 || !has_parent)
		return has_parent;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);

	return node->child_pos[n - 1] > 0;
}

/* Does the parent (if any) of "node" have any children with a greater child
 * position than this one?
 */
int isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node *node)
{
	int n, n_child;
	int has_parent;
	isl_schedule_tree *tree;

	if (!node)
		return -1;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0 || !has_parent)
		return has_parent;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n - 1);
	if (!tree)
		return -1;
	n_child = isl_schedule_tree_list_n_schedule_tree(tree->children);
	isl_schedule_tree_free(tree);

	return node->child_pos[n - 1] + 1 < n_child;
}

/* Does "node" have any children?
 *
 * Any node other than the leaf nodes is considered to have at least
 * one child, even if the corresponding isl_schedule_tree does not
 * have any children.
 */
int isl_schedule_node_has_children(__isl_keep isl_schedule_node *node)
{
	if (!node)
		return -1;
	return !isl_schedule_tree_is_leaf(node->tree);
}

/* Return the number of children of "node".
 *
 * Any node other than the leaf nodes is considered to have at least
 * one child, even if the corresponding isl_schedule_tree does not
 * have any children. That is, the number of children of "node" is
 * only zero if its tree is the explicit empty tree. Otherwise,
 * if the isl_schedule_tree has any children, then it is equal
 * to the number of children of "node". If it has zero children,
 * then "node" still has a leaf node as child.
 */
int isl_schedule_node_n_children(__isl_keep isl_schedule_node *node)
{
	int n;

	if (!node)
		return -1;

	if (isl_schedule_tree_is_leaf(node->tree))
		return 0;

	n = isl_schedule_tree_n_children(node->tree);
	if (n == 0)
		return 1;

	return n;
}

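/* For example (a sketch, with "band" assumed to point at a band node whose
 * isl_schedule_tree has no explicit children), the node still reports a
 * single child, the leaf:
 *
 *	n = isl_schedule_node_n_children(band);		// n == 1
 *	leaf = isl_schedule_node_get_child(band, 0);	// points at the leaf
 */
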
/* Move the "node" pointer to the parent of the node it currently points to.
 */
__isl_give isl_schedule_node *isl_schedule_node_parent(
	__isl_take isl_schedule_node *node)
{
	int n;
	isl_schedule_tree *tree;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_parent(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no parent",
			return isl_schedule_node_free(node));
	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n - 1);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;
	node->ancestors = isl_schedule_tree_list_drop(node->ancestors,
								n - 1, 1);
	if (!node->ancestors || !node->tree)
		return isl_schedule_node_free(node);

	return node;
}

/* Move the "node" pointer to the child at position "pos" of the node
 * it currently points to.
 */
__isl_give isl_schedule_node *isl_schedule_node_child(
	__isl_take isl_schedule_node *node, int pos)
{
	int n;
	isl_ctx *ctx;
	isl_schedule_tree *tree;
	int *child_pos;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_children(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no children",
			return isl_schedule_node_free(node));

	ctx = isl_schedule_node_get_ctx(node);
	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	child_pos = isl_realloc_array(ctx, node->child_pos, int, n + 1);
	if (!child_pos)
		return isl_schedule_node_free(node);
	node->child_pos = child_pos;
	node->child_pos[n] = pos;

	node->ancestors = isl_schedule_tree_list_add(node->ancestors,
				isl_schedule_tree_copy(node->tree));
	tree = node->tree;
	if (isl_schedule_tree_has_children(tree))
		tree = isl_schedule_tree_get_child(tree, pos);
	else
		tree = isl_schedule_node_get_leaf(node);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;

	if (!node->tree || !node->ancestors)
		return isl_schedule_node_free(node);

	return node;
}

/* Move the "node" pointer to the first child of the node
 * it currently points to.
 */
__isl_give isl_schedule_node *isl_schedule_node_first_child(
	__isl_take isl_schedule_node *node)
{
	return isl_schedule_node_child(node, 0);
}

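/* A navigation sketch (the schedule "schedule" is assumed to have been
 * constructed elsewhere): starting from the root, move down to its child
 * and back up again.
 *
 *	node = isl_schedule_get_root(schedule);		// domain node
 *	node = isl_schedule_node_first_child(node);	// child of the domain
 *	node = isl_schedule_node_parent(node);		// back at the root
 *	isl_schedule_node_free(node);
 */
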
/* Move the "node" pointer to the child of this node's parent in
 * the previous child position.
 */
__isl_give isl_schedule_node *isl_schedule_node_previous_sibling(
	__isl_take isl_schedule_node *node)
{
	int n;
	isl_schedule_tree *parent, *tree;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_previous_sibling(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no previous sibling",
			return isl_schedule_node_free(node));

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
									n - 1);
	if (!parent)
		return isl_schedule_node_free(node);
	node->child_pos[n - 1]--;
	tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
							node->child_pos[n - 1]);
	isl_schedule_tree_free(parent);
	if (!tree)
		return isl_schedule_node_free(node);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;

	return node;
}

/* Move the "node" pointer to the child of this node's parent in
 * the next child position.
 */
__isl_give isl_schedule_node *isl_schedule_node_next_sibling(
	__isl_take isl_schedule_node *node)
{
	int n;
	isl_schedule_tree *parent, *tree;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_next_sibling(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no next sibling",
			return isl_schedule_node_free(node));

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
									n - 1);
	if (!parent)
		return isl_schedule_node_free(node);
	node->child_pos[n - 1]++;
	tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
							node->child_pos[n - 1]);
	isl_schedule_tree_free(parent);
	if (!tree)
		return isl_schedule_node_free(node);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;

	return node;
}

/* Return a copy of the child at position "pos" of "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_get_child(
	__isl_keep isl_schedule_node *node, int pos)
{
	return isl_schedule_node_child(isl_schedule_node_copy(node), pos);
}

/* Return the number of members in the given band node.
 */
unsigned isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_tree_band_n_member(node->tree) : 0;
}

/* Is the band member at position "pos" of the band node "node"
 * marked coincident?
 */
int isl_schedule_node_band_member_get_coincident(
	__isl_keep isl_schedule_node *node, int pos)
{
	if (!node)
		return -1;
	return isl_schedule_tree_band_member_get_coincident(node->tree, pos);
}

/* Mark the band member at position "pos" of the band node "node"
 * as being coincident or not according to "coincident".
 */
__isl_give isl_schedule_node *isl_schedule_node_band_member_set_coincident(
	__isl_take isl_schedule_node *node, int pos, int coincident)
{
	int c;
	isl_schedule_tree *tree;

	if (!node)
		return NULL;
	c = isl_schedule_node_band_member_get_coincident(node, pos);
	if (c == coincident)
		return node;

	tree = isl_schedule_tree_copy(node->tree);
	tree = isl_schedule_tree_band_member_set_coincident(tree, pos,
							    coincident);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}

/* Is the band node "node" marked permutable?
 */
int isl_schedule_node_band_get_permutable(__isl_keep isl_schedule_node *node)
{
	if (!node)
		return -1;

	return isl_schedule_tree_band_get_permutable(node->tree);
}

/* Mark the band node "node" permutable or not according to "permutable".
 */
__isl_give isl_schedule_node *isl_schedule_node_band_set_permutable(
	__isl_take isl_schedule_node *node, int permutable)
{
	isl_schedule_tree *tree;

	if (!node)
		return NULL;
	if (isl_schedule_node_band_get_permutable(node) == permutable)
		return node;

	tree = isl_schedule_tree_copy(node->tree);
	tree = isl_schedule_tree_band_set_permutable(tree, permutable);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}

/* Return the schedule space of the band node.
 */
__isl_give isl_space *isl_schedule_node_band_get_space(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_tree_band_get_space(node->tree);
}

/* Return the schedule of the band node in isolation.
 */
__isl_give isl_multi_union_pw_aff *isl_schedule_node_band_get_partial_schedule(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_tree_band_get_partial_schedule(node->tree);
}

/* Return the schedule of the band node in isolation in the form of
 * an isl_union_map.
 *
 * If the band does not have any members, then we construct a universe map
 * with the universe of the domain elements reaching the node as domain.
 * Otherwise, we extract an isl_multi_union_pw_aff representation and
 * convert that to an isl_union_map.
 */
__isl_give isl_union_map *isl_schedule_node_band_get_partial_schedule_union_map(
	__isl_keep isl_schedule_node *node)
{
	isl_multi_union_pw_aff *mupa;

	if (!node)
		return NULL;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"not a band node", return NULL);
	if (isl_schedule_node_band_n_member(node) == 0) {
		isl_union_set *domain;

		domain = isl_schedule_node_get_universe_domain(node);
		return isl_union_map_from_domain(domain);
	}

	mupa = isl_schedule_node_band_get_partial_schedule(node);
	return isl_union_map_from_multi_union_pw_aff(mupa);
}

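/* An illustrative example: for a band with partial schedule
 * { S[i,j] -> [(i)] }, this returns { S[i,j] -> [i] }, while a band
 * without members yields a universe map of the form { S[i,j] -> [] }.
 */
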
/* Make sure that the spaces of "node" and "mv" are the same.
 * Return -1 on error, reporting the error to the user.
 */
static int check_space_multi_val(__isl_keep isl_schedule_node *node,
	__isl_keep isl_multi_val *mv)
{
	isl_space *node_space, *mv_space;
	int equal;

	node_space = isl_schedule_node_band_get_space(node);
	mv_space = isl_multi_val_get_space(mv);
	equal = isl_space_tuple_is_equal(node_space, isl_dim_set,
					mv_space, isl_dim_set);
	isl_space_free(mv_space);
	isl_space_free(node_space);
	if (equal < 0)
		return -1;
	if (!equal)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"spaces don't match", return -1);

	return 0;
}

/* Multiply the partial schedule of the band node "node"
 * with the factors in "mv".
 */
__isl_give isl_schedule_node *isl_schedule_node_band_scale(
	__isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
{
	isl_schedule_tree *tree;

	if (!node || !mv)
		goto error;
	if (check_space_multi_val(node, mv) < 0)
		goto error;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_band_scale(tree, mv);
	return isl_schedule_node_graft_tree(node, tree);
error:
	isl_multi_val_free(mv);
	isl_schedule_node_free(node);
	return NULL;
}

/* Divide the partial schedule of the band node "node"
 * by the factors in "mv".
 */
__isl_give isl_schedule_node *isl_schedule_node_band_scale_down(
	__isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv)
{
	isl_schedule_tree *tree;

	if (!node || !mv)
		goto error;
	if (check_space_multi_val(node, mv) < 0)
		goto error;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_band_scale_down(tree, mv);
	return isl_schedule_node_graft_tree(node, tree);
error:
	isl_multi_val_free(mv);
	isl_schedule_node_free(node);
	return NULL;
}

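/* A minimal usage sketch (the factor is illustrative and "node" is assumed
 * to point at a one-member band node): divide a band schedule
 * { S[i] -> [(4i)] } by 4, giving { S[i] -> [(i)] }.
 *
 *	isl_ctx *ctx = isl_schedule_node_get_ctx(node);
 *	isl_multi_val *mv;
 *
 *	mv = isl_multi_val_zero(isl_schedule_node_band_get_space(node));
 *	mv = isl_multi_val_set_val(mv, 0, isl_val_int_from_si(ctx, 4));
 *	node = isl_schedule_node_band_scale_down(node, mv);
 */
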
/* Tile "node" with tile sizes "sizes".
 *
 * The current node is replaced by two nested nodes corresponding
 * to the tile dimensions and the point dimensions.
 *
 * Return a pointer to the outer (tile) node.
 *
 * If the scale tile loops option is set, then the tile loops
 * are scaled by the tile sizes.  If the shift point loops option is set,
 * then the point loops are shifted to start at zero.
 * In particular, these options affect the tile and point loop schedules
 * as follows
 *
 *	scale	shift	original	tile		point
 *
 *	0	0	i		floor(i/s)	i
 *	1	0	i		s * floor(i/s)	i
 *	0	1	i		floor(i/s)	i - s * floor(i/s)
 *	1	1	i		s * floor(i/s)	i - s * floor(i/s)
 */
__isl_give isl_schedule_node *isl_schedule_node_band_tile(
	__isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes)
{
	isl_schedule_tree *tree;

	if (!node || !sizes)
		goto error;

	if (check_space_multi_val(node, sizes) < 0)
		goto error;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_band_tile(tree, sizes);
	return isl_schedule_node_graft_tree(node, tree);
error:
	isl_multi_val_free(sizes);
	isl_schedule_node_free(node);
	return NULL;
}

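/* A usage sketch (tile sizes are illustrative and "node" is assumed to
 * point at a two-member band node): tile the band with 32x32 tiles.
 * With scale = 1 and shift = 0 in the table above, a band
 * { S[i,j] -> [(i), (j)] } becomes a tile band
 * { S[i,j] -> [(32*floor(i/32)), (32*floor(j/32))] } with the original
 * band { S[i,j] -> [(i), (j)] } as point band below it.
 *
 *	isl_ctx *ctx = isl_schedule_node_get_ctx(node);
 *	isl_multi_val *sizes;
 *
 *	sizes = isl_multi_val_zero(isl_schedule_node_band_get_space(node));
 *	sizes = isl_multi_val_set_val(sizes, 0, isl_val_int_from_si(ctx, 32));
 *	sizes = isl_multi_val_set_val(sizes, 1, isl_val_int_from_si(ctx, 32));
 *	node = isl_schedule_node_band_tile(node, sizes);
 */
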
/* Split "node" into two nested band nodes, one with the first "pos"
 * dimensions and one with the remaining dimensions.
 * The schedules of the two band nodes live in anonymous spaces.
 */
__isl_give isl_schedule_node *isl_schedule_node_band_split(
	__isl_take isl_schedule_node *node, int pos)
{
	isl_schedule_tree *tree;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_band_split(tree, pos);
	return isl_schedule_node_graft_tree(node, tree);
}

/* Return the domain of the domain node "node".
 */
__isl_give isl_union_set *isl_schedule_node_domain_get_domain(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_tree_domain_get_domain(node->tree);
}

/* Return the filter of the filter node "node".
 */
__isl_give isl_union_set *isl_schedule_node_filter_get_filter(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_tree_filter_get_filter(node->tree);
}

/* Replace the filter of filter node "node" by "filter".
 */
__isl_give isl_schedule_node *isl_schedule_node_filter_set_filter(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
{
	isl_schedule_tree *tree;

	if (!node || !filter)
		goto error;

	tree = isl_schedule_tree_copy(node->tree);
	tree = isl_schedule_tree_filter_set_filter(tree, filter);
	return isl_schedule_node_graft_tree(node, tree);
error:
	isl_schedule_node_free(node);
	isl_union_set_free(filter);
	return NULL;
}

/* Update the ancestors of "node" to point to the tree that "node"
 * now points to.
 * That is, replace the child in the original parent that corresponds
 * to the current tree position by node->tree and continue updating
 * the ancestors in the same way until the root is reached.
 *
 * If "node" originally points to a leaf of the schedule tree, then make sure
 * that in the end it points to a leaf in the updated schedule tree.
 */
static __isl_give isl_schedule_node *update_ancestors(
	__isl_take isl_schedule_node *node)
{
	int i, n;
	int is_leaf;
	isl_ctx *ctx;
	isl_schedule_tree *tree;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	tree = isl_schedule_tree_copy(node->tree);

	for (i = n - 1; i >= 0; --i) {
		isl_schedule_tree *parent;

		parent = isl_schedule_tree_list_get_schedule_tree(
						node->ancestors, i);
		parent = isl_schedule_tree_replace_child(parent,
						node->child_pos[i], tree);
		node->ancestors = isl_schedule_tree_list_set_schedule_tree(
			    node->ancestors, i, isl_schedule_tree_copy(parent));

		tree = parent;
	}

	is_leaf = isl_schedule_tree_is_leaf(node->tree);
	node->schedule = isl_schedule_set_root(node->schedule, tree);
	if (is_leaf) {
		isl_schedule_tree_free(node->tree);
		node->tree = isl_schedule_node_get_leaf(node);
	}

	if (!node->schedule || !node->ancestors)
		return isl_schedule_node_free(node);

	return node;
}

/* Replace the subtree that "pos" points to by "tree", updating
 * the ancestors to maintain a consistent state.
 */
__isl_give isl_schedule_node *isl_schedule_node_graft_tree(
	__isl_take isl_schedule_node *pos, __isl_take isl_schedule_tree *tree)
{
	if (!tree || !pos)
		goto error;
	if (pos->tree == tree) {
		isl_schedule_tree_free(tree);
		return pos;
	}

	pos = isl_schedule_node_cow(pos);
	if (!pos)
		goto error;

	isl_schedule_tree_free(pos->tree);
	pos->tree = tree;

	return update_ancestors(pos);
error:
	isl_schedule_node_free(pos);
	isl_schedule_tree_free(tree);
	return NULL;
}

/* Make sure we can insert a node between "node" and its parent.
 * Return -1 on error, reporting the reason why we cannot insert a node.
 */
static int check_insert(__isl_keep isl_schedule_node *node)
{
	int has_parent;
	enum isl_schedule_node_type type;

	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0)
		return -1;
	if (!has_parent)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"cannot insert node outside of root", return -1);

	type = isl_schedule_node_get_parent_type(node);
	if (type == isl_schedule_node_error)
		return -1;
	if (type == isl_schedule_node_set || type == isl_schedule_node_sequence)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"cannot insert node between set or sequence node "
			"and its filter children", return -1);

	return 0;
}

/* Insert a band node with partial schedule "mupa" between "node" and
 * its parent.
 * Return a pointer to the new band node.
 */
__isl_give isl_schedule_node *isl_schedule_node_insert_partial_schedule(
	__isl_take isl_schedule_node *node,
	__isl_take isl_multi_union_pw_aff *mupa)
{
	isl_schedule_band *band;
	isl_schedule_tree *tree;

	if (check_insert(node) < 0)
		node = isl_schedule_node_free(node);

	tree = isl_schedule_node_get_tree(node);
	band = isl_schedule_band_from_multi_union_pw_aff(mupa);
	tree = isl_schedule_tree_insert_band(tree, band);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}

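/* A usage sketch (the schedule string is illustrative and
 * isl_multi_union_pw_aff_from_union_map is just one possible way to
 * construct the partial schedule): insert a band that schedules the
 * statement instances by their first coordinate.
 *
 *	isl_multi_union_pw_aff *mupa;
 *
 *	mupa = isl_multi_union_pw_aff_from_union_map(
 *		isl_union_map_read_from_str(ctx, "{ S[i,j] -> [i] }"));
 *	node = isl_schedule_node_insert_partial_schedule(node, mupa);
 */
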
/* Insert a filter node with filter "filter" between "node" and its parent.
 * Return a pointer to the new filter node.
 */
__isl_give isl_schedule_node *isl_schedule_node_insert_filter(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
{
	isl_schedule_tree *tree;

	if (check_insert(node) < 0)
		node = isl_schedule_node_free(node);

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_insert_filter(tree, filter);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}

/* Attach the current subtree of "node" to a sequence of filter tree nodes
 * with filters described by "filters", attach this sequence
 * of filter tree nodes as children to a new tree of type "type" and
 * replace the original subtree of "node" by this new tree.
 */
static __isl_give isl_schedule_node *isl_schedule_node_insert_children(
	__isl_take isl_schedule_node *node,
	enum isl_schedule_node_type type,
	__isl_take isl_union_set_list *filters)
{
	int i, n;
	isl_ctx *ctx;
	isl_schedule_tree *tree;
	isl_schedule_tree_list *list;

	if (check_insert(node) < 0)
		node = isl_schedule_node_free(node);

	if (!node || !filters)
		goto error;

	ctx = isl_schedule_node_get_ctx(node);
	n = isl_union_set_list_n_union_set(filters);
	list = isl_schedule_tree_list_alloc(ctx, n);
	for (i = 0; i < n; ++i) {
		isl_schedule_tree *tree;
		isl_union_set *filter;

		tree = isl_schedule_node_get_tree(node);
		filter = isl_union_set_list_get_union_set(filters, i);
		tree = isl_schedule_tree_insert_filter(tree, filter);
		list = isl_schedule_tree_list_add(list, tree);
	}
	tree = isl_schedule_tree_from_children(type, list);
	node = isl_schedule_node_graft_tree(node, tree);

	isl_union_set_list_free(filters);
	return node;
error:
	isl_union_set_list_free(filters);
	isl_schedule_node_free(node);
	return NULL;
}

/* Insert a sequence node with child filters "filters" between "node" and
 * its parent. That is, the tree that "node" points to is attached
 * to each of the child nodes of the filter nodes.
 * Return a pointer to the new sequence node.
 */
__isl_give isl_schedule_node *isl_schedule_node_insert_sequence(
	__isl_take isl_schedule_node *node,
	__isl_take isl_union_set_list *filters)
{
	return isl_schedule_node_insert_children(node,
					isl_schedule_node_sequence, filters);
}

/* Insert a set node with child filters "filters" between "node" and
 * its parent. That is, the tree that "node" points to is attached
 * to each of the child nodes of the filter nodes.
 * Return a pointer to the new set node.
 */
__isl_give isl_schedule_node *isl_schedule_node_insert_set(
	__isl_take isl_schedule_node *node,
	__isl_take isl_union_set_list *filters)
{
	return isl_schedule_node_insert_children(node,
					isl_schedule_node_set, filters);
}

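/* A usage sketch (the context "ctx" and the filter strings are illustrative):
 * replace the subtree at "node" by a sequence that first executes the S1
 * instances and then the S2 instances, each filter getting a copy of the
 * original subtree.
 *
 *	isl_union_set_list *filters;
 *
 *	filters = isl_union_set_list_alloc(ctx, 2);
 *	filters = isl_union_set_list_add(filters,
 *		isl_union_set_read_from_str(ctx, "{ S1[i] }"));
 *	filters = isl_union_set_list_add(filters,
 *		isl_union_set_read_from_str(ctx, "{ S2[i] }"));
 *	node = isl_schedule_node_insert_sequence(node, filters);
 */
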
/* Print "node" to "p".
 */
__isl_give isl_printer *isl_printer_print_schedule_node(
	__isl_take isl_printer *p, __isl_keep isl_schedule_node *node)
{
	if (!node)
		return isl_printer_free(p);
	return isl_printer_print_schedule_tree_mark(p, node->schedule->root,
			isl_schedule_tree_list_n_schedule_tree(node->ancestors),
			node->child_pos);
}

void isl_schedule_node_dump(__isl_keep isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_printer *printer;

	if (!node)
		return;

	ctx = isl_schedule_node_get_ctx(node);
	printer = isl_printer_to_file(ctx, stderr);
	printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
	printer = isl_printer_print_schedule_node(printer, node);

	isl_printer_free(printer);
}