/*
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2014      INRIA Rocquencourt
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
 * B.P. 105 - 78153 Le Chesnay, France
 */
#include <isl/space.h>
#include <isl/constraint.h>
#include <isl/union_set.h>
#include <isl/union_map.h>
#include <isl/schedule_node.h>
#include <isl/options.h>
#include <isl_tarjan.h>
#include <isl_ast_private.h>
#include <isl_ast_build_expr.h>
#include <isl_ast_build_private.h>
#include <isl_ast_graft_private.h>
/* Try and reduce the number of disjuncts in the representation of "set",
 * without dropping explicit representations of local variables.
 */
static __isl_give isl_set *isl_set_coalesce_preserve(__isl_take isl_set *set)
{
	isl_ctx *ctx;
	int save_preserve;

	ctx = isl_set_get_ctx(set);
	save_preserve = isl_options_get_coalesce_preserve_locals(ctx);
	isl_options_set_coalesce_preserve_locals(ctx, 1);
	set = isl_set_coalesce(set);
	isl_options_set_coalesce_preserve_locals(ctx, save_preserve);
	return set;
}
/* Data used in generate_domain.
 *
 * "build" is the input build.
 * "list" collects the results.
 */
struct isl_generate_domain_data {
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed,
	__isl_take isl_ast_build *build);
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal);
/* Generate an AST for a single domain based on
 * the (non single valued) inverse schedule "executed".
 *
 * We extend the schedule with the iteration domain
 * and continue generating through a call to generate_code.
 *
 * In particular, if executed has the form
 *
 *	S -> D
 *
 * then we continue generating code on
 *
 *	[S -> D] -> D
 *
 * The extended inverse schedule is clearly single valued
 * ensuring that the nested generate_code will not reach this function,
 * but will instead create calls to all elements of D that need
 * to be executed from the current schedule domain.
 */
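/* For instance, an inverse schedule of the form
 *
 *	{ S[i] -> D[i,j] : 0 <= j <= 1 }
 *
 * maps each schedule point S[i] to two domain elements and is extended to
 *
 *	{ [S[i] -> D[i,j]] -> D[i,j] : 0 <= j <= 1 }
 *
 * which maps each (extended) schedule point to a single domain element.
 */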
static isl_stat generate_non_single_valued(__isl_take isl_map *executed,
	struct isl_generate_domain_data *data)
{
	isl_map *identity;
	isl_ast_build *build;
	isl_ast_graft_list *list;

	build = isl_ast_build_copy(data->build);

	identity = isl_set_identity(isl_map_range(isl_map_copy(executed)));
	executed = isl_map_domain_product(executed, identity);
	build = isl_ast_build_set_single_valued(build, 1);

	list = generate_code(isl_union_map_from_map(executed), build, 1);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}
/* Call the at_each_domain callback, if requested by the user,
 * after recording the current inverse schedule in the build.
 */
static __isl_give isl_ast_graft *at_each_domain(__isl_take isl_ast_graft *graft,
	__isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->at_each_domain)
		return graft;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_executed(build,
			isl_union_map_from_map(isl_map_copy(executed)));
	if (!build)
		return isl_ast_graft_free(graft);

	graft->node = build->at_each_domain(graft->node,
			build, build->at_each_domain_user);
	isl_ast_build_free(build);

	if (!graft->node)
		graft = isl_ast_graft_free(graft);

	return graft;
}
/* Generate a call expression for the single executed
 * domain element "map" and put a guard around it based on its (simplified)
 * domain.  "executed" is the original inverse schedule from which "map"
 * has been derived.  In particular, "map" is either identical to "executed"
 * or it is the result of gisting "executed" with respect to the build domain.
 * "executed" is only used if there is an at_each_domain callback.
 *
 * At this stage, any pending constraints in the build can no longer
 * be simplified with respect to any enforced constraints since
 * the call node does not have any enforced constraints.
 * Since all pending constraints not covered by any enforced constraints
 * will be added as a guard to the graft in create_node_scaled,
 * even in the eliminated case, the pending constraints
 * can be considered to have been generated by outer constructs.
 *
 * If the user has set an at_each_domain callback, it is called
 * on the constructed call expression node.
 */
static isl_stat add_domain(__isl_take isl_map *executed,
	__isl_take isl_map *map, struct isl_generate_domain_data *data)
{
	isl_ast_build *build;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_set *guard, *pending;

	build = isl_ast_build_copy(data->build);
	pending = isl_ast_build_get_pending(build);
	build = isl_ast_build_replace_pending_by_guard(build, pending);

	guard = isl_map_domain(isl_map_copy(map));
	guard = isl_set_compute_divs(guard);
	guard = isl_set_coalesce_preserve(guard);
	guard = isl_set_gist(guard, isl_ast_build_get_generated(build));
	guard = isl_ast_build_specialize(build, guard);

	graft = isl_ast_graft_alloc_domain(map, build);
	graft = at_each_domain(graft, executed, build);
	isl_ast_build_free(build);
	isl_map_free(executed);
	graft = isl_ast_graft_add_guard(graft, guard, data->build);

	list = isl_ast_graft_list_from_ast_graft(graft);
	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}
/* Generate an AST for a single domain based on
 * the inverse schedule "executed" and add it to data->list.
 *
 * If there is more than one domain element associated to the current
 * schedule "time", then we need to continue the generation process
 * in generate_non_single_valued.
 * Note that the inverse schedule being single-valued may depend
 * on constraints that are only available in the original context
 * domain specified by the user.  We therefore first introduce
 * some of the constraints of data->build->domain.  In particular,
 * we intersect with a single-disjunct approximation of this set.
 * We perform this approximation to avoid further splitting up
 * the executed relation, possibly introducing a disjunctive guard
 * on the corresponding code.
 *
 * On the other hand, we only perform the test after having taken the gist
 * of the domain as the resulting map is the one from which the call
 * expression is constructed.  Using this map to construct the call
 * expression usually yields simpler results in cases where the original
 * map is not obviously single-valued.
 * If the original map is obviously single-valued, then the gist
 * operation is skipped.
 *
 * Because we perform the single-valuedness test on the gisted map,
 * we may in rare cases fail to recognize that the inverse schedule
 * is single-valued.  This becomes problematic if this happens
 * from the recursive call through generate_non_single_valued
 * as we would then end up in an infinite recursion.
 * We therefore check if we are inside a call to generate_non_single_valued
 * and revert to the ungisted map if the gisted map turns out not to be
 * single-valued.
 *
 * Otherwise, call add_domain to generate a call expression (with guard) and
 * to call the at_each_domain callback, if any.
 */
static isl_stat generate_domain(__isl_take isl_map *executed, void *user)
{
	struct isl_generate_domain_data *data = user;
	isl_set *domain;
	isl_map *map = NULL;
	int empty, sv;

	domain = isl_ast_build_get_domain(data->build);
	domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
	executed = isl_map_intersect_domain(executed, domain);
	empty = isl_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty) {
		isl_map_free(executed);
		return isl_stat_ok;
	}

	sv = isl_map_plain_is_single_valued(executed);
	if (sv < 0)
		goto error;
	if (sv)
		return add_domain(executed, isl_map_copy(executed), data);

	executed = isl_map_coalesce(executed);
	map = isl_map_copy(executed);
	map = isl_ast_build_compute_gist_map_domain(data->build, map);
	sv = isl_map_is_single_valued(map);
	if (sv < 0)
		goto error;
	if (!sv) {
		isl_map_free(map);
		if (data->build->single_valued)
			map = isl_map_copy(executed);
		else
			return generate_non_single_valued(executed, data);
	}

	return add_domain(executed, map, data);
error:
	isl_map_free(map);
	isl_map_free(executed);
	return isl_stat_error;
}
/* Call build->create_leaf to create a "leaf" node in the AST,
 * encapsulate the result in an isl_ast_graft and return the result
 * as a 1-element list.
 *
 * Note that the node returned by the user may be an entire tree.
 *
 * Since the node itself cannot enforce any constraints, we turn
 * all pending constraints into guards and add them to the resulting
 * graft to ensure that they will be generated.
 *
 * Before we pass control to the user, we first clear some information
 * from the build that is (presumably) only meaningful
 * for the current code generation.
 * This includes the create_leaf callback itself, so we make a copy
 * of the build first.
 */
static __isl_give isl_ast_graft_list *call_create_leaf(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_set *guard;
	isl_ast_node *node;
	isl_ast_graft *graft;
	isl_ast_build *user_build;

	guard = isl_ast_build_get_pending(build);
	user_build = isl_ast_build_copy(build);
	user_build = isl_ast_build_replace_pending_by_guard(user_build,
							isl_set_copy(guard));
	user_build = isl_ast_build_set_executed(user_build, executed);
	user_build = isl_ast_build_clear_local_info(user_build);

	node = build->create_leaf(user_build, build->create_leaf_user);
	graft = isl_ast_graft_alloc(node, build);
	graft = isl_ast_graft_add_guard(graft, guard, build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
}
static __isl_give isl_ast_graft_list *build_ast_from_child(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed);
/* Generate an AST after having handled the complete schedule
 * of this call to the code generator or the complete band
 * if we are generating an AST from a schedule tree.
 *
 * If we are inside a band node, then move on to the child of the band.
 *
 * If the user has specified a create_leaf callback, control
 * is passed to the user in call_create_leaf.
 *
 * Otherwise, we generate one or more calls for each individual
 * domain in generate_domain.
 */
static __isl_give isl_ast_graft_list *generate_inner_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_generate_domain_data data = { build };

	if (!build || !executed)
		goto error;

	if (isl_ast_build_has_schedule_node(build)) {
		isl_schedule_node *node;
		node = isl_ast_build_get_schedule_node(build);
		build = isl_ast_build_reset_schedule_node(build);
		return build_ast_from_child(build, node, executed);
	}

	if (build->create_leaf)
		return call_create_leaf(executed, build);

	ctx = isl_union_map_get_ctx(executed);
	data.list = isl_ast_graft_list_alloc(ctx, 0);
	if (isl_union_map_foreach_map(executed, &generate_domain, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (0)
error:		data.list = NULL;
	isl_ast_build_free(build);
	isl_union_map_free(executed);

	return data.list;
}
/* Call the before_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_node *before_each_for(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build)
{
	isl_id *id;

	if (!node || !build)
		return isl_ast_node_free(node);
	if (!build->before_each_for)
		return node;

	id = build->before_each_for(build, build->before_each_for_user);
	node = isl_ast_node_set_annotation(node, id);

	return node;
}
/* Call the after_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_graft *after_each_for(__isl_take isl_ast_graft *graft,
	__isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_for)
		return graft;

	graft->node = build->after_each_for(graft->node, build,
						build->after_each_for_user);
	if (!graft->node)
		return isl_ast_graft_free(graft);

	return graft;
}
/* Plug in all the known values of the current and outer dimensions
 * in the domain of "executed".  In principle, we only need to plug
 * in the known value of the current dimension since the values of
 * outer dimensions have been plugged in already.
 * However, it turns out to be easier to just plug in all known values.
 */
static __isl_give isl_union_map *plug_in_values(
	__isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	return isl_ast_build_substitute_values_union_map_domain(build,
								    executed);
}
/* Check if the constraint "c" is a lower bound on dimension "pos",
 * an upper bound, or independent of dimension "pos".
 */
static int constraint_type(isl_constraint *c, int pos)
{
	if (isl_constraint_is_lower_bound(c, isl_dim_set, pos))
		return 1;
	if (isl_constraint_is_upper_bound(c, isl_dim_set, pos))
		return 2;
	return 0;
}
/* Compare the types of the constraints "a" and "b",
 * resulting in constraints that are independent of "depth"
 * to be sorted before the lower bounds on "depth", which in
 * turn are sorted before the upper bounds on "depth".
 */
static int cmp_constraint(__isl_keep isl_constraint *a,
	__isl_keep isl_constraint *b, void *user)
{
	int *depth = user;
	int t1 = constraint_type(a, *depth);
	int t2 = constraint_type(b, *depth);

	return t1 - t2;
}
/* Extract a lower bound on dimension "pos" from constraint "c".
 *
 * If the constraint is of the form
 *
 *	a x + f(...) >= 0
 *
 * then we essentially return
 *
 *	l = ceil(-f(...)/a)
 *
 * However, if the current dimension is strided, then we need to make
 * sure that the lower bound we construct is of the form
 *
 *	f + s * a
 *
 * with f the offset and s the stride.
 * We therefore compute
 *
 *	f + s * ceil((l - f)/s)
 */
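/* As a concrete illustration of this rounding: with offset f = 1 and
 * stride s = 4, a raw lower bound l = 6 becomes
 *
 *	1 + 4 * ceil((6 - 1)/4) = 1 + 4 * 2 = 9
 *
 * which is the smallest value of the form 1 + 4 a that is greater than
 * or equal to 6.
 */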
static __isl_give isl_aff *lower_bound(__isl_keep isl_constraint *c,
	int pos, __isl_keep isl_ast_build *build)
{
	isl_aff *aff;

	aff = isl_constraint_get_bound(c, isl_dim_set, pos);
	aff = isl_aff_ceil(aff);

	if (isl_ast_build_has_stride(build, pos)) {
		isl_aff *offset;
		isl_val *stride;

		offset = isl_ast_build_get_offset(build, pos);
		stride = isl_ast_build_get_stride(build, pos);

		aff = isl_aff_sub(aff, isl_aff_copy(offset));
		aff = isl_aff_scale_down_val(aff, isl_val_copy(stride));
		aff = isl_aff_ceil(aff);
		aff = isl_aff_scale_val(aff, stride);
		aff = isl_aff_add(aff, offset);
	}

	aff = isl_ast_build_compute_gist_aff(build, aff);

	return aff;
}
/* Return the exact lower bound (or upper bound if "upper" is set)
 * of "domain" as a piecewise affine expression.
 *
 * If we are computing a lower bound (of a strided dimension), then
 * we need to make sure it is of the form
 *
 *	f + s * a
 *
 * where f is the offset and s is the stride.
 * We therefore need to include the stride constraint before computing
 * the minimum.
 */
static __isl_give isl_pw_aff *exact_bound(__isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build, int upper)
{
	isl_set *stride;
	isl_map *it_map;
	isl_pw_aff *pa;
	isl_pw_multi_aff *pma;

	domain = isl_set_copy(domain);
	if (!upper) {
		stride = isl_ast_build_get_stride_constraint(build);
		domain = isl_set_intersect(domain, stride);
	}
	it_map = isl_ast_build_map_to_iterator(build, domain);
	if (upper)
		pma = isl_map_lexmax_pw_multi_aff(it_map);
	else
		pma = isl_map_lexmin_pw_multi_aff(it_map);
	pa = isl_pw_multi_aff_get_pw_aff(pma, 0);
	isl_pw_multi_aff_free(pma);
	pa = isl_ast_build_compute_gist_pw_aff(build, pa);
	pa = isl_pw_aff_coalesce(pa);

	return pa;
}
/* Callback for sorting the isl_pw_aff_list passed to reduce_list and
 * remove_redundant_lower_bounds.
 */
static int reduce_list_cmp(__isl_keep isl_pw_aff *a, __isl_keep isl_pw_aff *b,
	void *user)
{
	return isl_pw_aff_plain_cmp(a, b);
}
/* Given a list of lower bounds "list", remove those that are redundant
 * with respect to the other bounds in "list" and the domain of "build".
 *
 * We first sort the bounds in the same way as they would be sorted
 * by set_for_node_expressions so that we can try and remove the last
 * (redundant) lower bounds first.
 *
 * For a lower bound to be effective, there needs to be at least
 * one domain element for which it is larger than all other lower bounds.
 * For each lower bound we therefore intersect the domain with
 * the conditions that it is larger than all other bounds and
 * check whether the result is empty.  If so, the bound can be removed.
 */
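/* A small example of this emptiness test: with lower bounds i >= 0 and
 * i >= n on a build domain where n >= 0, the bound i >= 0 is only
 * effective if 0 > n for some domain element.  Intersecting the domain
 * with 0 > n yields the empty set, so the bound i >= 0 can be dropped.
 */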
static __isl_give isl_pw_aff_list *remove_redundant_lower_bounds(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i, j, n;
	isl_set *domain;

	list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
	if (!list)
		return NULL;

	n = isl_pw_aff_list_n_pw_aff(list);
	if (n <= 1)
		return list;

	domain = isl_ast_build_get_domain(build);

	for (i = n - 1; i >= 0; --i) {
		isl_pw_aff *pa_i;
		isl_set *domain_i;
		int empty;

		domain_i = isl_set_copy(domain);
		pa_i = isl_pw_aff_list_get_pw_aff(list, i);

		for (j = 0; j < n; ++j) {
			isl_pw_aff *pa_j;
			isl_set *better;

			if (j == i)
				continue;

			pa_j = isl_pw_aff_list_get_pw_aff(list, j);
			better = isl_pw_aff_gt_set(isl_pw_aff_copy(pa_i), pa_j);
			domain_i = isl_set_intersect(domain_i, better);
		}

		empty = isl_set_is_empty(domain_i);

		isl_set_free(domain_i);
		isl_pw_aff_free(pa_i);

		if (empty < 0)
			goto error;
		if (!empty)
			continue;

		list = isl_pw_aff_list_drop(list, i, 1);
		n--;
	}

	isl_set_free(domain);

	return list;
error:
	isl_set_free(domain);
	return isl_pw_aff_list_free(list);
}
/* Extract a lower bound on dimension "pos" from each constraint
 * in "constraints" and return the list of lower bounds.
 * If "constraints" has zero elements, then we extract a lower bound
 * from "domain" instead.
 *
 * If the current dimension is strided, then the lower bound
 * is adjusted by lower_bound to match the stride information.
 * This modification may make one or more lower bounds redundant
 * with respect to the other lower bounds.  We therefore check
 * for this condition and remove the redundant lower bounds.
 */
static __isl_give isl_pw_aff_list *lower_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_pw_aff_list *list;
	int i, n;

	if (!build)
		return NULL;

	n = isl_constraint_list_n_constraint(constraints);
	if (n == 0) {
		isl_pw_aff *pa;
		pa = exact_bound(domain, build, 0);
		return isl_pw_aff_list_from_pw_aff(pa);
	}

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_constraint *c;

		c = isl_constraint_list_get_constraint(constraints, i);
		aff = lower_bound(c, pos, build);
		isl_constraint_free(c);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
	}

	if (isl_ast_build_has_stride(build, pos))
		list = remove_redundant_lower_bounds(list, build);

	return list;
}
/* Extract an upper bound on dimension "pos" from each constraint
 * in "constraints" and return the list of upper bounds.
 * If "constraints" has zero elements, then we extract an upper bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *upper_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_pw_aff_list *list;
	int i, n;

	n = isl_constraint_list_n_constraint(constraints);
	if (n == 0) {
		isl_pw_aff *pa;
		pa = exact_bound(domain, build, 1);
		return isl_pw_aff_list_from_pw_aff(pa);
	}

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_constraint *c;

		c = isl_constraint_list_get_constraint(constraints, i);
		aff = isl_constraint_get_bound(c, isl_dim_set, pos);
		isl_constraint_free(c);
		aff = isl_aff_floor(aff);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
	}

	return list;
}
/* Return an isl_ast_expr that performs the reduction of type "type"
 * on AST expressions corresponding to the elements in "list".
 *
 * The list is assumed to contain at least one element.
 * If the list contains exactly one element, then the returned isl_ast_expr
 * simply computes that affine expression.
 * If the list contains more than one element, then we sort it
 * using a fairly arbitrary but hopefully reasonably stable order.
 */
static __isl_give isl_ast_expr *reduce_list(enum isl_ast_op_type type,
	__isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i, n;
	isl_ctx *ctx;
	isl_ast_expr *expr;

	n = isl_pw_aff_list_n_pw_aff(list);

	if (n == 1)
		return isl_ast_build_expr_from_pw_aff_internal(build,
				isl_pw_aff_list_get_pw_aff(list, 0));

	ctx = isl_pw_aff_list_get_ctx(list);
	expr = isl_ast_expr_alloc_op(ctx, type, n);
	if (!expr)
		return NULL;

	list = isl_pw_aff_list_copy(list);
	list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
	if (!list)
		return isl_ast_expr_free(expr);

	for (i = 0; i < n; ++i) {
		isl_ast_expr *expr_i;

		expr_i = isl_ast_build_expr_from_pw_aff_internal(build,
				isl_pw_aff_list_get_pw_aff(list, i));
		if (!expr_i)
			goto error;
		expr->u.op.args[i] = expr_i;
	}

	isl_pw_aff_list_free(list);
	return expr;
error:
	isl_pw_aff_list_free(list);
	isl_ast_expr_free(expr);
	return NULL;
}
/* Add guards implied by the "generated constraints",
 * but not (necessarily) enforced by the generated AST to "guard".
 * In particular, if there are any stride constraints,
 * then add the guard implied by those constraints.
 * If we have generated a degenerate loop, then add the guard
 * implied by "bounds" on the outer dimensions, i.e., the guard
 * that ensures that the single value actually exists.
 * Since there may also be guards implied by a combination
 * of these constraints, we first combine them before
 * deriving the implied constraints.
 */
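/* For example, if the stride constraints force the current dimension i
 * to be even and a degenerate loop fixes i to the single value n, then
 * eliminating i from the combination of these constraints yields the
 * implied guard that n is even.
 */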
static __isl_give isl_set *add_implied_guards(__isl_take isl_set *guard,
	int degenerate, __isl_keep isl_basic_set *bounds,
	__isl_keep isl_ast_build *build)
{
	int depth, has_stride;
	isl_space *space;
	isl_set *dom, *set;

	depth = isl_ast_build_get_depth(build);
	has_stride = isl_ast_build_has_stride(build, depth);
	if (!has_stride && !degenerate)
		return guard;

	space = isl_basic_set_get_space(bounds);
	dom = isl_set_universe(space);

	if (degenerate) {
		bounds = isl_basic_set_copy(bounds);
		bounds = isl_basic_set_drop_constraints_not_involving_dims(
					bounds, isl_dim_set, depth, 1);
		set = isl_set_from_basic_set(bounds);
		dom = isl_set_intersect(dom, set);
	}

	if (has_stride) {
		set = isl_ast_build_get_stride_constraint(build);
		dom = isl_set_intersect(dom, set);
	}

	dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
	dom = isl_ast_build_compute_gist(build, dom);
	guard = isl_set_intersect(guard, dom);

	return guard;
}
/* Update "graft" based on "sub_build" for the degenerate case.
 *
 * "build" is the build in which graft->node was created
 * "sub_build" contains information about the current level itself,
 * including the single value attained.
 *
 * We set the initialization part of the for loop to the single
 * value attained by the current dimension.
 * The increment and condition are not strictly needed as they are known
 * to be "1" and "iterator <= value" respectively.
 */
static __isl_give isl_ast_graft *refine_degenerate(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
{
	isl_pw_aff *value;

	if (!graft || !sub_build)
		return isl_ast_graft_free(graft);

	value = isl_pw_aff_copy(sub_build->value);

	graft->node->u.f.init = isl_ast_build_expr_from_pw_aff_internal(build,
						value);
	if (!graft->node->u.f.init)
		return isl_ast_graft_free(graft);

	return graft;
}
/* Return the intersection of constraints in "list" as a set.
 */
static __isl_give isl_set *intersect_constraints(
	__isl_keep isl_constraint_list *list)
{
	int i, n;
	isl_basic_set *bset;

	n = isl_constraint_list_n_constraint(list);
	if (n < 1)
		isl_die(isl_constraint_list_get_ctx(list), isl_error_internal,
			"expecting at least one constraint", return NULL);

	bset = isl_basic_set_from_constraint(
			isl_constraint_list_get_constraint(list, 0));
	for (i = 1; i < n; ++i) {
		isl_basic_set *bset_i;

		bset_i = isl_basic_set_from_constraint(
			isl_constraint_list_get_constraint(list, i));
		bset = isl_basic_set_intersect(bset, bset_i);
	}

	return isl_set_from_basic_set(bset);
}
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as a set "upper".
 *
 * In particular, if l(...) is a lower bound in "lower", and
 *
 *	-a i + f(...) >= 0		or	a i <= f(...)
 *
 * is an upper bound constraint on the current dimension i,
 * then the for loop enforces the constraint
 *
 *	-a l(...) + f(...) >= 0		or	a l(...) <= f(...)
 *
 * We therefore simply take each lower bound in turn, plug it into
 * the upper bounds and compute the intersection over all lower bounds.
 *
 * If a lower bound is a rational expression, then
 * isl_basic_set_preimage_multi_aff will force this rational
 * expression to have only integer values.  However, the loop
 * itself does not enforce this integrality constraint.  We therefore
 * use the ceil of the lower bounds instead of the lower bounds themselves.
 * Other constraints will make sure that the for loop is only executed
 * when each of the lower bounds attains an integral value.
 * In particular, potentially rational values only occur in
 * lower_bound if the offset is a (seemingly) rational expression,
 * but then outer conditions will make sure that this rational expression
 * only attains integer values.
 */
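/* As a simple instance of this substitution: with lower bound l(n) = 0
 * and upper bound constraint 2 i <= n, plugging the lower bound into the
 * upper bound yields the enforced constraint 2 * 0 <= n, i.e., n >= 0.
 */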
static __isl_give isl_ast_graft *set_enforced_from_set(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
{
	isl_space *space;
	isl_basic_set *enforced;
	isl_pw_multi_aff *pma;
	int i, n;

	if (!graft || !lower)
		return isl_ast_graft_free(graft);

	space = isl_set_get_space(upper);
	enforced = isl_basic_set_universe(isl_space_copy(space));

	space = isl_space_map_from_set(space);
	pma = isl_pw_multi_aff_identity(space);

	n = isl_pw_aff_list_n_pw_aff(lower);
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		isl_set *enforced_i;
		isl_basic_set *hull;
		isl_pw_multi_aff *pma_i;

		pa = isl_pw_aff_list_get_pw_aff(lower, i);
		pa = isl_pw_aff_ceil(pa);
		pma_i = isl_pw_multi_aff_copy(pma);
		pma_i = isl_pw_multi_aff_set_pw_aff(pma_i, pos, pa);
		enforced_i = isl_set_copy(upper);
		enforced_i = isl_set_preimage_pw_multi_aff(enforced_i, pma_i);
		hull = isl_set_simple_hull(enforced_i);
		enforced = isl_basic_set_intersect(enforced, hull);
	}

	isl_pw_multi_aff_free(pma);

	graft = isl_ast_graft_enforce(graft, enforced);

	return graft;
}
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as
 * a list of affine expressions "upper".
 *
 * The enforced condition is that each lower bound expression is less
 * than or equal to each upper bound expression.
 */
static __isl_give isl_ast_graft *set_enforced_from_list(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
{
	isl_set *cond;
	isl_basic_set *enforced;

	lower = isl_pw_aff_list_copy(lower);
	upper = isl_pw_aff_list_copy(upper);
	cond = isl_pw_aff_list_le_set(lower, upper);
	enforced = isl_set_simple_hull(cond);
	graft = isl_ast_graft_enforce(graft, enforced);

	return graft;
}
/* Does "aff" have a negative constant term?
 */
static isl_stat aff_constant_is_negative(__isl_take isl_set *set,
	__isl_take isl_aff *aff, void *user)
{
	int *neg = user;
	isl_val *v;

	v = isl_aff_get_constant_val(aff);
	*neg = isl_val_is_neg(v);
	isl_val_free(v);
	isl_set_free(set);
	isl_aff_free(aff);

	return *neg ? isl_stat_ok : isl_stat_error;
}
/* Does "pa" have a negative constant term over its entire domain?
 */
static isl_stat pw_aff_constant_is_negative(__isl_take isl_pw_aff *pa,
	void *user)
{
	isl_stat r;
	int *neg = user;

	r = isl_pw_aff_foreach_piece(pa, &aff_constant_is_negative, user);
	isl_pw_aff_free(pa);

	return (*neg && r >= 0) ? isl_stat_ok : isl_stat_error;
}
/* Does each element in "list" have a negative constant term?
 *
 * The callback terminates the iteration as soon as an element has been
 * found that does not have a negative constant term.
 */
static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
{
	int neg = 1;

	if (isl_pw_aff_list_foreach(list,
			&pw_aff_constant_is_negative, &neg) < 0 && neg)
		return -1;

	return neg;
}
/* Add 1 to each of the elements in "list", where each of these elements
 * is defined over the internal schedule space of "build".
 */
static __isl_give isl_pw_aff_list *list_add_one(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i, n;
	isl_space *space;
	isl_aff *aff;
	isl_pw_aff *one;

	space = isl_ast_build_get_space(build, 1);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_constant_si(aff, 1);
	one = isl_pw_aff_from_aff(aff);

	n = isl_pw_aff_list_n_pw_aff(list);
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;

		pa = isl_pw_aff_list_get_pw_aff(list, i);
		pa = isl_pw_aff_add(pa, isl_pw_aff_copy(one));
		list = isl_pw_aff_list_set_pw_aff(list, i, pa);
	}

	isl_pw_aff_free(one);

	return list;
}
/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a list of piecewise affine expressions.
 *
 * In particular, set the condition to
 *
 *	iterator <= min(list of upper bounds)
 *
 * If each of the upper bounds has a negative constant term, then
 * set the condition to
 *
 *	iterator < min(list of (upper bound + 1)s)
 *
 */
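/* For instance, an upper bound list consisting of the single expression
 * n - 1 produces the condition
 *
 *	iterator <= n - 1
 *
 * in the first form and the equivalent condition
 *
 *	iterator < n
 *
 * in the second form.
 */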
static __isl_give isl_ast_graft *set_for_cond_from_list(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list,
	__isl_keep isl_ast_build *build)
{
	int neg;
	isl_ast_expr *bound, *iterator, *cond;
	enum isl_ast_op_type type = isl_ast_op_le;

	if (!graft || !list)
		return isl_ast_graft_free(graft);

	neg = list_constant_is_negative(list);
	if (neg < 0)
		return isl_ast_graft_free(graft);
	list = isl_pw_aff_list_copy(list);
	if (neg) {
		list = list_add_one(list, build);
		type = isl_ast_op_lt;
	}

	bound = reduce_list(isl_ast_op_min, list, build);
	iterator = isl_ast_expr_copy(graft->node->u.f.iterator);
	cond = isl_ast_expr_alloc_binary(type, iterator, bound);
	graft->node->u.f.cond = cond;

	isl_pw_aff_list_free(list);
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);

	return graft;
}
/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a set.
 */
static __isl_give isl_ast_graft *set_for_cond_from_set(
	__isl_take isl_ast_graft *graft, __isl_keep isl_set *set,
	__isl_keep isl_ast_build *build)
{
	isl_ast_expr *cond;

	if (!graft)
		return NULL;

	cond = isl_ast_build_expr_from_set_internal(build, isl_set_copy(set));
	graft->node->u.f.cond = cond;
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);

	return graft;
}
/* Construct an isl_ast_expr for the increment (i.e., stride) of
 * the current dimension.
 */
static __isl_give isl_ast_expr *for_inc(__isl_keep isl_ast_build *build)
{
	int depth;
	isl_val *v;
	isl_ctx *ctx;

	if (!build)
		return NULL;
	ctx = isl_ast_build_get_ctx(build);
	depth = isl_ast_build_get_depth(build);

	if (!isl_ast_build_has_stride(build, depth))
		return isl_ast_expr_alloc_int_si(ctx, 1);

	v = isl_ast_build_get_stride(build, depth);
	return isl_ast_expr_from_val(v);
}
/* Should we express the loop condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints?
 *
 * The first is constructed from a list of upper bounds.
 * The second is constructed from a set.
 *
 * If there are no upper bounds in "constraints", then this could mean
 * that "domain" simply doesn't have an upper bound or that we didn't
 * pick any upper bound.  In the first case, we want to generate the
 * loop condition as a(n empty) conjunction of constraints.
 * In the second case, we will compute
 * a single upper bound from "domain" and so we use the list form.
 *
 * If there are upper bounds in "constraints",
 * then we use the list form iff the atomic_upper_bound option is set.
 */
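/* Concretely, the same pair of upper bound constraints i <= n and i <= m
 * is expressed either as the single condition
 *
 *	i <= min(n, m)
 *
 * in the list form or as the conjunction
 *
 *	i <= n && i <= m
 *
 * in the set form.
 */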
static int use_upper_bound_list(isl_ctx *ctx, int n_upper,
	__isl_keep isl_set *domain, int depth)
{
	if (n_upper > 0)
		return isl_options_get_ast_build_atomic_upper_bound(ctx);
	else
		return isl_set_dim_has_upper_bound(domain, isl_dim_set, depth);
}
/* Fill in the expressions of the for node in graft->node.
 *
 * In particular,
 * - set the initialization part of the loop to the maximum of the lower bounds
 * - extract the increment from the stride of the current dimension
 * - construct the for condition either based on a list of upper bounds
 *	or on a set of upper bound constraints.
 */
static __isl_give isl_ast_graft *set_for_node_expressions(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower,
	int use_list, __isl_keep isl_pw_aff_list *upper_list,
	__isl_keep isl_set *upper_set, __isl_keep isl_ast_build *build)
{
	isl_ast_node *node;

	if (!graft)
		return NULL;

	build = isl_ast_build_copy(build);

	node = graft->node;
	node->u.f.init = reduce_list(isl_ast_op_max, lower, build);
	node->u.f.inc = for_inc(build);

	if (!node->u.f.init || !node->u.f.inc)
		graft = isl_ast_graft_free(graft);

	if (use_list)
		graft = set_for_cond_from_list(graft, upper_list, build);
	else
		graft = set_for_cond_from_set(graft, upper_set, build);

	isl_ast_build_free(build);

	return graft;
}
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "c_lower" and "c_upper" contain the lower and upper bounds
 * that the loop node should express.
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 *
 * There may be zero lower bounds or zero upper bounds in "constraints"
 * in case the list of constraints was created
 * based on the atomic option or based on separation with explicit bounds.
 * In that case, we use "domain" to derive lower and/or upper bounds.
 *
 * We first compute a list of one or more lower bounds.
 *
 * Then we decide if we want to express the condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints.
 *
 * The set of enforced constraints is then computed either based on
 * a list of upper bounds or on a set of upper bound constraints.
 * We do not compute any enforced constraints if we were forced
 * to compute a lower or upper bound using exact_bound.  The domains
 * of the resulting expressions may imply some bounds on outer dimensions
 * that we do not want to appear in the enforced constraints since
 * they are not actually enforced by the corresponding code.
 *
 * Finally, we fill in the expressions of the for node.
 */
static __isl_give isl_ast_graft *refine_generic_bounds(
	__isl_take isl_ast_graft *graft,
	__isl_take isl_constraint_list *c_lower,
	__isl_take isl_constraint_list *c_upper,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	int depth;
	isl_ctx *ctx;
	isl_pw_aff_list *lower;
	int use_list;
	isl_set *upper_set = NULL;
	isl_pw_aff_list *upper_list = NULL;
	int n_lower, n_upper;

	if (!graft || !c_lower || !c_upper || !build)
		goto error;

	depth = isl_ast_build_get_depth(build);
	ctx = isl_ast_graft_get_ctx(graft);

	n_lower = isl_constraint_list_n_constraint(c_lower);
	n_upper = isl_constraint_list_n_constraint(c_upper);

	use_list = use_upper_bound_list(ctx, n_upper, domain, depth);

	lower = lower_bounds(c_lower, depth, domain, build);

	if (use_list)
		upper_list = upper_bounds(c_upper, depth, domain, build);
	else if (n_upper > 0)
		upper_set = intersect_constraints(c_upper);
	else
		upper_set = isl_set_universe(isl_set_get_space(domain));

	if (n_lower == 0 || n_upper == 0)
		;
	else if (use_list)
		graft = set_enforced_from_list(graft, lower, upper_list);
	else
		graft = set_enforced_from_set(graft, lower, depth, upper_set);

	graft = set_for_node_expressions(graft, lower, use_list, upper_list,
					upper_set, build);

	isl_pw_aff_list_free(lower);
	isl_pw_aff_list_free(upper_list);
	isl_set_free(upper_set);
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);

	return graft;
error:
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);
	return isl_ast_graft_free(graft);
}
/* Internal data structure used inside count_constraints to keep
 * track of the number of constraints that are independent of dimension "pos",
 * the lower bounds in "pos" and the upper bounds in "pos".
 */
struct isl_ast_count_constraints_data {
	int pos;

	int n_indep;
	int n_lower;
	int n_upper;
};
/* Increment data->n_indep, data->n_lower or data->n_upper depending
 * on whether "c" is independent of dimension data->pos,
 * a lower bound or an upper bound.
 */
static isl_stat count_constraints(__isl_take isl_constraint *c, void *user)
{
	struct isl_ast_count_constraints_data *data = user;

	if (isl_constraint_is_lower_bound(c, isl_dim_set, data->pos))
		data->n_lower++;
	else if (isl_constraint_is_upper_bound(c, isl_dim_set, data->pos))
		data->n_upper++;
	else
		data->n_indep++;

	isl_constraint_free(c);

	return isl_stat_ok;
}
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "list" represents the list of bounds that need to be encoded by
 * the for loop.  Only the constraints that involve the iterator
 * are relevant here.  The other constraints are taken care of by
 * the caller and are included in the generated constraints of "build".
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We separate lower bounds, upper bounds and constraints that
 * are independent of the loop iterator.
 *
 * The actual for loop bounds are generated in refine_generic_bounds.
 */
static __isl_give isl_ast_graft *refine_generic_split(
	__isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	struct isl_ast_count_constraints_data data;
	isl_constraint_list *lower;
	isl_constraint_list *upper;

	if (!list)
		return isl_ast_graft_free(graft);

	data.pos = isl_ast_build_get_depth(build);

	list = isl_constraint_list_sort(list, &cmp_constraint, &data.pos);
	if (!list)
		return isl_ast_graft_free(graft);

	data.n_indep = data.n_lower = data.n_upper = 0;
	if (isl_constraint_list_foreach(list, &count_constraints, &data) < 0) {
		isl_constraint_list_free(list);
		return isl_ast_graft_free(graft);
	}

	lower = isl_constraint_list_drop(list, 0, data.n_indep);
	upper = isl_constraint_list_copy(lower);
	lower = isl_constraint_list_drop(lower, data.n_lower, data.n_upper);
	upper = isl_constraint_list_drop(upper, 0, data.n_lower);

	return refine_generic_bounds(graft, lower, upper, domain, build);
}
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "bounds" represent the bounds that need to be encoded by
 * the for loop (or a guard around the for loop).
 * "domain" is the subset of "bounds" for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We break up "bounds" into a list of constraints and continue with
 * refine_generic_split.
 */
static __isl_give isl_ast_graft *refine_generic(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_basic_set *bounds, __isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_constraint_list *list;

	if (!build || !graft)
		return isl_ast_graft_free(graft);

	list = isl_basic_set_get_constraint_list(bounds);

	graft = refine_generic_split(graft, list, domain, build);

	return graft;
}
/* Create a for node for the current level.
 *
 * Mark the for node degenerate if "degenerate" is set.
 */
static __isl_give isl_ast_node *create_for(__isl_keep isl_ast_build *build,
	int degenerate)
{
	int depth;
	isl_id *id;
	isl_ast_node *node;

	if (!build)
		return NULL;

	depth = isl_ast_build_get_depth(build);
	id = isl_ast_build_get_iterator_id(build, depth);
	node = isl_ast_node_alloc_for(id);
	if (degenerate)
		node = isl_ast_node_for_mark_degenerate(node);

	return node;
}
/* If the ast_build_exploit_nested_bounds option is set, then return
 * the constraints enforced by all elements in "list".
 * Otherwise, return the universe.
 */
static __isl_give isl_basic_set *extract_shared_enforced(
	__isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_space *space;

	if (!list)
		return NULL;

	ctx = isl_ast_graft_list_get_ctx(list);
	if (isl_options_get_ast_build_exploit_nested_bounds(ctx))
		return isl_ast_graft_list_extract_shared_enforced(list, build);

	space = isl_ast_build_get_space(build, 1);
	return isl_basic_set_universe(space);
}
/* Return the pending constraints of "build" that are not already taken
 * care of (by a combination of "enforced" and the generated constraints
 * of "build").
 */
static __isl_give isl_set *extract_pending(__isl_keep isl_ast_build *build,
	__isl_keep isl_basic_set *enforced)
{
	isl_set *guard, *context;

	guard = isl_ast_build_get_pending(build);
	context = isl_set_from_basic_set(isl_basic_set_copy(enforced));
	context = isl_set_intersect(context,
					isl_ast_build_get_generated(build));
	return isl_set_gist(guard, context);
}
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 * It may be a strict subset of "bounds" in case "bounds" was created
 * based on the atomic option or based on separation with explicit bounds.
 *
 * "domain" may satisfy additional equalities that result
 * from intersecting "executed" with "bounds" in add_node.
 * It may also satisfy some global constraints that were dropped out because
 * we performed separation with explicit bounds.
 * The very first step is then to copy these constraints to "bounds".
 *
 * Since we may be calling before_each_for and after_each_for
 * callbacks, we record the current inverse schedule in the build.
 *
 * We consider three builds,
 * "build" is the one in which the current level is created,
 * "body_build" is the build in which the next level is created,
 * "sub_build" is essentially the same as "body_build", except that
 * the depth has not been increased yet.
 *
 * "build" already contains information (in strides and offsets)
 * about the strides at the current level, but this information is not
 * reflected in the build->domain.
 * We first add this information and the "bounds" to the sub_build->domain.
 * isl_ast_build_set_loop_bounds adds the stride information and
 * checks whether the current dimension attains
 * only a single value and whether this single value can be represented using
 * a single affine expression.
 * In the first case, the current level is considered "degenerate".
 * In the second, sub-case, the current level is considered "eliminated".
 * Eliminated levels don't need to be reflected in the AST since we can
 * simply plug in the affine expression.  For degenerate, but non-eliminated,
 * levels, we do introduce a for node, but mark it as degenerate so that
 * it can be printed as an assignment of the single value to the loop
 * iterator.
 *
 * If the current level is eliminated, we explicitly plug in the value
 * for the current level found by isl_ast_build_set_loop_bounds in the
 * inverse schedule.  This ensures that if we are working on a slice
 * of the domain based on information available in the inverse schedule
 * and the build domain, that then this information is also reflected
 * in the inverse schedule.  This operation also eliminates the current
 * dimension from the inverse schedule making sure no inner dimensions depend
 * on the current dimension.  Otherwise, we create a for node, marking
 * it degenerate if appropriate.  The initial for node is still incomplete
 * and will be completed in either refine_degenerate or refine_generic.
 *
 * We then generate a sequence of grafts for the next level,
 * create a surrounding graft for the current level and insert
 * the for node we created (if the current level is not eliminated).
 * Before creating a graft for the current level, we first extract
 * hoistable constraints from the child guards and combine them
 * with the pending constraints in the build.  These constraints
 * are used to simplify the child guards and then added to the guard
 * of the current graft to ensure that they will be generated.
 * If the hoisted guard is a disjunction, then we use it directly
 * to gist the guards on the children before intersecting it with the
 * pending constraints.  We do so because this disjunction is typically
 * identical to the guards on the children such that these guards
 * can be effectively removed completely.  After the intersection,
 * the gist operation would have a harder time figuring this out.
 *
 * Finally, we set the bounds of the for loop in either
 * refine_degenerate or refine_generic.
 * We do so in a context where the pending constraints of the build
 * have been replaced by the guard of the current graft.
 */
static __isl_give isl_ast_graft *create_node_scaled(
	__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	int depth;
	isl_bool degenerate;
	isl_bool eliminated;
	isl_basic_set *hull;
	isl_basic_set *enforced;
	isl_set *guard, *hoisted;
	isl_ast_node *node = NULL;
	isl_ast_graft *graft;
	isl_ast_graft_list *children;
	isl_ast_build *sub_build;
	isl_ast_build *body_build;

	domain = isl_ast_build_eliminate_divs(build, domain);
	domain = isl_set_detect_equalities(domain);
	hull = isl_set_unshifted_simple_hull(isl_set_copy(domain));
	bounds = isl_basic_set_intersect(bounds, hull);
	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	depth = isl_ast_build_get_depth(build);
	sub_build = isl_ast_build_copy(build);
	bounds = isl_basic_set_remove_redundancies(bounds);
	bounds = isl_ast_build_specialize_basic_set(sub_build, bounds);
	sub_build = isl_ast_build_set_loop_bounds(sub_build,
						isl_basic_set_copy(bounds));
	degenerate = isl_ast_build_has_value(sub_build);
	eliminated = isl_ast_build_has_affine_value(sub_build, depth);
	if (degenerate < 0 || eliminated < 0)
		executed = isl_union_map_free(executed);
	if (!degenerate)
		bounds = isl_ast_build_compute_gist_basic_set(build, bounds);
	sub_build = isl_ast_build_set_pending_generated(sub_build,
						isl_basic_set_copy(bounds));
	if (eliminated)
		executed = plug_in_values(executed, sub_build);
	else
		node = create_for(build, degenerate);

	body_build = isl_ast_build_copy(sub_build);
	body_build = isl_ast_build_increase_depth(body_build);
	if (!eliminated)
		node = before_each_for(node, body_build);
	children = generate_next_level(executed,
				    isl_ast_build_copy(body_build));

	enforced = extract_shared_enforced(children, build);
	guard = extract_pending(sub_build, enforced);
	hoisted = isl_ast_graft_list_extract_hoistable_guard(children, build);
	if (isl_set_n_basic_set(hoisted) > 1)
		children = isl_ast_graft_list_gist_guards(children,
						    isl_set_copy(hoisted));
	guard = isl_set_intersect(guard, hoisted);
	if (!eliminated)
		guard = add_implied_guards(guard, degenerate, bounds, build);

	graft = isl_ast_graft_alloc_from_children(children,
			    isl_set_copy(guard), enforced, build, sub_build);

	if (!eliminated) {
		isl_ast_build *for_build;

		graft = isl_ast_graft_insert_for(graft, node);
		for_build = isl_ast_build_copy(build);
		for_build = isl_ast_build_replace_pending_by_guard(for_build,
							isl_set_copy(guard));
		if (degenerate)
			graft = refine_degenerate(graft, for_build, sub_build);
		else
			graft = refine_generic(graft, bounds,
					domain, for_build);
		isl_ast_build_free(for_build);
	}
	isl_set_free(guard);
	if (!eliminated)
		graft = after_each_for(graft, body_build);

	isl_ast_build_free(body_build);
	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	isl_basic_set_free(bounds);
	isl_set_free(domain);

	return graft;
}
/* Internal data structure for checking if all constraints involving
 * the input dimension "depth" are such that the other coefficients
 * are multiples of "m", reducing "m" if they are not.
 * If "m" is reduced all the way down to "1", then the check has failed
 * and we break out of the iteration.
 */
struct isl_check_scaled_data {
	int depth;
	isl_val *m;
};
/* If constraint "c" involves the input dimension data->depth,
 * then make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat constraint_check_scaled(__isl_take isl_constraint *c,
	void *user)
{
	struct isl_check_scaled_data *data = user;
	int i, j, n;
	enum isl_dim_type t[] = { isl_dim_param, isl_dim_in, isl_dim_out,
				    isl_dim_div };

	if (!isl_constraint_involves_dims(c, isl_dim_in, data->depth, 1)) {
		isl_constraint_free(c);
		return isl_stat_ok;
	}

	for (i = 0; i < 4; ++i) {
		n = isl_constraint_dim(c, t[i]);
		if (n < 0)
			break;
		for (j = 0; j < n; ++j) {
			isl_val *d;

			if (t[i] == isl_dim_in && j == data->depth)
				continue;
			if (!isl_constraint_involves_dims(c, t[i], j, 1))
				continue;
			d = isl_constraint_get_coefficient_val(c, t[i], j);
			data->m = isl_val_gcd(data->m, d);
			if (isl_val_is_one(data->m))
				break;
		}
		if (j < n)
			break;
	}

	isl_constraint_free(c);

	return i < 4 ? isl_stat_error : isl_stat_ok;
}
/* For each constraint of "bmap" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat basic_map_check_scaled(__isl_take isl_basic_map *bmap,
	void *user)
{
	isl_stat r;

	r = isl_basic_map_foreach_constraint(bmap,
						&constraint_check_scaled, user);
	isl_basic_map_free(bmap);

	return r;
}
/* For each constraint of "map" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat map_check_scaled(__isl_take isl_map *map, void *user)
{
	isl_stat r;

	r = isl_map_foreach_basic_map(map, &basic_map_check_scaled, user);
	isl_map_free(map);

	return r;
}
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 *
 * Before moving on to the actual AST node construction in create_node_scaled,
 * we first check if the current dimension is strided and if we can scale
 * down this stride.  Note that we only do this if the ast_build_scale_strides
 * option is set.
 *
 * In particular, let the current dimension take on values
 *
 *	f + s a
 *
 * with a an integer.  We check if we can find an integer m that (obviously)
 * divides both f and s.
 *
 * If so, we check if the current dimension only appears in constraints
 * where the coefficients of the other variables are multiples of m.
 * We perform this extra check to avoid the risk of introducing
 * divisions by scaling down the current dimension.
 *
 * If so, we scale the current dimension down by a factor of m.
 * That is, we plug in
 *
 *	i = m i'							(1)
 *
 * Note that in principle we could always scale down strided loops
 * by plugging in
 *
 *	i = f + s i'
 *
 * but this may result in i' taking on larger values than the original i,
 * due to the shift by "f".
 * By contrast, the scaling in (1) can only reduce the (absolute) value "i".
 */
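/* For example, if the current dimension takes on the values 3 + 6 a,
 * then m = 3 divides both the offset 3 and the stride 6.  Provided all
 * other coefficients in constraints involving i are multiples of 3,
 * plugging in i = 3 i' turns these values into i' = 1 + 2 a, i.e.,
 * a loop with stride 2 instead of 6.
 */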
static __isl_give isl_ast_graft *create_node(__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	struct isl_check_scaled_data data;
	isl_ctx *ctx;
	isl_aff *offset;
	isl_val *d;

	ctx = isl_ast_build_get_ctx(build);
	if (!isl_options_get_ast_build_scale_strides(ctx))
		return create_node_scaled(executed, bounds, domain, build);

	data.depth = isl_ast_build_get_depth(build);
	if (!isl_ast_build_has_stride(build, data.depth))
		return create_node_scaled(executed, bounds, domain, build);

	offset = isl_ast_build_get_offset(build, data.depth);
	data.m = isl_ast_build_get_stride(build, data.depth);
	if (!data.m)
		offset = isl_aff_free(offset);
	offset = isl_aff_scale_down_val(offset, isl_val_copy(data.m));
	d = isl_aff_get_denominator_val(offset);
	if (!d)
		executed = isl_union_map_free(executed);

	if (executed && isl_val_is_divisible_by(data.m, d))
		data.m = isl_val_div(data.m, d);
	else {
		data.m = isl_val_set_si(data.m, 1);
		isl_val_free(d);
	}

	if (!isl_val_is_one(data.m)) {
		if (isl_union_map_foreach_map(executed, &map_check_scaled,
						&data) < 0 &&
		    !isl_val_is_one(data.m))
			executed = isl_union_map_free(executed);
	}

	if (!isl_val_is_one(data.m)) {
		isl_space *space;
		isl_multi_aff *ma;
		isl_aff *aff;
		isl_map *map;
		isl_union_map *umap;

		space = isl_ast_build_get_space(build, 1);
		space = isl_space_map_from_set(space);
		ma = isl_multi_aff_identity(space);
		aff = isl_multi_aff_get_aff(ma, data.depth);
		aff = isl_aff_scale_val(aff, isl_val_copy(data.m));
		ma = isl_multi_aff_set_aff(ma, data.depth, aff);

		bounds = isl_basic_set_preimage_multi_aff(bounds,
						isl_multi_aff_copy(ma));
		domain = isl_set_preimage_multi_aff(domain,
						isl_multi_aff_copy(ma));
		map = isl_map_reverse(isl_map_from_multi_aff(ma));
		umap = isl_union_map_from_map(map);
		executed = isl_union_map_apply_domain(executed,
						isl_union_map_copy(umap));
		build = isl_ast_build_scale_down(build, isl_val_copy(data.m),
						umap);
	}

	isl_aff_free(offset);
	isl_val_free(data.m);

	return create_node_scaled(executed, bounds, domain, build);
}
/* Add the basic set to the list that "user" points to.
 */
static isl_stat collect_basic_set(__isl_take isl_basic_set *bset, void *user)
{
	isl_basic_set_list **list = user;

	*list = isl_basic_set_list_add(*list, bset);

	return isl_stat_ok;
}
/* Extract the basic sets of "set" and collect them in an isl_basic_set_list.
 */
static __isl_give isl_basic_set_list *isl_basic_set_list_from_set(
	__isl_take isl_set *set)
{
	int n;
	isl_ctx *ctx;
	isl_basic_set_list *list;

	if (!set)
		return NULL;

	ctx = isl_set_get_ctx(set);

	n = isl_set_n_basic_set(set);
	list = isl_basic_set_list_alloc(ctx, n);
	if (isl_set_foreach_basic_set(set, &collect_basic_set, &list) < 0)
		list = isl_basic_set_list_free(list);

	isl_set_free(set);
	return list;
}
/* Generate code for the schedule domain "bounds"
 * and add the result to "list".
 *
 * We mainly detect strides here and check if the bounds do not
 * conflict with the current build domain
 * and then pass over control to create_node.
 *
 * "bounds" reflects the bounds on the current dimension and possibly
 * some extra conditions on outer dimensions.
 * It does not, however, include any divs involving the current dimension,
 * so it does not capture any stride constraints.
 * We therefore need to compute that part of the schedule domain that
 * intersects with "bounds" and derive the strides from the result.
 */
static __isl_give isl_ast_graft_list *add_node(
	__isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_ast_build *build)
{
	isl_ast_graft *graft;
	isl_set *domain = NULL;
	isl_union_set *uset;
	int empty, disjoint;

	uset = isl_union_set_from_basic_set(isl_basic_set_copy(bounds));
	executed = isl_union_map_intersect_domain(executed, uset);
	empty = isl_union_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty)
		goto done;

	uset = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(uset);
	domain = isl_ast_build_specialize(build, domain);

	domain = isl_set_compute_divs(domain);
	domain = isl_ast_build_eliminate_inner(build, domain);
	disjoint = isl_set_is_disjoint(domain, build->domain);
	if (disjoint < 0)
		goto error;
	if (disjoint)
		goto done;

	build = isl_ast_build_detect_strides(build, isl_set_copy(domain));

	graft = create_node(executed, bounds, domain,
				isl_ast_build_copy(build));
	list = isl_ast_graft_list_add(list, graft);
	isl_ast_build_free(build);
	return list;
error:
	list = isl_ast_graft_list_free(list);
done:
	isl_set_free(domain);
	isl_basic_set_free(bounds);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return list;
}
/* Does any element of i follow or coincide with any element of j
 * at the current depth for equal values of the outer dimensions?
 */
static isl_bool domain_follows_at_depth(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	isl_bool empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						    isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
					    isl_dim_out, l);
	test = isl_basic_map_order_ge(test, isl_dim_in, depth,
					isl_dim_out, depth);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return isl_bool_not(empty);
}
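
/* For example, at depth 1 and with i = { [0, 2] } and j = { [0, 1] },
 * the test map above contains { [0, 2] -> [0, 1] }: the outer dimension
 * is equated and 2 >= 1 holds at the current depth, so the test map
 * is non-empty and the function returns isl_bool_true.
 */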
/* Split up each element of "list" into a part that is related to "bset"
 * according to "gt" and a part that is not.
 * Return a list that consist of "bset" and all the pieces.
 */
static __isl_give isl_basic_set_list *add_split_on(
	__isl_take isl_basic_set_list *list, __isl_take isl_basic_set *bset,
	__isl_keep isl_basic_map *gt)
{
	int i, n;
	isl_basic_set_list *res;

	if (!list)
		bset = isl_basic_set_free(bset);

	gt = isl_basic_map_copy(gt);
	gt = isl_basic_map_intersect_domain(gt, isl_basic_set_copy(bset));
	n = isl_basic_set_list_n_basic_set(list);
	res = isl_basic_set_list_from_basic_set(bset);
	for (i = 0; res && i < n; ++i) {
		isl_basic_set *bset;
		isl_set *set1, *set2;
		isl_basic_map *bmap;
		int empty;

		bset = isl_basic_set_list_get_basic_set(list, i);
		bmap = isl_basic_map_copy(gt);
		bmap = isl_basic_map_intersect_range(bmap, bset);
		bset = isl_basic_map_range(bmap);
		empty = isl_basic_set_is_empty(bset);
		if (empty < 0)
			res = isl_basic_set_list_free(res);
		if (empty) {
			isl_basic_set_free(bset);
			bset = isl_basic_set_list_get_basic_set(list, i);
			res = isl_basic_set_list_add(res, bset);
			continue;
		}

		res = isl_basic_set_list_add(res, isl_basic_set_copy(bset));
		set1 = isl_set_from_basic_set(bset);
		bset = isl_basic_set_list_get_basic_set(list, i);
		set2 = isl_set_from_basic_set(bset);
		set1 = isl_set_subtract(set2, set1);
		set1 = isl_set_make_disjoint(set1);

		res = isl_basic_set_list_concat(res,
					isl_basic_set_list_from_set(set1));
	}
	isl_basic_map_free(gt);
	isl_basic_set_list_free(list);
	return res;
}
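
/* As a small illustration of add_split_on, take depth 1,
 * "bset" = { [0, 5] } and a "list" with the single element
 * { [0, i] : 0 <= i <= 10 }.  The relation "gt" orders "bset" after
 * elements with the same outer dimension and a smaller value at the
 * current depth, so the list element is split into
 * { [0, i] : 0 <= i <= 4 }, which lies before "bset", and
 * { [0, i] : 5 <= i <= 10 }, which does not, and the result consists
 * of "bset" followed by these two pieces.
 */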
static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed,
	__isl_keep isl_ast_build *build);

/* Internal data structure for add_nodes.
 *
 * "executed" and "build" are extra arguments to be passed to add_node.
 * "list" collects the results.
 */
struct isl_add_nodes_data {
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
/* Generate code for the schedule domains in "scc"
 * and add the results to "list".
 *
 * The domains in "scc" form a strongly connected component in the ordering.
 * If the number of domains in "scc" is larger than 1, then this means
 * that we cannot determine a valid ordering for the domains in the component.
 * This should be fairly rare because the individual domains
 * have been made disjoint first.
 * The problem is that the domains may be integrally disjoint but not
 * rationally disjoint.  For example, we may have domains
 *
 *	{ [i,i] : 0 <= i <= 1 } and { [i,1-i] : 0 <= i <= 1 }
 *
 * These two domains have an empty intersection, but their rational
 * relaxations do intersect.  It is impossible to order these domains
 * in the second dimension because the first should be ordered before
 * the second for outer dimension equal to 0, while it should be ordered
 * after for outer dimension equal to 1.
 *
 * This may happen in particular in case of unrolling since the domain
 * of each slice is replaced by its simple hull.
 *
 * For each basic set i in "scc" and for each of the following basic sets j,
 * we split off that part of the basic set i that shares the outer dimensions
 * with j and lies before j in the current dimension.
 * We collect all the pieces in a new list that replaces "scc".
 *
 * While the elements in "scc" should be disjoint, we double-check
 * this property to avoid running into an infinite recursion in case
 * they intersect due to some internal error.
 */
static isl_stat add_nodes(__isl_take isl_basic_set_list *scc, void *user)
{
	struct isl_add_nodes_data *data = user;
	int i, n, depth;
	isl_basic_set *bset, *first;
	isl_basic_set_list *list;
	isl_space *space;
	isl_basic_map *gt;

	n = isl_basic_set_list_n_basic_set(scc);
	bset = isl_basic_set_list_get_basic_set(scc, 0);
	if (n == 1) {
		isl_basic_set_list_free(scc);
		data->list = add_node(data->list,
				isl_union_map_copy(data->executed), bset,
				isl_ast_build_copy(data->build));
		return data->list ? isl_stat_ok : isl_stat_error;
	}

	depth = isl_ast_build_get_depth(data->build);
	space = isl_basic_set_get_space(bset);
	space = isl_space_map_from_set(space);
	gt = isl_basic_map_universe(space);
	for (i = 0; i < depth; ++i)
		gt = isl_basic_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
	gt = isl_basic_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);

	first = isl_basic_set_copy(bset);
	list = isl_basic_set_list_from_basic_set(bset);
	for (i = 1; i < n; ++i) {
		int disjoint;

		bset = isl_basic_set_list_get_basic_set(scc, i);

		disjoint = isl_basic_set_is_disjoint(bset, first);
		if (disjoint < 0)
			list = isl_basic_set_list_free(list);
		else if (!disjoint)
			isl_die(isl_basic_set_list_get_ctx(scc),
				isl_error_internal,
				"basic sets in scc are assumed to be disjoint",
				list = isl_basic_set_list_free(list));

		list = add_split_on(list, bset, gt);
	}
	isl_basic_set_free(first);
	isl_basic_map_free(gt);
	isl_basic_set_list_free(scc);
	scc = list;
	data->list = isl_ast_graft_list_concat(data->list,
		    generate_sorted_domains(scc, data->executed, data->build));
	isl_basic_set_list_free(scc);

	return data->list ? isl_stat_ok : isl_stat_error;
}
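
/* Note that the relation "gt" constructed in add_nodes is, for depth 1,
 * of the form
 *
 *	{ [a, b] -> [a, b'] : b > b' }
 *
 * relating each element to the elements with the same outer dimension
 * values and a strictly smaller value at the current depth.
 */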
/* Sort the domains in "domain_list" according to the execution order
 * at the current depth (for equal values of the outer dimensions),
 * generate code for each of them, collecting the results in a list.
 * If no code is generated (because the intersection of the inverse schedule
 * with the domains turns out to be empty), then an empty list is returned.
 *
 * The caller is responsible for ensuring that the basic sets in "domain_list"
 * are pair-wise disjoint.  It can, however, in principle happen that
 * two basic sets should be ordered one way for one value of the outer
 * dimensions and the other way for some other value of the outer dimensions.
 * We therefore play safe and look for strongly connected components.
 * The function add_nodes takes care of handling non-trivial components.
 */
static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_add_nodes_data data;
	int depth;
	int n;

	ctx = isl_basic_set_list_get_ctx(domain_list);
	n = isl_basic_set_list_n_basic_set(domain_list);
	data.list = isl_ast_graft_list_alloc(ctx, n);
	if (n == 0)
		return data.list;
	if (n == 1)
		return add_node(data.list, isl_union_map_copy(executed),
			isl_basic_set_list_get_basic_set(domain_list, 0),
			isl_ast_build_copy(build));

	depth = isl_ast_build_get_depth(build);
	data.executed = executed;
	data.build = build;
	if (isl_basic_set_list_foreach_scc(domain_list,
					&domain_follows_at_depth, &depth,
					&add_nodes, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	return data.list;
}
/* Do i and j share any values for the outer dimensions?
 */
static isl_bool shared_outer(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	isl_bool empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						    isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
					    isl_dim_out, l);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return isl_bool_not(empty);
}
/* Internal data structure for generate_sorted_domains_wrap.
 *
 * "n" is the total number of basic sets
 * "executed" and "build" are extra arguments to be passed
 * to generate_sorted_domains.
 *
 * "single" is set to 1 by generate_sorted_domains_wrap if there
 * is only a single component.
 * "list" collects the results.
 */
struct isl_ast_generate_parallel_domains_data {
	int n;
	isl_union_map *executed;
	isl_ast_build *build;

	int single;
	isl_ast_graft_list *list;
};
/* Call generate_sorted_domains on "scc", fuse the result into a list
 * with either zero or one graft and collect these single element
 * lists into data->list.
 *
 * If there is only one component, i.e., if the number of basic sets
 * in the current component is equal to the total number of basic sets,
 * then data->single is set to 1 and the result of generate_sorted_domains
 * is not fused.
 */
static isl_stat generate_sorted_domains_wrap(__isl_take isl_basic_set_list *scc,
	void *user)
{
	struct isl_ast_generate_parallel_domains_data *data = user;
	isl_ast_graft_list *list;

	list = generate_sorted_domains(scc, data->executed, data->build);
	data->single = isl_basic_set_list_n_basic_set(scc) == data->n;
	if (!data->single)
		list = isl_ast_graft_list_fuse(list, data->build);
	data->list = isl_ast_graft_list_concat(data->list, list);

	isl_basic_set_list_free(scc);

	if (!data->list)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Look for any (weakly connected) components in the "domain_list"
 * of domains that share some values of the outer dimensions.
 * That is, domains in different components do not share any values
 * of the outer dimensions.  This means that these components
 * can be freely reordered.
 * Within each of the components, we sort the domains according
 * to the execution order at the current depth.
 *
 * If there is more than one component, then generate_sorted_domains_wrap
 * fuses the result of each call to generate_sorted_domains
 * into a list with either zero or one graft and collects these (at most)
 * single element lists into a bigger list.  This means that the elements of
 * the final list can be freely reordered.  In particular, we sort them
 * according to an arbitrary but fixed ordering to ease merging of
 * graft lists from different components.
 */
static __isl_give isl_ast_graft_list *generate_parallel_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	int depth;
	struct isl_ast_generate_parallel_domains_data data;

	if (!domain_list)
		return NULL;

	data.n = isl_basic_set_list_n_basic_set(domain_list);
	if (data.n <= 1)
		return generate_sorted_domains(domain_list, executed, build);

	depth = isl_ast_build_get_depth(build);
	data.list = NULL;
	data.executed = executed;
	data.build = build;
	data.single = 0;
	if (isl_basic_set_list_foreach_scc(domain_list, &shared_outer, &depth,
					    &generate_sorted_domains_wrap,
					    &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (!data.single)
		data.list = isl_ast_graft_list_sort_guard(data.list);

	return data.list;
}
/* Internal data for separate_domain.
 *
 * "explicit" is set if we only want to use explicit bounds.
 *
 * "domain" collects the separated domains.
 */
struct isl_separate_domain_data {
	isl_ast_build *build;
	int explicit;
	isl_set *domain;
};
/* Extract implicit bounds on the current dimension for the executed "map".
 *
 * The domain of "map" may involve inner dimensions, so we
 * need to eliminate them.
 */
static __isl_give isl_set *implicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;

	domain = isl_map_domain(map);
	domain = isl_ast_build_eliminate(build, domain);

	return domain;
}
/* Extract explicit bounds on the current dimension for the executed "map".
 *
 * Rather than eliminating the inner dimensions as in implicit_bounds,
 * we simply drop any constraints involving those inner dimensions.
 * The idea is that most bounds that are implied by constraints on the
 * inner dimensions will be enforced by for loops and not by explicit guards.
 * There is then no need to separate along those bounds.
 */
static __isl_give isl_set *explicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;
	int depth;
	isl_size dim;

	dim = isl_map_dim(map, isl_dim_out);
	if (dim < 0)
		return isl_map_domain(isl_map_free(map));
	map = isl_map_drop_constraints_involving_dims(map, isl_dim_out, 0, dim);

	domain = isl_map_domain(map);
	depth = isl_ast_build_get_depth(build);
	dim = isl_set_dim(domain, isl_dim_set);
	domain = isl_set_detect_equalities(domain);
	domain = isl_set_drop_constraints_involving_dims(domain,
				isl_dim_set, depth + 1, dim - (depth + 1));
	domain = isl_set_remove_divs_involving_dims(domain,
				isl_dim_set, depth, 1);
	domain = isl_set_remove_unknown_divs(domain);

	return domain;
}
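
/* The difference between the two kinds of bounds can be illustrated
 * on a schedule domain of the form
 *
 *	{ [i, j] : 0 <= i and i <= j and j <= 5 }
 *
 * with i the current dimension and j an inner dimension.
 * Eliminating j, as in implicit_bounds, results in 0 <= i <= 5,
 * while simply dropping the constraints involving j, as done here,
 * only keeps 0 <= i.
 */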
/* Split data->domain into pieces that intersect with the range of "map"
 * and pieces that do not intersect with the range of "map"
 * and then add that part of the range of "map" that does not intersect
 * with data->domain.
 */
static isl_stat separate_domain(__isl_take isl_map *map, void *user)
{
	struct isl_separate_domain_data *data = user;
	isl_set *domain;
	isl_set *d1, *d2;

	if (data->explicit)
		domain = explicit_bounds(map, data->build);
	else
		domain = implicit_bounds(map, data->build);

	domain = isl_set_coalesce(domain);
	domain = isl_set_make_disjoint(domain);
	d1 = isl_set_subtract(isl_set_copy(domain), isl_set_copy(data->domain));
	d2 = isl_set_subtract(isl_set_copy(data->domain), isl_set_copy(domain));
	data->domain = isl_set_intersect(data->domain, domain);
	data->domain = isl_set_union(data->domain, d1);
	data->domain = isl_set_union(data->domain, d2);

	return isl_stat_ok;
}
/* Separate the schedule domains of "executed".
 *
 * That is, break up the domain of "executed" into basic sets,
 * such that for each basic set S, every element in S is associated with
 * the same domain spaces.
 *
 * "space" is the (single) domain space of "executed".
 */
static __isl_give isl_set *separate_schedule_domains(
	__isl_take isl_space *space, __isl_take isl_union_map *executed,
	__isl_keep isl_ast_build *build)
{
	struct isl_separate_domain_data data = { build };
	isl_ctx *ctx;

	ctx = isl_ast_build_get_ctx(build);
	data.explicit = isl_options_get_ast_build_separation_bounds(ctx) ==
				    ISL_AST_BUILD_SEPARATION_BOUNDS_EXPLICIT;
	data.domain = isl_set_empty(space);
	if (isl_union_map_foreach_map(executed, &separate_domain, &data) < 0)
		data.domain = isl_set_free(data.domain);

	isl_union_map_free(executed);
	return data.domain;
}
/* Temporary data used during the search for a lower bound for unrolling.
 *
 * "build" is the build in which the unrolling will be performed
 * "domain" is the original set for which to find a lower bound
 * "depth" is the dimension for which to find a lower bound
 * "expansion" is the expansion that needs to be applied to "domain"
 * in the unrolling that will be performed
 *
 * "lower" is the best lower bound found so far.  It is NULL if we have not
 * found any yet.
 * "n" is the corresponding size.  If lower is NULL, then the value of n
 * is undefined.
 * "n_div" is the maximal number of integer divisions in the first
 * unrolled iteration (after expansion).  It is set to -1 if it hasn't
 * been computed yet.
 */
struct isl_find_unroll_data {
	isl_ast_build *build;
	isl_set *domain;
	int depth;
	isl_basic_map *expansion;

	isl_aff *lower;
	int *n;
	int n_div;
};
/* Return the constraint
 *
 *	i_"depth" = aff + offset
 */
static __isl_give isl_constraint *at_offset(int depth, __isl_keep isl_aff *aff,
	int offset)
{
	aff = isl_aff_copy(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, depth, -1);
	aff = isl_aff_add_constant_si(aff, offset);
	return isl_equality_from_aff(aff);
}
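
/* For example, with depth 1, offset 2 and aff = { [i0, i1] -> [3 i0] },
 * the returned equality constraint expresses i1 = 3 i0 + 2.
 */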
/* Update *user to the number of integer divisions in the first element
 * of "ma", if it is larger than the current value.
 */
static isl_stat update_n_div(__isl_take isl_set *set,
	__isl_take isl_multi_aff *ma, void *user)
{
	isl_aff *aff;
	int *n = user;
	isl_size n_div;

	aff = isl_multi_aff_get_aff(ma, 0);
	n_div = isl_aff_dim(aff, isl_dim_div);
	isl_aff_free(aff);
	isl_multi_aff_free(ma);
	isl_set_free(set);

	if (n_div > *n)
		*n = n_div;

	return n_div >= 0 ? isl_stat_ok : isl_stat_error;
}
/* Get the number of integer divisions in the expression for the iterator
 * value at the first slice in the unrolling based on lower bound "lower",
 * taking into account the expansion that needs to be performed on this slice.
 */
static int get_expanded_n_div(struct isl_find_unroll_data *data,
	__isl_keep isl_aff *lower)
{
	isl_constraint *c;
	isl_set *set;
	isl_map *it_map, *expansion;
	isl_pw_multi_aff *pma;
	int n;

	c = at_offset(data->depth, lower, 0);
	set = isl_set_copy(data->domain);
	set = isl_set_add_constraint(set, c);
	expansion = isl_map_from_basic_map(isl_basic_map_copy(data->expansion));
	set = isl_set_apply(set, expansion);
	it_map = isl_ast_build_map_to_iterator(data->build, set);
	pma = isl_pw_multi_aff_from_map(it_map);
	n = 0;
	if (isl_pw_multi_aff_foreach_piece(pma, &update_n_div, &n) < 0)
		n = -1;
	isl_pw_multi_aff_free(pma);

	return n;
}
/* Is the lower bound "lower" with corresponding iteration count "n"
 * better than the one stored in "data"?
 * If there is no upper bound on the iteration count ("n" is infinity) or
 * if the count is too large, then we cannot use this lower bound.
 * Otherwise, if there was no previous lower bound or
 * if the iteration count of the new lower bound is smaller than
 * the iteration count of the previous lower bound, then we consider
 * the new lower bound to be better.
 * If the iteration count is the same, then compare the number
 * of integer divisions that would be needed to express
 * the iterator value at the first slice in the unrolling
 * according to the lower bound.  If we end up computing this
 * number, then store the lowest value in data->n_div.
 */
static int is_better_lower_bound(struct isl_find_unroll_data *data,
	__isl_keep isl_aff *lower, __isl_keep isl_val *n)
{
	int cmp;
	int n_div;

	if (!n)
		return -1;
	if (isl_val_is_infty(n))
		return 0;
	if (isl_val_cmp_si(n, INT_MAX) > 0)
		return 0;
	if (!data->lower)
		return 1;
	cmp = isl_val_cmp_si(n, *data->n);
	if (cmp < 0)
		return 1;
	if (cmp > 0)
		return 0;
	if (data->n_div < 0)
		data->n_div = get_expanded_n_div(data, data->lower);
	if (data->n_div < 0)
		return -1;
	if (data->n_div == 0)
		return 0;
	n_div = get_expanded_n_div(data, lower);
	if (n_div < 0)
		return -1;
	if (n_div >= data->n_div)
		return 0;
	data->n_div = n_div;

	return 1;
}
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 *
 * If "c" does not involve the dimension at the current depth,
 * then we cannot use it.
 * Otherwise, let "c" be of the form
 *
 *	i >= f(j)/a
 *
 * We compute the maximal value of
 *
 *	-ceil(f(j)/a) + i + 1
 *
 * over the domain.  If there is such a value "n", then we know
 *
 *	-ceil(f(j)/a) + i + 1 <= n
 *
 * or
 *
 *	i < ceil(f(j)/a) + n
 *
 * meaning that we can use ceil(f(j)/a) as a lower bound for unrolling.
 * We just need to check if we have found any lower bound before and
 * if the new lower bound is better (smaller n or fewer integer divisions)
 * than the previously found lower bounds.
 */
static isl_stat update_unrolling_lower_bound(struct isl_find_unroll_data *data,
	__isl_keep isl_constraint *c)
{
	isl_aff *aff, *lower;
	isl_val *max;
	int better;

	if (!isl_constraint_is_lower_bound(c, isl_dim_set, data->depth))
		return isl_stat_ok;

	lower = isl_constraint_get_bound(c, isl_dim_set, data->depth);
	lower = isl_aff_ceil(lower);
	aff = isl_aff_copy(lower);
	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, data->depth, 1);
	aff = isl_aff_add_constant_si(aff, 1);
	max = isl_set_max_val(data->domain, aff);
	isl_aff_free(aff);

	better = is_better_lower_bound(data, lower, max);
	if (better < 0 || !better) {
		isl_val_free(max);
		isl_aff_free(lower);
		return better < 0 ? isl_stat_error : isl_stat_ok;
	}

	isl_aff_free(data->lower);
	data->lower = lower;
	*data->n = isl_val_get_num_si(max);
	isl_val_free(max);

	return isl_stat_ok;
}
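
/* As a concrete instance of the computation above, for the domain
 *
 *	{ [i] : 2 <= i <= 7 }
 *
 * and the lower bound constraint i >= 2, the expression -2 + i + 1
 * attains a maximal value of n = 6 over the domain, so that the
 * unrolling would cover the iterations i = 2, ..., 7 in 6 slices.
 */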
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 */
static isl_stat constraint_find_unroll(__isl_take isl_constraint *c, void *user)
{
	struct isl_find_unroll_data *data;
	isl_stat r;

	data = (struct isl_find_unroll_data *) user;
	r = update_unrolling_lower_bound(data, c);
	isl_constraint_free(c);

	return r;
}
/* Look for a lower bound l(i) on the dimension at "depth"
 * and a size n such that "domain" is a subset of
 *
 *	{ [i] : l(i) <= i_d < l(i) + n }
 *
 * where d is "depth" and l(i) depends only on earlier dimensions.
 * Furthermore, try and find a lower bound such that n is as small as possible.
 * In particular, "n" needs to be finite.
 * "build" is the build in which the unrolling will be performed.
 * "expansion" is the expansion that needs to be applied to "domain"
 * in the unrolling that will be performed.
 *
 * Inner dimensions have been eliminated from "domain" by the caller.
 *
 * We first construct a collection of lower bounds on the input set
 * by computing its simple hull.  We then iterate through them,
 * discarding those that we cannot use (either because they do not
 * involve the dimension at "depth" or because they have no corresponding
 * upper bound, meaning that "n" would be unbounded) and pick out the
 * best from the remaining ones.
 *
 * If we cannot find a suitable lower bound, then we consider that
 * to be an error.
 */
static __isl_give isl_aff *find_unroll_lower_bound(
	__isl_keep isl_ast_build *build, __isl_keep isl_set *domain,
	int depth, __isl_keep isl_basic_map *expansion, int *n)
{
	struct isl_find_unroll_data data =
		{ build, domain, depth, expansion, NULL, n, -1 };
	isl_basic_set *hull;

	hull = isl_set_simple_hull(isl_set_copy(domain));

	if (isl_basic_set_foreach_constraint(hull,
					    &constraint_find_unroll, &data) < 0)
		goto error;

	isl_basic_set_free(hull);

	if (!data.lower)
		isl_die(isl_set_get_ctx(domain), isl_error_invalid,
			"cannot find lower bound for unrolling", return NULL);

	return data.lower;
error:
	isl_basic_set_free(hull);
	return isl_aff_free(data.lower);
}
/* Call "fn" on each iteration of the current dimension of "domain".
 * If "init" is not NULL, then it is called with the number of
 * iterations before any call to "fn".
 * Return -1 on failure.
 *
 * Since we are going to be iterating over the individual values,
 * we first check if there are any strides on the current dimension.
 * If there is, we rewrite the current dimension i as
 *
 *		i = stride i' + offset
 *
 * and then iterate over individual values of i' instead.
 *
 * We then look for a lower bound on i' and a size such that the domain
 * is a subset of
 *
 *		{ [j,i'] : l(j) <= i' < l(j) + n }
 *
 * and then take slices of the domain at values of i'
 * between l(j) and l(j) + n - 1.
 *
 * We compute the unshifted simple hull of each slice to ensure that
 * we have a single basic set per offset.  The slicing constraint
 * may get simplified away before the unshifted simple hull is taken
 * and may therefore in some rare cases disappear from the result.
 * We therefore explicitly add the constraint back after computing
 * the unshifted simple hull to ensure that the basic sets
 * remain disjoint.  The constraints that are dropped by taking the hull
 * will be taken into account at the next level, as in the case of the
 * atomic option.
 *
 * Finally, we map i' back to i and call "fn".
 */
static int foreach_iteration(__isl_take isl_set *domain,
	__isl_keep isl_ast_build *build, int (*init)(int n, void *user),
	int (*fn)(__isl_take isl_basic_set *bset, void *user), void *user)
{
	int i, n;
	int empty;
	int depth;
	isl_multi_aff *expansion;
	isl_basic_map *bmap;
	isl_aff *lower = NULL;
	isl_ast_build *stride_build;

	depth = isl_ast_build_get_depth(build);

	domain = isl_ast_build_eliminate_inner(build, domain);
	domain = isl_set_intersect(domain, isl_ast_build_get_domain(build));
	stride_build = isl_ast_build_copy(build);
	stride_build = isl_ast_build_detect_strides(stride_build,
						isl_set_copy(domain));
	expansion = isl_ast_build_get_stride_expansion(stride_build);

	domain = isl_set_preimage_multi_aff(domain,
					isl_multi_aff_copy(expansion));
	domain = isl_ast_build_eliminate_divs(stride_build, domain);
	isl_ast_build_free(stride_build);

	bmap = isl_basic_map_from_multi_aff(expansion);

	empty = isl_set_is_empty(domain);
	if (empty < 0) {
		n = -1;
	} else if (empty) {
		n = 0;
	} else {
		lower = find_unroll_lower_bound(build, domain, depth, bmap, &n);
		if (!lower)
			n = -1;
	}
	if (n >= 0 && init && init(n, user) < 0)
		n = -1;
	for (i = 0; i < n; ++i) {
		isl_set *set;
		isl_basic_set *bset;
		isl_constraint *slice;

		slice = at_offset(depth, lower, i);
		set = isl_set_copy(domain);
		set = isl_set_add_constraint(set, isl_constraint_copy(slice));
		bset = isl_set_unshifted_simple_hull(set);
		bset = isl_basic_set_add_constraint(bset, slice);
		bset = isl_basic_set_apply(bset, isl_basic_map_copy(bmap));
		if (fn(bset, user) < 0)
			break;
	}

	isl_aff_free(lower);
	isl_set_free(domain);
	isl_basic_map_free(bmap);

	return n < 0 || i < n ? -1 : 0;
}
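
/* A small example of the stride handling in foreach_iteration:
 * for the domain
 *
 *	{ [i] : 0 <= i <= 10 and i mod 4 = 1 }
 *
 * stride detection yields the expansion i = 4 i' + 1, the preimage
 * { [i'] : 0 <= i' <= 2 } is sliced at i' = 0, 1 and 2, and each slice
 * is mapped back through the expansion before "fn" is called,
 * producing the iterations i = 1, 5 and 9.
 */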
/* Data structure for storing the results and the intermediate objects
 * of compute_domains.
 *
 * "list" is the main result of the function and contains a list
 * of disjoint basic sets for which code should be generated.
 *
 * "executed" and "build" are inputs to compute_domains.
 * "schedule_domain" is the domain of "executed".
 *
 * "option" contains the domains at the current depth that should be
 * atomic, separated or unrolled.  These domains are as specified by
 * the user, except that inner dimensions have been eliminated and
 * that they have been made pair-wise disjoint.
 *
 * "sep_class" contains the user-specified split into separation classes
 * specialized to the current depth.
 * "done" contains the union of the separation domains that have already
 * been handled.
 */
struct isl_codegen_domains {
	isl_basic_set_list *list;

	isl_union_map *executed;
	isl_ast_build *build;
	isl_set *schedule_domain;

	isl_set *option[4];

	isl_map *sep_class;
	isl_set *done;
};
/* Internal data structure for do_unroll.
 *
 * "domains" stores the results of compute_domains.
 * "class_domain" is the original class domain passed to do_unroll.
 * "unroll_domain" collects the unrolled iterations.
 */
struct isl_ast_unroll_data {
	struct isl_codegen_domains *domains;
	isl_set *class_domain;
	isl_set *unroll_domain;
};
/* Given an iteration of an unrolled domain represented by "bset",
 * add it to data->domains->list.
 * Since we may have dropped some constraints, we intersect with
 * the class domain again to ensure that each element in the list
 * is disjoint from the other class domains.
 */
static int do_unroll_iteration(__isl_take isl_basic_set *bset, void *user)
{
	struct isl_ast_unroll_data *data = user;
	isl_set *set;
	isl_basic_set_list *list;

	set = isl_set_from_basic_set(bset);
	data->unroll_domain = isl_set_union(data->unroll_domain,
					    isl_set_copy(set));
	set = isl_set_intersect(set, isl_set_copy(data->class_domain));
	set = isl_set_make_disjoint(set);
	list = isl_basic_set_list_from_set(set);
	data->domains->list = isl_basic_set_list_concat(data->domains->list,
							list);

	return 0;
}
/* Extend domains->list with a list of basic sets, one for each value
 * of the current dimension in "domain" and remove the corresponding
 * sets from the class domain.  Return the updated class domain.
 * The divs that involve the current dimension have not been projected out
 * from "domain".
 *
 * We call foreach_iteration to iterate over the individual values and
 * in do_unroll_iteration we collect the individual basic sets in
 * domains->list and their union in data->unroll_domain, which is then
 * used to update the class domain.
 */
static __isl_give isl_set *do_unroll(struct isl_codegen_domains *domains,
	__isl_take isl_set *domain, __isl_take isl_set *class_domain)
{
	struct isl_ast_unroll_data data;

	if (!domain)
		return isl_set_free(class_domain);
	if (!class_domain)
		return isl_set_free(domain);

	data.domains = domains;
	data.class_domain = class_domain;
	data.unroll_domain = isl_set_empty(isl_set_get_space(domain));

	if (foreach_iteration(domain, domains->build, NULL,
				&do_unroll_iteration, &data) < 0)
		data.unroll_domain = isl_set_free(data.unroll_domain);

	class_domain = isl_set_subtract(class_domain, data.unroll_domain);

	return class_domain;
}
/* Add domains to domains->list for each individual value of the current
 * dimension, for that part of the schedule domain that lies in the
 * intersection of the option domain and the class domain.
 * Remove the corresponding sets from the class domain and
 * return the updated class domain.
 *
 * We first break up the unroll option domain into individual pieces
 * and then handle each of them separately.  The unroll option domain
 * has been made disjoint in compute_domains_init_options.
 *
 * Note that we actively want to combine different pieces of the
 * schedule domain that have the same value at the current dimension.
 * We therefore need to break up the unroll option domain before
 * intersecting with class and schedule domain, hoping that the
 * unroll option domain specified by the user is relatively simple.
 */
static __isl_give isl_set *compute_unroll_domains(
	struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
	isl_set *unroll_domain;
	isl_basic_set_list *unroll_list;
	int i, n;
	int empty;

	empty = isl_set_is_empty(domains->option[isl_ast_loop_unroll]);
	if (empty < 0)
		return isl_set_free(class_domain);
	if (empty)
		return class_domain;

	unroll_domain = isl_set_copy(domains->option[isl_ast_loop_unroll]);
	unroll_list = isl_basic_set_list_from_set(unroll_domain);

	n = isl_basic_set_list_n_basic_set(unroll_list);
	for (i = 0; i < n; ++i) {
		isl_basic_set *bset;

		bset = isl_basic_set_list_get_basic_set(unroll_list, i);
		unroll_domain = isl_set_from_basic_set(bset);
		unroll_domain = isl_set_intersect(unroll_domain,
						    isl_set_copy(class_domain));
		unroll_domain = isl_set_intersect(unroll_domain,
					isl_set_copy(domains->schedule_domain));

		empty = isl_set_is_empty(unroll_domain);
		if (empty >= 0 && empty) {
			isl_set_free(unroll_domain);
			continue;
		}

		class_domain = do_unroll(domains, unroll_domain, class_domain);
	}

	isl_basic_set_list_free(unroll_list);

	return class_domain;
}
/* Try and construct a single basic set that includes the intersection of
 * the schedule domain, the atomic option domain and the class domain.
 * Add the resulting basic set(s) to domains->list and remove them
 * from class_domain.  Return the updated class domain.
 *
 * We construct a single domain rather than trying to combine
 * the schedule domains of individual domains because we are working
 * within a single component so that non-overlapping schedule domains
 * should already have been separated.
 * We do however need to make sure that this single domain is a subset
 * of the class domain so that it would not intersect with any other
 * class domains.  This means that we may end up splitting up the atomic
 * domain in case separation classes are being used.
 *
 * "domain" is the intersection of the schedule domain and the class domain,
 * with inner dimensions projected out.
 */
static __isl_give isl_set *compute_atomic_domain(
	struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
	isl_basic_set *bset;
	isl_basic_set_list *list;
	isl_set *domain, *atomic_domain;
	int empty;

	domain = isl_set_copy(domains->option[isl_ast_loop_atomic]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));
	empty = isl_set_is_empty(domain);
	if (empty < 0)
		class_domain = isl_set_free(class_domain);
	if (empty) {
		isl_set_free(domain);
		return class_domain;
	}

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_coalesce_preserve(domain);
	bset = isl_set_unshifted_simple_hull(domain);
	domain = isl_set_from_basic_set(bset);
	atomic_domain = isl_set_copy(domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	class_domain = isl_set_subtract(class_domain, atomic_domain);
	domain = isl_set_make_disjoint(domain);
	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return class_domain;
}
/* Split up the schedule domain into uniform basic sets,
 * in the sense that each element in a basic set is associated to
 * elements of the same domains, and add the result to domains->list.
 * Do this for that part of the schedule domain that lies in the
 * intersection of "class_domain" and the separate option domain.
 *
 * "class_domain" may or may not include the constraints
 * of the schedule domain, but this does not make a difference
 * since we are going to intersect it with the domain of the inverse schedule.
 * If it includes schedule domain constraints, then they may involve
 * inner dimensions, but we will eliminate them in separation_domain.
 */
static int compute_separate_domain(struct isl_codegen_domains *domains,
	__isl_keep isl_set *class_domain)
{
	isl_space *space;
	isl_set *domain;
	isl_union_map *executed;
	isl_basic_set_list *list;
	int empty;

	domain = isl_set_copy(domains->option[isl_ast_loop_separate]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	executed = isl_union_map_copy(domains->executed);
	executed = isl_union_map_intersect_domain(executed,
				    isl_union_set_from_set(domain));
	empty = isl_union_map_is_empty(executed);
	if (empty < 0 || empty) {
		isl_union_map_free(executed);
		return empty < 0 ? -1 : 0;
	}

	space = isl_set_get_space(class_domain);
	domain = separate_schedule_domains(space, executed, domains->build);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return 0;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the given separation class domain.
 *
 * If any separation classes have been defined, then "class_domain"
 * is the domain of the current class and does not refer to inner dimensions.
 * Otherwise, "class_domain" is the universe domain.
 *
 * We first make sure that the class domain is disjoint from
 * previously considered class domains.
 *
 * The separate domains can be computed directly from the "class_domain".
 *
 * The unroll, atomic and remainder domains need the constraints
 * from the schedule domain.
 *
 * For unrolling, the actual schedule domain is needed (with divs that
 * may refer to the current dimension) so that stride detection can be
 * performed.
 *
 * For atomic and remainder domains, inner dimensions and divs involving
 * the current dimensions should be eliminated.
 * In case we are working within a separation class, we need to intersect
 * the result with the current "class_domain" to ensure that the domains
 * are disjoint from those generated from other class domains.
 *
 * The domain that has been made atomic may be larger than specified
 * by the user since it needs to be representable as a single basic set.
 * This possibly larger domain is removed from class_domain by
 * compute_atomic_domain.  It is computed first so that the extended domain
 * would not overlap with any domains computed before.
 * Similarly, the unrolled domains may have some constraints removed and
 * may therefore also be larger than specified by the user.
 *
 * If anything is left after handling separate, unroll and atomic,
 * we split it up into basic sets and append the basic sets to domains->list.
 */
static isl_stat compute_partial_domains(struct isl_codegen_domains *domains,
	__isl_take isl_set *class_domain)
{
	isl_basic_set_list *list;
	isl_set *domain;

	class_domain = isl_set_subtract(class_domain,
					isl_set_copy(domains->done));
	domains->done = isl_set_union(domains->done,
					isl_set_copy(class_domain));

	class_domain = compute_atomic_domain(domains, class_domain);
	class_domain = compute_unroll_domains(domains, class_domain);

	domain = isl_set_copy(class_domain);

	if (compute_separate_domain(domains, domain) < 0)
		goto error;
	domain = isl_set_subtract(domain,
		    isl_set_copy(domains->option[isl_ast_loop_separate]));

	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));

	domain = isl_set_coalesce_preserve(domain);
	domain = isl_set_make_disjoint(domain);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	isl_set_free(class_domain);

	return isl_stat_ok;
error:
	isl_set_free(domain);
	isl_set_free(class_domain);
	return isl_stat_error;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the separation class identified by "pnt".
 *
 * We extract the corresponding class domain from domains->sep_class,
 * eliminate inner dimensions and pass control to compute_partial_domains.
 */
static isl_stat compute_class_domains(__isl_take isl_point *pnt, void *user)
{
	struct isl_codegen_domains *domains = user;
	isl_set *class_set;
	isl_set *domain;
	int disjoint;

	class_set = isl_set_from_point(pnt);
	domain = isl_map_domain(isl_map_intersect_range(
				isl_map_copy(domains->sep_class), class_set));
	domain = isl_ast_build_compute_gist(domains->build, domain);
	domain = isl_ast_build_eliminate(domains->build, domain);

	disjoint = isl_set_plain_is_disjoint(domain, domains->schedule_domain);
	if (disjoint < 0)
		return isl_stat_error;
	if (disjoint) {
		isl_set_free(domain);
		return isl_stat_ok;
	}

	return compute_partial_domains(domains, domain);
}
/* Extract the domains at the current depth that should be atomic,
 * separated or unrolled and store them in option.
 *
 * The domains specified by the user might overlap, so we make
 * them disjoint by subtracting earlier domains from later domains.
 */
static void compute_domains_init_options(isl_set *option[4],
	__isl_keep isl_ast_build *build)
{
	enum isl_ast_loop_type type, type2;
	isl_set *unroll;

	for (type = isl_ast_loop_atomic;
	    type <= isl_ast_loop_separate; ++type) {
		option[type] = isl_ast_build_get_option_domain(build, type);
		for (type2 = isl_ast_loop_atomic; type2 < type; ++type2)
			option[type] = isl_set_subtract(option[type],
						isl_set_copy(option[type2]));
	}

	unroll = option[isl_ast_loop_unroll];
	unroll = isl_set_coalesce(unroll);
	unroll = isl_set_make_disjoint(unroll);
	option[isl_ast_loop_unroll] = unroll;
}
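
/* For example, if the user specified the overlapping option domains
 *
 *	atomic: { [i] : 0 <= i <= 5 } and unroll: { [i] : 3 <= i <= 8 }
 *
 * then the subtraction above reduces the unroll domain to
 * { [i] : 6 <= i <= 8 }, so that each element of the schedule domain
 * is subject to at most one option.
 */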
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately,
 * based on the user-specified options.
 * Return the list of disjoint basic sets.
 *
 * There are three kinds of domains that we need to keep track of.
 * - the "schedule domain" is the domain of "executed"
 * - the "class domain" is the domain corresponding to the current
 *	separation class
 * - the "option domain" is the domain corresponding to one of the options
 *	atomic, unroll or separate
 *
 * We first consider the individual values of the separation classes
 * and split up the domain for each of them separately.
 * Finally, we consider the remainder.  If no separation classes were
 * specified, then we call compute_partial_domains with the universe
 * "class_domain".  Otherwise, we take the "schedule_domain" as "class_domain",
 * with inner dimensions removed.  We do this because we want to
 * avoid computing the complement of the class domains (i.e., the difference
 * between the universe and domains->done).
 */
static __isl_give isl_basic_set_list *compute_domains(
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	struct isl_codegen_domains domains;
	isl_ctx *ctx;
	isl_set *domain;
	isl_union_set *schedule_domain;
	isl_set *classes;
	isl_space *space;
	int n_param;
	enum isl_ast_loop_type type;
	int empty;

	ctx = isl_union_map_get_ctx(executed);
	domains.list = isl_basic_set_list_alloc(ctx, 0);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	compute_domains_init_options(domains.option, build);

	domains.sep_class = isl_ast_build_get_separation_class(build);
	classes = isl_map_range(isl_map_copy(domains.sep_class));
	n_param = isl_set_dim(classes, isl_dim_param);
	if (n_param < 0)
		classes = isl_set_free(classes);
	classes = isl_set_project_out(classes, isl_dim_param, 0, n_param);

	space = isl_set_get_space(domain);
	domains.build = build;
	domains.schedule_domain = isl_set_copy(domain);
	domains.executed = executed;
	domains.done = isl_set_empty(space);

	if (isl_set_foreach_point(classes, &compute_class_domains, &domains) < 0)
		domains.list = isl_basic_set_list_free(domains.list);
	isl_set_free(classes);

	empty = isl_set_is_empty(domains.done);
	if (empty < 0) {
		domains.list = isl_basic_set_list_free(domains.list);
		domain = isl_set_free(domain);
	} else if (empty) {
		isl_set_free(domain);
		domain = isl_set_universe(isl_set_get_space(domains.done));
	} else {
		domain = isl_ast_build_eliminate(build, domain);
	}
	if (compute_partial_domains(&domains, domain) < 0)
		domains.list = isl_basic_set_list_free(domains.list);

	isl_set_free(domains.schedule_domain);
	isl_set_free(domains.done);
	isl_map_free(domains.sep_class);
	for (type = isl_ast_loop_atomic; type <= isl_ast_loop_separate; ++type)
		isl_set_free(domains.option[type]);

	return domains.list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a union map.
 *
 * We first split up the domain at the current depth into disjoint
 * basic sets based on the user-specified options.
 * Then we generate code for each of them and concatenate the results.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_flat(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list = NULL;

	domain_list = compute_domains(executed, build);
	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree
 * and the separate option was specified.
 *
 * We perform separation on the domain of "executed" and then generate
 * an AST for each of the resulting disjoint basic sets.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_separate(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_space *space;
	isl_set *domain;
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list;

	space = isl_ast_build_get_space(build, 1);
	domain = separate_schedule_domains(space,
					isl_union_map_copy(executed), build);
	domain_list = isl_basic_set_list_from_set(domain);

	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
/* Internal data structure for generate_shifted_component_tree_unroll.
 *
 * "executed" and "build" are inputs to generate_shifted_component_tree_unroll.
 * "list" collects the constructed grafts.
 */
struct isl_ast_unroll_tree_data {
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
};
/* Initialize data->list to a list of "n" elements.
 */
static int init_unroll_tree(int n, void *user)
{
	struct isl_ast_unroll_tree_data *data = user;
	isl_ctx *ctx;

	ctx = isl_ast_build_get_ctx(data->build);
	data->list = isl_ast_graft_list_alloc(ctx, n);

	return 0;
}
/* Given an iteration of an unrolled domain represented by "bset",
 * generate the corresponding AST and add the result to data->list.
 */
static int do_unroll_tree_iteration(__isl_take isl_basic_set *bset, void *user)
{
	struct isl_ast_unroll_tree_data *data = user;

	data->list = add_node(data->list, isl_union_map_copy(data->executed),
				bset, isl_ast_build_copy(data->build));

	return 0;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree
 * and the unroll option was specified.
 *
 * We call foreach_iteration to iterate over the individual values and
 * construct and collect the corresponding grafts in do_unroll_tree_iteration.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_unroll(
	__isl_take isl_union_map *executed, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	struct isl_ast_unroll_tree_data data = { executed, build, NULL };

	if (foreach_iteration(domain, build, &init_unroll_tree,
				&do_unroll_tree_iteration, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return data.list;
}
/* Does "domain" involve a disjunction that is purely based on
 * constraints involving only outer dimensions?
 *
 * In particular, is there a disjunction such that the constraints
 * involving the current and later dimensions are the same over
 * all the disjuncts?
 */
static isl_bool has_pure_outer_disjunction(__isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_basic_set *hull;
	isl_set *shared, *inner;
	isl_bool equal;
	int depth;
	isl_size dim;

	if (isl_set_n_basic_set(domain) <= 1)
		return isl_bool_false;
	dim = isl_set_dim(domain, isl_dim_set);
	if (dim < 0)
		return isl_bool_error;

	inner = isl_set_copy(domain);
	depth = isl_ast_build_get_depth(build);
	inner = isl_set_drop_constraints_not_involving_dims(inner,
					    isl_dim_set, depth, dim - depth);
	hull = isl_set_plain_unshifted_simple_hull(isl_set_copy(inner));
	shared = isl_set_from_basic_set(hull);
	equal = isl_set_plain_is_equal(inner, shared);
	isl_set_free(inner);
	isl_set_free(shared);

	return equal;
}
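
/* For example, with the current depth equal to 1, the set
 *
 *	{ [i, j] : 0 <= j <= 10 and (i <= 0 or i >= 5) }
 *
 * involves a pure outer disjunction: after dropping the constraints
 * that do not involve j, both disjuncts impose the same constraints
 * 0 <= j <= 10, so the disjunction can be hoisted out and the set
 * can be treated as if the atomic option had been specified.
 */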
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, handle the base case where there is either no isolated
 * set or we are within the isolated set (in which case "isolated" is set)
 * or the iterations that precede or follow the isolated set.
 *
 * The schedule domain is broken up or combined into basic sets
 * according to the AST generation option specified in the current
 * schedule node, which may be either atomic, separate, unroll or
 * unspecified.  If the option is unspecified, then we currently simply
 * split the schedule domain into disjoint basic sets.
 *
 * In case the separate option is specified, the AST generation is
 * handled by generate_shifted_component_tree_separate.
 * In the other cases, we need the global schedule domain.
 * In the unroll case, the AST generation is then handled by
 * generate_shifted_component_tree_unroll which needs the actual
 * schedule domain (with divs that may refer to the current dimension)
 * so that stride detection can be performed.
 * In the atomic or unspecified case, inner dimensions and divs involving
 * the current dimensions should be eliminated.
 * The result is then either combined into a single basic set or
 * split up into disjoint basic sets.
 * Finally an AST is generated for each basic set and the results are
 * concatenated.
 *
 * If the schedule domain involves a disjunction that is purely based on
 * constraints involving only outer dimensions, then it is treated as
 * if atomic was specified.  This ensures that only a single loop
 * is generated instead of a sequence of identical loops with
 * different guards.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_base(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int isolated)
{
	isl_bool outer_disjunction;
	isl_union_set *schedule_domain;
	isl_set *domain;
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list;
	enum isl_ast_loop_type type;

	type = isl_ast_build_get_loop_type(build, isolated);
	if (type < 0)
		goto error;

	if (type == isl_ast_loop_separate)
		return generate_shifted_component_tree_separate(executed,
								build);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	if (type == isl_ast_loop_unroll)
		return generate_shifted_component_tree_unroll(executed, domain,
								build);

	domain = isl_ast_build_eliminate(build, domain);
	domain = isl_set_coalesce_preserve(domain);

	outer_disjunction = has_pure_outer_disjunction(domain, build);
	if (outer_disjunction < 0)
		domain = isl_set_free(domain);

	if (outer_disjunction || type == isl_ast_loop_atomic) {
		isl_basic_set *hull;
		hull = isl_set_unshifted_simple_hull(domain);
		domain_list = isl_basic_set_list_from_basic_set(hull);
	} else {
		domain = isl_set_make_disjoint(domain);
		domain_list = isl_basic_set_list_from_set(domain);
	}

	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Extract out the disjunction imposed by "domain" on the outer
 * schedule dimensions.
 *
 * In particular, remove all inner dimensions from "domain" (including
 * the current dimension) and then remove the constraints that are shared
 * by all disjuncts in the result.
 */
static __isl_give isl_set *extract_disjunction(__isl_take isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_set *hull;
	int depth;
	isl_size dim;

	domain = isl_ast_build_specialize(build, domain);
	depth = isl_ast_build_get_depth(build);
	dim = isl_set_dim(domain, isl_dim_set);
	if (dim < 0)
		return isl_set_free(domain);
	domain = isl_set_eliminate(domain, isl_dim_set, depth, dim - depth);
	domain = isl_set_remove_unknown_divs(domain);
	hull = isl_set_copy(domain);
	hull = isl_set_from_basic_set(isl_set_unshifted_simple_hull(hull));
	domain = isl_set_gist(domain, hull);

	return domain;
}
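
/* For illustration, with depth 1, the set
 *
 *	{ [i, j] : 0 <= j <= 10 and (i <= 0 or i >= 5) }
 *
 * is turned into { [i, j] : i <= 0 or i >= 5 }: the dimensions starting
 * at the current depth are eliminated and the constraints shared by
 * all disjuncts are removed by the gist computation.
 */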
/* Add "guard" to the grafts in "list".
 * "build" is the outer AST build, while "sub_build" includes "guard"
 * in its generated domain.
 *
 * First combine the grafts into a single graft and then add the guard.
 * If the list is empty, or if some error occurred, then simply return
 * the list.
 */
static __isl_give isl_ast_graft_list *list_add_guard(
	__isl_take isl_ast_graft_list *list, __isl_keep isl_set *guard,
	__isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
{
	isl_ast_graft *graft;

	list = isl_ast_graft_list_fuse(list, sub_build);

	if (isl_ast_graft_list_n_ast_graft(list) != 1)
		return list;

	graft = isl_ast_graft_list_get_ast_graft(list, 0);
	graft = isl_ast_graft_add_guard(graft, isl_set_copy(guard), build);
	list = isl_ast_graft_list_set_ast_graft(list, 0, graft);

	return list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so for the specified subset of the schedule domain.
 *
 * If we are outside of the isolated part, then "domain" may include
 * a disjunction.  Explicitly generate this disjunction at this point
 * instead of relying on the disjunction getting hoisted back up
 * to this level.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_part(
	__isl_keep isl_union_map *executed, __isl_take isl_set *domain,
	__isl_keep isl_ast_build *build, int isolated)
{
	isl_union_set *uset;
	isl_ast_graft_list *list;
	isl_ast_build *sub_build;
	int empty;

	uset = isl_union_set_from_set(isl_set_copy(domain));
	executed = isl_union_map_copy(executed);
	executed = isl_union_map_intersect_domain(executed, uset);
	empty = isl_union_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty) {
		isl_ctx *ctx;
		isl_union_map_free(executed);
		isl_set_free(domain);
		ctx = isl_ast_build_get_ctx(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	sub_build = isl_ast_build_copy(build);
	if (!isolated) {
		domain = extract_disjunction(domain, build);
		sub_build = isl_ast_build_restrict_generated(sub_build,
							isl_set_copy(domain));
	}
	list = generate_shifted_component_tree_base(executed,
				isl_ast_build_copy(sub_build), isolated);
	if (!isolated)
		list = list_add_guard(list, domain, build, sub_build);
	isl_ast_build_free(sub_build);
	isl_set_free(domain);
	return list;
error:
	isl_union_map_free(executed);
	isl_set_free(domain);
	return NULL;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so for the specified sequence of subsets
 * of the schedule domain, "before", "isolated", "after" and "other",
 * where only the "isolated" part is considered to be isolated.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_parts(
	__isl_take isl_union_map *executed, __isl_take isl_set *before,
	__isl_take isl_set *isolated, __isl_take isl_set *after,
	__isl_take isl_set *other, __isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list, *res;

	res = generate_shifted_component_tree_part(executed, before, build, 0);
	list = generate_shifted_component_tree_part(executed, isolated,
						    build, 1);
	res = isl_ast_graft_list_concat(res, list);
	list = generate_shifted_component_tree_part(executed, after, build, 0);
	res = isl_ast_graft_list_concat(res, list);
	list = generate_shifted_component_tree_part(executed, other, build, 0);
	res = isl_ast_graft_list_concat(res, list);

	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return res;
}
/* Does "set" intersect "first", but not "second"?
 */
static isl_bool only_intersects_first(__isl_keep isl_set *set,
	__isl_keep isl_set *first, __isl_keep isl_set *second)
{
	isl_bool disjoint;

	disjoint = isl_set_is_disjoint(set, first);
	if (disjoint < 0)
		return isl_bool_error;
	if (disjoint)
		return isl_bool_false;

	return isl_set_is_disjoint(set, second);
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so in case of isolation where there is
 * only an "isolated" part and an "after" part.
 * "dead1" and "dead2" are freed by this function in order to simplify
 * the caller.
 *
 * The "before" and "other" parts are set to empty sets.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_only_after(
	__isl_take isl_union_map *executed, __isl_take isl_set *isolated,
	__isl_take isl_set *after, __isl_take isl_ast_build *build,
	__isl_take isl_set *dead1, __isl_take isl_set *dead2)
{
	isl_set *empty;

	empty = isl_set_empty(isl_set_get_space(after));
	isl_set_free(dead1);
	isl_set_free(dead2);
	return generate_shifted_component_parts(executed, isl_set_copy(empty),
						isolated, after, empty, build);
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 *
 * We first check if the user has specified an isolated schedule domain
 * and that we are not already outside of this isolated schedule domain.
 * If so, we break up the schedule domain into iterations that
 * precede the isolated domain, the isolated domain itself,
 * the iterations that follow the isolated domain and
 * the remaining iterations (those that are incomparable
 * to the isolated domain).
 * We generate an AST for each piece and concatenate the results.
 *
 * If the isolated domain is not convex, then it is replaced
 * by a convex superset to ensure that the sets of preceding and
 * following iterations are properly defined and, in particular,
 * that there are no intermediate iterations that do not belong
 * to the isolated domain.
 *
 * In the special case where at least one element of the schedule
 * domain that does not belong to the isolated domain needs
 * to be scheduled after this isolated domain, but none of those
 * elements need to be scheduled before, break up the schedule domain
 * in only two parts, the isolated domain, and a part that will be
 * scheduled after the isolated domain.
 *
 * If no isolated set has been specified, then we generate an
 * AST for the entire inverse schedule.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i, depth;
	int empty, has_isolate;
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_set *domain;
	isl_basic_set *hull;
	isl_set *isolated, *before, *after, *test;
	isl_map *gt, *lt;
	isl_bool pure;

	build = isl_ast_build_extract_isolated(build);
	has_isolate = isl_ast_build_has_isolated(build);
	if (has_isolate < 0)
		executed = isl_union_map_free(executed);
	else if (!has_isolate)
		return generate_shifted_component_tree_base(executed, build, 0);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	isolated = isl_ast_build_get_isolated(build);
	isolated = isl_set_intersect(isolated, isl_set_copy(domain));
	test = isl_ast_build_specialize(build, isl_set_copy(isolated));
	empty = isl_set_is_empty(test);
	isl_set_free(test);
	if (empty < 0)
		goto error;
	if (empty) {
		isl_set_free(isolated);
		isl_set_free(domain);
		return generate_shifted_component_tree_base(executed, build, 0);
	}
	isolated = isl_ast_build_eliminate(build, isolated);
	hull = isl_set_unshifted_simple_hull(isolated);
	isolated = isl_set_from_basic_set(hull);

	depth = isl_ast_build_get_depth(build);
	space = isl_space_map_from_set(isl_set_get_space(isolated));
	gt = isl_map_universe(space);
	for (i = 0; i < depth; ++i)
		gt = isl_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
	gt = isl_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);
	lt = isl_map_reverse(isl_map_copy(gt));
	before = isl_set_apply(isl_set_copy(isolated), gt);
	after = isl_set_apply(isl_set_copy(isolated), lt);

	domain = isl_set_subtract(domain, isl_set_copy(isolated));
	pure = only_intersects_first(domain, after, before);
	if (pure < 0)
		executed = isl_union_map_free(executed);
	else if (pure)
		return generate_shifted_component_only_after(executed, isolated,
						domain, build, before, after);
	domain = isl_set_subtract(domain, isl_set_copy(before));
	domain = isl_set_subtract(domain, isl_set_copy(after));
	after = isl_set_subtract(after, isl_set_copy(isolated));
	after = isl_set_subtract(after, isl_set_copy(before));
	before = isl_set_subtract(before, isl_set_copy(isolated));

	return generate_shifted_component_parts(executed, before, isolated,
						after, domain, build);
error:
	isl_set_free(domain);
	isl_set_free(isolated);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
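
/* Editor's illustration (not part of the original source): if the schedule
 * domain is { [i] : 0 <= i < 100 } and the isolated set is
 * { [i] : 10 <= i < 20 }, then the "before" part ends up covering
 * 0 <= i < 10, the "after" part covers 20 <= i < 100 and the "other"
 * part is empty, so separate ASTs are generated for the non-empty
 * pieces and concatenated in that order.
 */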
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * Call generate_shifted_component_tree or generate_shifted_component_flat
 * depending on whether the schedule was specified as a schedule tree.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	if (isl_ast_build_has_schedule_node(build))
		return generate_shifted_component_tree(executed, build);
	else
		return generate_shifted_component_flat(executed, build);
}
struct isl_set_map_pair {
	isl_set *set;
	isl_map *map;
};
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * return the union of the "map" fields of the elements
 * indexed by the first "n" elements of "order".
 */
static __isl_give isl_union_map *construct_component_executed(
	struct isl_set_map_pair *domain, int *order, int n)
{
	int i;
	isl_map *map;
	isl_union_map *executed;

	map = isl_map_copy(domain[order[0]].map);
	executed = isl_union_map_from_map(map);
	for (i = 1; i < n; ++i) {
		map = isl_map_copy(domain[order[i]].map);
		executed = isl_union_map_add_map(executed, map);
	}

	return executed;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_from_list(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	isl_union_map *executed;

	executed = construct_component_executed(domain, order, n);
	return generate_shifted_component(executed, build);
}
/* Does set dimension "pos" of "set" have an obviously fixed value?
 */
static int dim_is_fixed(__isl_keep isl_set *set, int pos)
{
	int fixed;
	isl_val *v;

	v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, pos);
	if (!v)
		return -1;
	fixed = !isl_val_is_nan(v);
	isl_val_free(v);

	return fixed;
}
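
/* Editor's illustration (not part of the original source): for the set
 * { [i, j] : i = 5 and 0 <= j < 10 }, dimension 0 has the obviously fixed
 * value 5, so dim_is_fixed(set, 0) returns 1, while dimension 1 takes
 * several values and dim_is_fixed(set, 1) returns 0.
 */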
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * do all (except for at most one) of the "set" field of the elements
 * indexed by the first "n" elements of "order" have a fixed value
 * at position "depth"?
 */
static int at_most_one_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth)
{
	int i;
	int non_fixed = -1;

	for (i = 0; i < n; ++i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		if (non_fixed >= 0)
			return 0;
		non_fixed = i;
	}

	return 1;
}
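
/* Editor's illustration (not part of the original source): with the sets
 * { S0[i, j] : i = 5 }, { S1[i, j] : i = 3 } and { S2[i, j] } and depth 0,
 * only the S2 set lacks an obviously fixed value for the first dimension,
 * so the function returns 1.  If the S1 set were { S1[i, j] : i >= 3 }
 * instead, two sets would be non-fixed and the function would return 0.
 */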
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * eliminate the inner dimensions from the "set" field of the elements
 * indexed by the first "n" elements of "order", provided the current
 * dimension does not have a fixed value.
 *
 * Return the index of the first element in "order" with a corresponding
 * "set" field that does not have an (obviously) fixed value.
 */
static int eliminate_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth, __isl_keep isl_ast_build *build)
{
	int i;
	int base = -1;

	for (i = n - 1; i >= 0; --i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		domain[order[i]].set = isl_ast_build_eliminate_inner(build,
						domain[order[i]].set);
		base = i;
	}

	return base;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * find the element of "domain" (amongst those indexed by the first "n"
 * elements of "order") with the "set" field that has the smallest
 * value for the current iterator.
 *
 * Note that the domain with the smallest value may depend on the parameters
 * and/or outer loop dimension.  Since the result of this function is only
 * used as heuristic, we only make a reasonable attempt at finding the best
 * domain, one that should work in case a single domain provides the smallest
 * value for the current dimension over all values of the parameters
 * and outer dimensions.
 *
 * In particular, we compute the smallest value of the first domain
 * and replace it by that of any later domain if that later domain
 * has a smallest value that is smaller for at least some value
 * of the parameters and outer dimensions.
 */
static int first_offset(struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_ast_build *build)
{
	int i;
	isl_map *min_first;
	int first = 0;

	min_first = isl_ast_build_map_to_iterator(build,
				isl_set_copy(domain[order[0]].set));
	min_first = isl_map_lexmin(min_first);

	for (i = 1; i < n; ++i) {
		isl_map *min, *test;
		int empty;

		min = isl_ast_build_map_to_iterator(build,
				isl_set_copy(domain[order[i]].set));
		min = isl_map_lexmin(min);
		test = isl_map_copy(min);
		test = isl_map_apply_domain(isl_map_copy(min_first), test);
		test = isl_map_order_lt(test, isl_dim_in, 0, isl_dim_out, 0);
		empty = isl_map_is_empty(test);
		isl_map_free(test);
		if (empty >= 0 && !empty) {
			isl_map_free(min_first);
			first = i;
			min_first = min;
		} else
			isl_map_free(min);

		if (empty < 0)
			break;
	}

	isl_map_free(min_first);

	return i < n ? -1 : first;
}
/* Construct a shifted inverse schedule based on the original inverse schedule,
 * the stride and the offset.
 *
 * The original inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * "stride" and "offset" are such that the difference
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * Moreover, 0 <= offset[i] < stride.
 *
 * For each domain, we create a map
 *
 *	{ [..., j, ...] -> [..., j - offset[i], offset[i], ....] }
 *
 * where j refers to the current dimension and the other dimensions are
 * unchanged, and apply this map to the original schedule domain.
 *
 * For example, for the original schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * and assuming the offset is 0 for the A domain and 1 for the B domain,
 * we apply the mapping
 *
 *	{ [j] -> [j, 0] }
 *
 * to the schedule of the "A" domain and the mapping
 *
 *	{ [j - 1] -> [j, 1] }
 *
 * to the schedule of the "B" domain.
 *
 * Note that after the transformation, the differences between pairs
 * of values of the current dimension over all domains are multiples
 * of stride and that we have therefore exposed the stride.
 *
 * To see that the mapping preserves the lexicographic order,
 * first note that each of the individual maps above preserves the order.
 * If the value of the current iterator is j1 in one domain and j2 in another,
 * then if j1 = j2, we know that the same map is applied to both domains
 * and the order is preserved.
 * Otherwise, let us assume, without loss of generality, that j1 < j2.
 * If c1 >= c2 (with c1 and c2 the corresponding offsets), then
 *
 *	j1 - c1 < j2 - c2
 *
 * and the order is preserved.
 * If c1 < c2, then we know
 *
 *	0 <= c2 - c1 < s
 *
 * with s the stride.  Moreover,
 *
 *	j2 - j1 = n * s + r
 *
 * with n >= 0 and 0 <= r < s.
 * In other words, r = c2 - c1.
 * If n > 0, then j1 - c1 < j2 - c2 and the order is preserved.
 * If n = 0, then j1 - c1 = j2 - c2, while c1 < c2, so that
 *
 *	(j1 - c1, c1) << (j2 - c2, c2)
 *
 * with "<<" the lexicographic order, proving that the order is preserved
 * in all cases.
 */
static __isl_give isl_union_map *construct_shifted_executed(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	int i;
	int depth;
	isl_union_map *executed;
	isl_space *space;
	isl_map *map;
	isl_constraint *c;

	depth = isl_ast_build_get_depth(build);
	space = isl_ast_build_get_space(build, 1);
	executed = isl_union_map_empty(isl_space_copy(space));
	space = isl_space_map_from_set(space);
	map = isl_map_identity(isl_space_copy(space));
	map = isl_map_eliminate(map, isl_dim_out, depth, 1);
	map = isl_map_insert_dims(map, isl_dim_out, depth + 1, 1);
	space = isl_space_insert_dims(space, isl_dim_out, depth + 1, 1);

	c = isl_constraint_alloc_equality(isl_local_space_from_space(space));
	c = isl_constraint_set_coefficient_si(c, isl_dim_in, depth, 1);
	c = isl_constraint_set_coefficient_si(c, isl_dim_out, depth, -1);

	for (i = 0; i < n; ++i) {
		isl_map *map_i;
		isl_val *v;

		v = isl_multi_val_get_val(offset, i);
		if (!v)
			break;
		map_i = isl_map_copy(map);
		map_i = isl_map_fix_val(map_i, isl_dim_out, depth + 1,
					isl_val_copy(v));
		v = isl_val_neg(v);
		c = isl_constraint_set_constant_val(c, v);
		map_i = isl_map_add_constraint(map_i, isl_constraint_copy(c));

		map_i = isl_map_apply_domain(isl_map_copy(domain[order[i]].map),
						map_i);
		executed = isl_union_map_add_map(executed, map_i);
	}

	isl_constraint_free(c);
	isl_map_free(map);
	isl_ast_build_free(build);

	if (i < n)
		executed = isl_union_map_free(executed);

	return executed;
}
/* Generate code for a single component, after exposing the stride,
 * given that the schedule domain is "shifted strided".
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * The schedule domain being "shifted strided" means that the differences
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * We first look for the domain with the "smallest" value for the current
 * dimension and adjust the offsets such that the offset of the "smallest"
 * domain is equal to zero.  The other offsets are reduced modulo stride.
 *
 * Based on this information, we construct a new inverse schedule in
 * construct_shifted_executed that exposes the stride.
 * Since this involves the introduction of a new schedule dimension,
 * the build needs to be changed accordingly.
 * After computing the AST, the newly introduced dimension needs
 * to be removed again from the list of grafts.  We do this by plugging
 * in a mapping that represents the new schedule domain in terms of the
 * old schedule domain.
 */
static __isl_give isl_ast_graft_list *generate_shift_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list;
	int first;
	int depth;
	isl_val *val;
	isl_multi_val *mv;
	isl_space *space;
	isl_multi_aff *ma, *zero;
	isl_union_map *executed;

	depth = isl_ast_build_get_depth(build);

	first = first_offset(domain, order, n, build);
	if (first < 0)
		goto error;

	mv = isl_multi_val_copy(offset);
	val = isl_multi_val_get_val(offset, first);
	val = isl_val_neg(val);
	mv = isl_multi_val_add_val(mv, val);
	mv = isl_multi_val_mod_val(mv, isl_val_copy(stride));

	executed = construct_shifted_executed(domain, order, n, stride, mv,
						isl_ast_build_copy(build));
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_map_from_set(space);
	ma = isl_multi_aff_identity(isl_space_copy(space));
	space = isl_space_from_domain(isl_space_domain(space));
	space = isl_space_add_dims(space, isl_dim_out, 1);
	zero = isl_multi_aff_zero(space);
	ma = isl_multi_aff_range_splice(ma, depth + 1, zero);
	build = isl_ast_build_insert_dim(build, depth + 1);
	list = generate_shifted_component(executed, build);

	list = isl_ast_graft_list_preimage_multi_aff(list, ma);

	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
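
/* Editor's illustration (not part of the original source): with two domains
 * whose offsets relative to the reference domain are 1 and 3 for a stride
 * of 4, and with the first domain providing the smallest value for the
 * current dimension, the offsets are normalized above to 0 and 2 before
 * being passed on to construct_shifted_executed.
 */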
/* Does any node in the schedule tree rooted at the current schedule node
 * of "build" depend on outer schedule nodes?
 */
static int has_anchored_subtree(__isl_keep isl_ast_build *build)
{
	isl_schedule_node *node;
	int dependent;

	node = isl_ast_build_get_schedule_node(build);
	dependent = isl_schedule_node_is_subtree_anchored(node);
	isl_schedule_node_free(node);

	return dependent;
}
/* Generate code for a single component.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * This function may modify the "set" fields of "domain".
 *
 * Before proceeding with the actual code generation for the component,
 * we first check if there are any "shifted" strides, meaning that
 * the schedule domains of the individual domains are all strided,
 * but that they have different offsets, resulting in the union
 * of schedule domains not being strided anymore.
 *
 * The simplest example is the schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * Both schedule domains are strided, but their union is not.
 * This function detects such cases and then rewrites the schedule to
 *
 *	{ A[i] -> [2i, 0]: 0 <= i < 10; B[i] -> [2i, 1] : 0 <= i < 10 }
 *
 * In the new schedule, the schedule domains have the same offset (modulo
 * the stride), ensuring that the union of schedule domains is also strided.
 *
 * If there is only a single domain in the component, then there is
 * nothing to do.  Similarly, if the current schedule dimension has
 * a fixed value for almost all domains then there is nothing to be done.
 * In particular, we need at least two domains where the current schedule
 * dimension does not have a fixed value.
 * Finally, in case of a schedule map input,
 * if any of the options refer to the current schedule dimension,
 * then we bail out as well.  It would be possible to reformulate the options
 * in terms of the new schedule domain, but that would introduce constraints
 * that separate the domains in the options and that is something we would
 * like to avoid.
 * In the case of a schedule tree input, we bail out if any of
 * the descendants of the current schedule node refer to outer
 * schedule nodes in any way.
 *
 * To see if there is any shifted stride, we look at the differences
 * between the values of the current dimension in pairs of domains
 * for equal values of outer dimensions.  These differences should be
 * of the form
 *
 *	m x + r
 *
 * with "m" the stride and "r" a constant.  Note that we cannot perform
 * this analysis on individual domains as the lower bound in each domain
 * may depend on parameters or outer dimensions and so the current dimension
 * itself may not have a fixed remainder on division by the stride.
 *
 * In particular, we compare the first domain that does not have an
 * obviously fixed value for the current dimension to itself and all
 * other domains and collect the offsets and the gcd of the strides.
 * If the gcd becomes one, then we failed to find shifted strides.
 * If the gcd is zero, then the differences were all fixed, meaning
 * that some domains had non-obviously fixed values for the current dimension.
 * If all the offsets are the same (for those domains that do not have
 * an obviously fixed value for the current dimension), then we do not
 * apply the transformation.
 * If none of the domains were skipped, then there is nothing to do.
 * If some of them were skipped, then if we apply separation, the schedule
 * domain should get split in pieces with a (non-shifted) stride.
 *
 * Otherwise, we apply a shift to expose the stride in
 * generate_shift_component.
 */
static __isl_give isl_ast_graft_list *generate_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	int i, d;
	int depth;
	isl_ctx *ctx;
	isl_map *map;
	isl_set *deltas;
	isl_val *gcd = NULL;
	isl_multi_val *mv;
	int fixed, skip;
	int base;
	isl_ast_graft_list *list;
	int res = 0;

	depth = isl_ast_build_get_depth(build);

	skip = n == 1;
	if (skip >= 0 && !skip)
		skip = at_most_one_non_fixed(domain, order, n, depth);
	if (skip >= 0 && !skip) {
		if (isl_ast_build_has_schedule_node(build))
			skip = has_anchored_subtree(build);
		else
			skip = isl_ast_build_options_involve_depth(build);
	}
	if (skip < 0)
		goto error;
	if (skip)
		return generate_shifted_component_from_list(domain,
							    order, n, build);

	base = eliminate_non_fixed(domain, order, n, depth, build);
	if (base < 0)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	mv = isl_multi_val_zero(isl_space_set_alloc(ctx, 0, n));

	fixed = 1;
	for (i = 0; i < n; ++i) {
		isl_val *r, *m;

		map = isl_map_from_domain_and_range(
				isl_set_copy(domain[order[base]].set),
				isl_set_copy(domain[order[i]].set));
		for (d = 0; d < depth; ++d)
			map = isl_map_equate(map, isl_dim_in, d,
						isl_dim_out, d);
		deltas = isl_map_deltas(map);
		res = isl_set_dim_residue_class_val(deltas, depth, &m, &r);
		isl_set_free(deltas);
		if (res < 0)
			break;

		if (i == 0)
			gcd = m;
		else
			gcd = isl_val_gcd(gcd, m);
		if (isl_val_is_one(gcd)) {
			isl_val_free(r);
			break;
		}
		mv = isl_multi_val_set_val(mv, i, r);

		res = dim_is_fixed(domain[order[i]].set, depth);
		if (res < 0)
			break;
		if (res)
			continue;

		if (fixed && i > base) {
			isl_val *a, *b;
			a = isl_multi_val_get_val(mv, i);
			b = isl_multi_val_get_val(mv, base);
			if (isl_val_ne(a, b))
				fixed = 0;
			isl_val_free(a);
			isl_val_free(b);
		}
	}

	if (res < 0 || !gcd) {
		isl_ast_build_free(build);
		list = NULL;
	} else if (i < n || fixed || isl_val_is_zero(gcd)) {
		list = generate_shifted_component_from_list(domain,
							    order, n, build);
	} else {
		list = generate_shift_component(domain, order, n, gcd, mv,
						build);
	}

	isl_val_free(gcd);
	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
/* Store both "map" itself and its domain in the
 * structure pointed to by *next and advance to the next array element.
 */
static isl_stat extract_domain(__isl_take isl_map *map, void *user)
{
	struct isl_set_map_pair **next = user;

	(*next)->map = isl_map_copy(map);
	(*next)->set = isl_map_domain(map);
	(*next)++;

	return isl_stat_ok;
}
static isl_bool after_in_tree(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node);
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the child of "node"?
 */
static isl_bool after_in_child(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_schedule_node *child;
	isl_bool after;

	child = isl_schedule_node_get_child(node, 0);
	after = after_in_tree(umap, child);
	isl_schedule_node_free(child);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the band node "node"?
 *
 * We first check if any domain element is scheduled after any
 * of the corresponding image elements by the band node itself.
 * If not, we restrict "umap" to those pairs of elements that
 * are scheduled together by the band node and continue with
 * the child of the band node.
 * If there are no such pairs then the map passed to after_in_child
 * will be empty causing it to return 0.
 */
static isl_bool after_in_band(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_multi_union_pw_aff *mupa;
	isl_union_map *partial, *test, *gt, *universe, *umap1, *umap2;
	isl_union_set *domain, *range;
	isl_space *space;
	isl_bool empty;
	isl_bool after;

	if (isl_schedule_node_band_n_member(node) == 0)
		return after_in_child(umap, node);

	mupa = isl_schedule_node_band_get_partial_schedule(node);
	space = isl_multi_union_pw_aff_get_space(mupa);
	partial = isl_union_map_from_multi_union_pw_aff(mupa);
	test = isl_union_map_copy(umap);
	test = isl_union_map_apply_domain(test, isl_union_map_copy(partial));
	test = isl_union_map_apply_range(test, isl_union_map_copy(partial));
	gt = isl_union_map_from_map(isl_map_lex_gt(space));
	test = isl_union_map_intersect(test, gt);
	empty = isl_union_map_is_empty(test);
	isl_union_map_free(test);

	if (empty < 0 || !empty) {
		isl_union_map_free(partial);
		return isl_bool_not(empty);
	}

	universe = isl_union_map_universe(isl_union_map_copy(umap));
	domain = isl_union_map_domain(isl_union_map_copy(universe));
	range = isl_union_map_range(universe);
	umap1 = isl_union_map_copy(partial);
	umap1 = isl_union_map_intersect_domain(umap1, domain);
	umap2 = isl_union_map_intersect_domain(partial, range);
	test = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
	test = isl_union_map_intersect(test, isl_union_map_copy(umap));
	after = after_in_child(test, node);
	isl_union_map_free(test);

	return after;
}
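
/* Editor's illustration (not part of the original source): for a band with
 * partial schedule { A[i] -> [i]; B[i] -> [i] } and umap { A[i] -> B[i] },
 * no pair is ordered by the band itself (both sides receive the same band
 * value), so the decision is delegated to the child on exactly those pairs
 * that the band schedules together.
 */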
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the context node "node"?
 *
 * The context constraints apply to the schedule domain,
 * so we cannot apply them directly to "umap", which contains
 * pairs of statement instances.  Instead, we add them
 * to the range of the prefix schedule for both domain and
 * range of "umap".
 */
static isl_bool after_in_context(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *prefix, *universe, *umap1, *umap2;
	isl_union_set *domain, *range;
	isl_set *context;
	isl_bool after;

	umap = isl_union_map_copy(umap);
	context = isl_schedule_node_context_get_context(node);
	prefix = isl_schedule_node_get_prefix_schedule_union_map(node);
	universe = isl_union_map_universe(isl_union_map_copy(umap));
	domain = isl_union_map_domain(isl_union_map_copy(universe));
	range = isl_union_map_range(universe);
	umap1 = isl_union_map_copy(prefix);
	umap1 = isl_union_map_intersect_domain(umap1, domain);
	umap2 = isl_union_map_intersect_domain(prefix, range);
	umap1 = isl_union_map_intersect_range(umap1,
					isl_union_set_from_set(context));
	umap1 = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
	umap = isl_union_map_intersect(umap, umap1);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the expansion node "node"?
 *
 * We apply the expansion to domain and range of "umap" and
 * continue with its child.
 */
static isl_bool after_in_expansion(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *expansion;
	isl_bool after;

	expansion = isl_schedule_node_expansion_get_expansion(node);
	umap = isl_union_map_copy(umap);
	umap = isl_union_map_apply_domain(umap, isl_union_map_copy(expansion));
	umap = isl_union_map_apply_range(umap, expansion);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the extension node "node"?
 *
 * Since the extension node may add statement instances before or
 * after the pairs of statement instances in "umap", we return isl_bool_true
 * to ensure that these pairs are not broken up.
 */
static isl_bool after_in_extension(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	return isl_bool_true;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the filter node "node"?
 *
 * We intersect domain and range of "umap" with the filter and
 * continue with its child.
 */
static isl_bool after_in_filter(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_set *filter;
	isl_bool after;

	umap = isl_union_map_copy(umap);
	filter = isl_schedule_node_filter_get_filter(node);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(filter));
	umap = isl_union_map_intersect_range(umap, filter);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the set node "node"?
 *
 * This is only the case if this condition holds in any
 * of the (filter) children of the set node.
 * In particular, if the domain and the range of "umap"
 * are contained in different children, then the condition
 * cannot hold.
 */
static isl_bool after_in_set(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int i, n;

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_bool after;

		child = isl_schedule_node_get_child(node, i);
		after = after_in_tree(umap, child);
		isl_schedule_node_free(child);

		if (after < 0 || after)
			return after;
	}

	return isl_bool_false;
}
/* Return the filter of child "i" of "node".
 */
static __isl_give isl_union_set *child_filter(
	__isl_keep isl_schedule_node *node, int i)
{
	isl_schedule_node *child;
	isl_union_set *filter;

	child = isl_schedule_node_get_child(node, i);
	filter = isl_schedule_node_filter_get_filter(child);
	isl_schedule_node_free(child);

	return filter;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the sequence node "node"?
 *
 * This happens in particular if any domain element is
 * contained in a later child than one containing a range element or
 * if the condition holds within a given child in the sequence.
 * The latter part of the condition is checked by after_in_set.
 */
static isl_bool after_in_sequence(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int i, j, n;
	isl_union_map *umap_i;
	isl_bool empty;
	isl_bool after = isl_bool_false;

	n = isl_schedule_node_n_children(node);
	for (i = 1; i < n; ++i) {
		isl_union_set *filter_i;

		umap_i = isl_union_map_copy(umap);
		filter_i = child_filter(node, i);
		umap_i = isl_union_map_intersect_domain(umap_i, filter_i);
		empty = isl_union_map_is_empty(umap_i);
		if (empty < 0)
			goto error;
		if (empty) {
			isl_union_map_free(umap_i);
			continue;
		}

		for (j = 0; j < i; ++j) {
			isl_union_set *filter_j;
			isl_union_map *umap_ij;

			umap_ij = isl_union_map_copy(umap_i);
			filter_j = child_filter(node, j);
			umap_ij = isl_union_map_intersect_range(umap_ij,
								filter_j);
			empty = isl_union_map_is_empty(umap_ij);
			isl_union_map_free(umap_ij);

			if (empty < 0)
				goto error;
			if (!empty)
				after = isl_bool_true;
			if (after)
				break;
		}

		isl_union_map_free(umap_i);

		if (after)
			break;
	}

	if (after < 0 || after)
		return after;

	return after_in_set(umap, node);
error:
	isl_union_map_free(umap_i);
	return isl_bool_error;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at "node"?
 *
 * If "umap" is empty, then clearly there is no such element.
 * Otherwise, consider the different types of nodes separately.
 */
static isl_bool after_in_tree(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_bool empty;
	enum isl_schedule_node_type type;

	empty = isl_union_map_is_empty(umap);
	if (empty < 0)
		return isl_bool_error;
	if (empty)
		return isl_bool_false;
	if (!node)
		return isl_bool_error;

	type = isl_schedule_node_get_type(node);
	switch (type) {
	case isl_schedule_node_error:
		return isl_bool_error;
	case isl_schedule_node_leaf:
		return isl_bool_false;
	case isl_schedule_node_band:
		return after_in_band(umap, node);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"unexpected internal domain node",
			return isl_bool_error);
	case isl_schedule_node_context:
		return after_in_context(umap, node);
	case isl_schedule_node_expansion:
		return after_in_expansion(umap, node);
	case isl_schedule_node_extension:
		return after_in_extension(umap, node);
	case isl_schedule_node_filter:
		return after_in_filter(umap, node);
	case isl_schedule_node_guard:
	case isl_schedule_node_mark:
		return after_in_child(umap, node);
	case isl_schedule_node_set:
		return after_in_set(umap, node);
	case isl_schedule_node_sequence:
		return after_in_sequence(umap, node);
	}

	return isl_bool_true;
}
/* Is any domain element of "map1" scheduled after any domain
 * element of "map2" by the subtree underneath the current band node,
 * while at the same time being scheduled together by the current
 * band node, i.e., by "map1" and "map2"?
 *
 * If the child of the current band node is a leaf, then
 * no element can be scheduled after any other element.
 *
 * Otherwise, we construct a relation between domain elements
 * of "map1" and domain elements of "map2" that are scheduled
 * together and then check if the subtree underneath the current
 * band node determines their relative order.
 */
static isl_bool after_in_subtree(__isl_keep isl_ast_build *build,
	__isl_keep isl_map *map1, __isl_keep isl_map *map2)
{
	isl_schedule_node *node;
	isl_map *map;
	isl_union_map *umap;
	isl_bool after;

	node = isl_ast_build_get_schedule_node(build);
	if (!node)
		return isl_bool_error;
	node = isl_schedule_node_child(node, 0);
	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		isl_schedule_node_free(node);
		return isl_bool_false;
	}
	map = isl_map_copy(map2);
	map = isl_map_apply_domain(map, isl_map_copy(map1));
	umap = isl_union_map_from_map(map);
	after = after_in_tree(umap, node);
	isl_union_map_free(umap);
	isl_schedule_node_free(node);

	return after;
}
/* Internal data for any_scheduled_after.
 *
 * "build" is the build in which the AST is constructed.
 * "depth" is the number of loops that have already been generated.
 * "group_coscheduled" is a local copy of options->ast_build_group_coscheduled.
 * "domain" is an array of set-map pairs corresponding to the different
 * iteration domains.  The set is the schedule domain, i.e., the domain
 * of the inverse schedule, while the map is the inverse schedule itself.
 */
struct isl_any_scheduled_after_data {
	isl_ast_build *build;
	int depth;
	int group_coscheduled;
	struct isl_set_map_pair *domain;
};
/* Is any element of domain "i" scheduled after any element of domain "j"
 * (for a common iteration of the first data->depth loops)?
 *
 * data->domain[i].set contains the domain of the inverse schedule
 * for domain "i", i.e., elements in the schedule domain.
 *
 * If we are inside a band of a schedule tree and there is a pair
 * of elements in the two domains that is scheduled together by
 * the current band, then we check if any element of "i" may be scheduled
 * after any element of "j" by the descendants of the band node.
 *
 * If data->group_coscheduled is set, then we also return 1 if there
 * is any pair of elements in the two domains that are scheduled together.
 */
static isl_bool any_scheduled_after(int i, int j, void *user)
{
	struct isl_any_scheduled_after_data *data = user;
	isl_size dim = isl_set_dim(data->domain[i].set, isl_dim_set);
	int pos;

	if (dim < 0)
		return isl_bool_error;

	for (pos = data->depth; pos < dim; ++pos) {
		int follows;

		follows = isl_set_follows_at(data->domain[i].set,
						data->domain[j].set, pos);

		if (follows < -1)
			return isl_bool_error;
		if (follows > 0)
			return isl_bool_true;
		if (follows < 0)
			return isl_bool_false;
	}

	if (isl_ast_build_has_schedule_node(data->build)) {
		isl_bool after;

		after = after_in_subtree(data->build, data->domain[i].map,
					    data->domain[j].map);
		if (after < 0 || after)
			return after;
	}

	return data->group_coscheduled;
}
/* Look for independent components at the current depth and generate code
 * for each component separately.  The resulting lists of grafts are
 * merged in an attempt to combine grafts with identical guards.
 *
 * Code for two domains can be generated separately if all the elements
 * of one domain are scheduled before (or together with) all the elements
 * of the other domain.  We therefore consider the graph with as nodes
 * the domains and an edge between two nodes if any element of the first
 * node is scheduled after any element of the second node.
 * If the ast_build_group_coscheduled is set, then we also add an edge if
 * there is any pair of elements in the two domains that are scheduled
 * together.
 * Code is then generated (by generate_component)
 * for each of the strongly connected components in this graph
 * in their topological order.
 *
 * Since the test is performed on the domain of the inverse schedules of
 * the different domains, we precompute these domains and store
 * them in data.domain.
 */
static __isl_give isl_ast_graft_list *generate_components(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i;
	isl_ctx *ctx = isl_ast_build_get_ctx(build);
	int n = isl_union_map_n_map(executed);
	struct isl_any_scheduled_after_data data;
	struct isl_set_map_pair *next;
	struct isl_tarjan_graph *g = NULL;
	isl_ast_graft_list *list = NULL;
	int n_domain = 0;

	data.domain = isl_calloc_array(ctx, struct isl_set_map_pair, n);
	if (!data.domain)
		goto error;
	n_domain = n;

	next = data.domain;
	if (isl_union_map_foreach_map(executed, &extract_domain, &next) < 0)
		goto error;

	if (!build)
		goto error;
	data.build = build;
	data.depth = isl_ast_build_get_depth(build);
	data.group_coscheduled = isl_options_get_ast_build_group_coscheduled(ctx);
	g = isl_tarjan_graph_init(ctx, n, &any_scheduled_after, &data);
	if (!g)
		goto error;

	list = isl_ast_graft_list_alloc(ctx, 0);

	i = 0;
	while (list && n) {
		isl_ast_graft_list *list_c;
		int first = i;

		if (g->order[i] == -1)
			isl_die(ctx, isl_error_internal, "cannot happen",
				goto error);

		while (g->order[i] != -1) {
			++i;
			--n;
		}

		list_c = generate_component(data.domain,
					    g->order + first, i - first,
					    isl_ast_build_copy(build));
		list = isl_ast_graft_list_merge(list, list_c, build);

		++i;
	}

	if (0)
error:		list = isl_ast_graft_list_free(list);
	isl_tarjan_graph_free(g);
	for (i = 0; i < n_domain; ++i) {
		isl_map_free(data.domain[i].map);
		isl_set_free(data.domain[i].set);
	}
	free(data.domain);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
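
/* Editor's illustration (not part of the original source): for the inverse
 * schedule { [0, i] -> A[i]; [1, i] -> B[i] } at depth 0, every element of
 * the A domain is scheduled before every element of the B domain, so the
 * graph built above has no edge from A to B.  The two domains therefore end
 * up in separate strongly connected components and are code generated
 * independently, in their topological order.
 */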
/* Generate code for the next level (and all inner levels).
 *
 * If "executed" is empty, i.e., no code needs to be generated,
 * then we return an empty list.
 *
 * If we have already generated code for all loop levels, then we pass
 * control to generate_inner_level.
 *
 * If "executed" lives in a single space, i.e., if code needs to be
 * generated for a single domain, then there can only be a single
 * component and we go directly to generate_shifted_component.
 * Otherwise, we call generate_components to detect the components
 * and to call generate_component on each of them separately.
 */
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int depth;
	isl_size dim;

	if (!build || !executed)
		goto error;

	if (isl_union_map_is_empty(executed)) {
		isl_ctx *ctx = isl_ast_build_get_ctx(build);
		isl_union_map_free(executed);
		isl_ast_build_free(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	depth = isl_ast_build_get_depth(build);
	dim = isl_ast_build_dim(build, isl_dim_set);
	if (dim < 0)
		goto error;
	if (depth >= dim)
		return generate_inner_level(executed, build);

	if (isl_union_map_n_map(executed) == 1)
		return generate_shifted_component(executed, build);

	return generate_components(executed, build);
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Internal data structure used by isl_ast_build_node_from_schedule_map.
 * internal, executed and build are the inputs to generate_code.
 * list collects the output.
 */
struct isl_generate_code_data {
	int internal;
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
/* Given an inverse schedule in terms of the external build schedule, i.e.,
 *
 *	[E -> S] -> D
 *
 * with E the external build schedule and S the additional schedule "space",
 * reformulate the inverse schedule in terms of the internal schedule domain,
 * i.e., return
 *
 *	[I -> S] -> D
 *
 * We first obtain a mapping
 *
 *	I -> E
 *
 * take the inverse and the product with S -> S, resulting in
 *
 *	[I -> S] -> [E -> S]
 *
 * Applying the map to the input produces the desired result.
 */
static __isl_give isl_union_map *internal_executed(
	__isl_take isl_union_map *executed, __isl_keep isl_space *space,
	__isl_keep isl_ast_build *build)
{
	isl_map *id, *proj;

	proj = isl_ast_build_get_schedule_map(build);
	proj = isl_map_reverse(proj);
	space = isl_space_map_from_set(isl_space_copy(space));
	id = isl_map_identity(space);
	proj = isl_map_product(proj, id);
	executed = isl_union_map_apply_domain(executed,
					isl_union_map_from_map(proj));

	return executed;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 * "space" is the space of the additional part of the schedule.
 * It is equal to the space of "set" if build->domain is parametric.
 * Otherwise, it is equal to the range of the wrapped space of "set".
 *
 * If the build space is not parametric and
 * if isl_ast_build_node_from_schedule_map
 * was called from an outside user (data->internal not set), then
 * the (inverse) schedule refers to the external build domain and needs to
 * be transformed to refer to the internal build domain.
 *
 * If the build space is parametric, then we add some of the parameter
 * constraints to the executed relation.  Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary.  We therefore approximate
 * the constraints on the parameters by a single disjunct set.
 *
 * The build is extended to include the additional part of the schedule.
 * If the original build space was not parametric, then the options
 * in data->build refer only to the additional part of the schedule
 * and they need to be adjusted to refer to the complete AST build
 * domain.
 *
 * After having adjusted inverse schedule and build, we start generating
 * code with the outer loop of the current code generation
 * in generate_next_level.
 *
 * If the original build space was not parametric, we undo the embedding
 * on the resulting isl_ast_node_list so that it can be used within
 * the outer AST build.
 */
static isl_stat generate_code_in_space(struct isl_generate_code_data *data,
	__isl_take isl_set *set, __isl_take isl_space *space)
{
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
	int embed;

	executed = isl_union_map_copy(data->executed);
	executed = isl_union_map_intersect_domain(executed,
						isl_union_set_from_set(set));

	embed = !isl_set_is_params(data->build->domain);
	if (embed && !data->internal)
		executed = internal_executed(executed, space, data->build);
	if (!embed) {
		isl_set *domain;
		domain = isl_ast_build_get_domain(data->build);
		domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
		executed = isl_union_map_intersect_params(executed, domain);
	}

	build = isl_ast_build_copy(data->build);
	build = isl_ast_build_product(build, space);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, embed);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 *
 * If the build space S is not parametric, then the space of "set"
 * needs to be a wrapped relation with S as domain.  That is, it needs
 * to be of the form
 *
 *	[S -> T]
 *
 * Check this property and pass control to generate_code_in_space
 * passing along T.
 * If the build space is parametric, then T is the space of "set".
 */
static isl_stat generate_code_set(__isl_take isl_set *set, void *user)
{
	struct isl_generate_code_data *data = user;
	isl_space *space, *build_space;
	int is_domain;

	space = isl_set_get_space(set);

	if (isl_set_is_params(data->build->domain))
		return generate_code_in_space(data, set, space);

	build_space = isl_ast_build_get_space(data->build, data->internal);
	space = isl_space_unwrap(space);
	is_domain = isl_space_is_domain(build_space, space);
	isl_space_free(build_space);
	space = isl_space_range(space);

	if (is_domain < 0)
		goto error;
	if (!is_domain)
		isl_die(isl_set_get_ctx(set), isl_error_invalid,
			"invalid nested schedule space", goto error);

	return generate_code_in_space(data, set, space);
error:
	isl_set_free(set);
	isl_space_free(space);
	return isl_stat_error;
}
/* Generate an AST that visits the elements in the range of "executed"
 * in the relative order specified by the corresponding domain element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the domain space(s) of "executed"
 * need to be wrapped relations with S as domain.
 *
 * If the domain of "executed" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * If "internal" is set, then the domain "S" above refers to the internal
 * schedule domain representation.  Otherwise, it refers to the external
 * representation, as returned by isl_ast_build_get_schedule_space.
 *
 * We essentially run over all the spaces in the domain of "executed"
 * and call generate_code_set on each of them.
 */
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal)
{
	isl_ctx *ctx;
	struct isl_generate_code_data data = { 0 };
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_union_map *universe;

	if (!build)
		goto error;
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_align_params(space,
				isl_union_map_get_space(executed));
	space = isl_space_align_params(space,
				isl_union_map_get_space(build->options));
	build = isl_ast_build_align_params(build, isl_space_copy(space));
	executed = isl_union_map_align_params(executed, space);
	if (!executed || !build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	data.internal = internal;
	data.executed = executed;
	data.build = build;
	data.list = isl_ast_graft_list_alloc(ctx, 0);

	universe = isl_union_map_universe(isl_union_map_copy(executed));
	schedule_domain = isl_union_map_domain(universe);
	if (isl_union_set_foreach_set(schedule_domain, &generate_code_set,
					&data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_set_free(schedule_domain);
	isl_union_map_free(executed);

	isl_ast_build_free(build);
	return data.list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the corresponding image element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the range space(s) of "schedule"
 * need to be wrapped relations with S as domain.
 *
 * If the range of "schedule" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * We first initialize the local copies of the relevant options.
 * We do this here rather than when the isl_ast_build is created
 * because the options may have changed between the construction
 * of the isl_ast_build and the call to isl_generate_code.
 *
 * The main computation is performed on an inverse schedule (with
 * the schedule domain in the domain and the elements to be executed
 * in the range) called "executed".
 */
__isl_give isl_ast_node *isl_ast_build_node_from_schedule_map(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	isl_ast_graft_list *list;
	isl_ast_node *node;
	isl_union_map *executed;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	schedule = isl_union_map_coalesce(schedule);
	schedule = isl_union_map_remove_redundancies(schedule);
	executed = isl_union_map_reverse(schedule);
	list = generate_code(executed, isl_ast_build_copy(build), 0);
	node = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return node;
}
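
/* Editor's sketch (not part of the original source): a minimal use of this
 * entry point, assuming an isl_ctx "ctx" is available.
 *
 *	isl_union_map *schedule = isl_union_map_read_from_str(ctx,
 *		"{ A[i] -> [i] : 0 <= i < 10 }");
 *	isl_ast_build *build = isl_ast_build_from_context(
 *		isl_set_read_from_str(ctx, "{ : }"));
 *	isl_ast_node *tree = isl_ast_build_node_from_schedule_map(build,
 *		schedule);
 *	isl_ast_build_free(build);
 *	... use or print "tree" ...
 *	isl_ast_node_free(tree);
 */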
/* The old name for isl_ast_build_node_from_schedule_map.
 * It is being kept for backward compatibility, but
 * it will be removed in the future.
 */
__isl_give isl_ast_node *isl_ast_build_ast_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	return isl_ast_build_node_from_schedule_map(build, schedule);
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the band node "node" and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * If the band is empty, we continue with its descendants.
 * Otherwise, we extend the build and the inverse schedule with
 * the additional space/partial schedule and continue generating
 * an AST in generate_next_level.
 * As soon as we have extended the inverse schedule with the additional
 * partial schedule, we look for equalities that may exist between
 * the old and the new part.
 */
static __isl_give isl_ast_graft_list *build_ast_from_band(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_space *space;
	isl_multi_union_pw_aff *extra;
	isl_union_map *extra_umap;
	isl_ast_graft_list *list;
	isl_size n1, n2;

	if (!build || !node || !executed)
		goto error;

	if (isl_schedule_node_band_n_member(node) == 0)
		return build_ast_from_child(build, node, executed);

	extra = isl_schedule_node_band_get_partial_schedule(node);
	extra = isl_multi_union_pw_aff_align_params(extra,
				isl_ast_build_get_space(build, 1));
	space = isl_multi_union_pw_aff_get_space(extra);

	extra_umap = isl_union_map_from_multi_union_pw_aff(extra);
	extra_umap = isl_union_map_reverse(extra_umap);

	executed = isl_union_map_domain_product(executed, extra_umap);
	executed = isl_union_map_detect_equalities(executed);

	n1 = isl_ast_build_dim(build, isl_dim_param);
	build = isl_ast_build_product(build, space);
	n2 = isl_ast_build_dim(build, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		build = isl_ast_build_free(build);
	else if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"band node is not allowed to introduce new parameters",
			build = isl_ast_build_free(build));
	build = isl_ast_build_set_schedule_node(build, node);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, 1);

	return list;
error:
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Hoist a list of grafts (in practice containing a single graft)
 * from "sub_build" (which includes extra context information)
 * to "build".
 *
 * In particular, project out all additional parameters introduced
 * by the context node from the enforced constraints and the guard
 * of the single graft.
 */
static __isl_give isl_ast_graft_list *hoist_out_of_context(
	__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
{
	isl_ast_graft *graft;
	isl_basic_set *enforced;
	isl_set *guard;
	isl_size n_param, extra_param;

	n_param = isl_ast_build_dim(build, isl_dim_param);
	extra_param = isl_ast_build_dim(sub_build, isl_dim_param);
	if (n_param < 0 || extra_param < 0)
		return isl_ast_graft_list_free(list);

	if (extra_param == n_param)
		return list;

	extra_param -= n_param;
	enforced = isl_ast_graft_list_extract_shared_enforced(list, sub_build);
	enforced = isl_basic_set_project_out(enforced, isl_dim_param,
						n_param, extra_param);
	enforced = isl_basic_set_remove_unknown_divs(enforced);
	guard = isl_ast_graft_list_extract_hoistable_guard(list, sub_build);
	guard = isl_set_remove_divs_involving_dims(guard, isl_dim_param,
						n_param, extra_param);
	guard = isl_set_project_out(guard, isl_dim_param, n_param, extra_param);
	guard = isl_set_compute_divs(guard);
	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
						build, sub_build);
	list = isl_ast_graft_list_from_ast_graft(graft);

	return list;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the context node "node"
 * and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * The context node may introduce additional parameters as well as
 * constraints on the outer schedule dimensions or original parameters.
 *
 * We add the extra parameters to a new build and the context
 * constraints to both the build and (as a single disjunct)
 * to the domain of "executed".  Since the context constraints
 * are specified in terms of the input schedule, we first need
 * to map them to the internal schedule domain.
 *
 * After constructing the AST from the descendants of "node",
 * we combine the list of grafts into a single graft within
 * the new build, in order to be able to exploit the additional
 * context constraints during this combination.
 *
 * Additionally, if the current node is the outermost node in
 * the schedule tree (apart from the root domain node), we generate
 * all pending guards, again to be able to exploit the additional
 * context constraints.  We currently do not do this for internal
 * context nodes since we may still want to hoist conditions
 * to outer AST nodes.
 *
 * If the context node introduced any new parameters, then they
 * are removed from the set of enforced constraints and guard
 * in hoist_out_of_context.
 */
static __isl_give isl_ast_graft_list *build_ast_from_context(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	int depth;
	isl_set *context;
	isl_space *space;
	isl_multi_aff *internal2input;
	isl_ast_build *sub_build;
	isl_ast_graft_list *list;
	isl_size n;

	depth = isl_schedule_node_get_tree_depth(node);
	space = isl_ast_build_get_space(build, 1);
	context = isl_schedule_node_context_get_context(node);
	context = isl_set_align_params(context, space);
	sub_build = isl_ast_build_copy(build);
	space = isl_set_get_space(context);
	sub_build = isl_ast_build_align_params(sub_build, space);
	internal2input = isl_ast_build_get_internal2input(sub_build);
	context = isl_set_preimage_multi_aff(context, internal2input);
	sub_build = isl_ast_build_restrict_generated(sub_build,
					isl_set_copy(context));
	context = isl_set_from_basic_set(isl_set_simple_hull(context));
	executed = isl_union_map_intersect_domain(executed,
					isl_union_set_from_set(context));

	list = build_ast_from_child(isl_ast_build_copy(sub_build),
						node, executed);
	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		list = isl_ast_graft_list_free(list);
	if (n >= 2)
		list = isl_ast_graft_list_fuse(list, sub_build);
	if (depth == 1)
		list = isl_ast_graft_list_insert_pending_guard_nodes(list,
								sub_build);
	if (n >= 1)
		list = hoist_out_of_context(list, build, sub_build);

	isl_ast_build_free(build);
	isl_ast_build_free(sub_build);

	return list;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the expansion node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We expand the domain elements by the expansion and
 * continue with the descendants of the node.
 */
static __isl_give isl_ast_graft_list *build_ast_from_expansion(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_union_map *expansion;
	isl_size n1, n2;

	expansion = isl_schedule_node_expansion_get_expansion(node);
	expansion = isl_union_map_align_params(expansion,
				isl_union_map_get_space(executed));

	n1 = isl_union_map_dim(executed, isl_dim_param);
	executed = isl_union_map_apply_range(executed, expansion);
	n2 = isl_union_map_dim(executed, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		goto error;
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"expansion node is not allowed to introduce "
			"new parameters", goto error);

	return build_ast_from_child(build, node, executed);
error:
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	return NULL;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the extension node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Extend the inverse schedule with the extension applied to the current
 * set of generated constraints.  Since the extension is formulated
 * in terms of the input schedule, it first needs to be transformed
 * to refer to the internal schedule.
 */
static __isl_give isl_ast_graft_list *build_ast_from_extension(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_union_set *schedule_domain;
	isl_union_map *extension;
	isl_set *set;

	set = isl_ast_build_get_generated(build);
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	schedule_domain = isl_union_set_from_set(set);

	extension = isl_schedule_node_extension_get_extension(node);

	extension = isl_union_map_preimage_domain_multi_aff(extension,
			isl_multi_aff_copy(build->internal2input));
	extension = isl_union_map_intersect_domain(extension, schedule_domain);
	extension = isl_ast_build_substitute_values_union_map_domain(build,
								    extension);
	executed = isl_union_map_union(executed, extension);

	return build_ast_from_child(build, node, executed);
}
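
/* As an illustration of build_ast_from_extension above (made-up example):
 * if the constraints generated so far bound the outer iterator by
 * 0 <= i < n and the extension node carries
 *
 *	{ [i] -> init[i] }
 *
 * then this extension is restricted to (the simple hull of) those
 * generated constraints and added to the inverse schedule, so that
 * init[i] is also visited at iteration i of the generated AST.
 */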
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the filter node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We simply intersect the iteration domain (i.e., the range of "executed")
 * with the filter and continue with the descendants of the node,
 * unless the resulting inverse schedule is empty, in which
 * case we return an empty list.
 *
 * If the result of the intersection is equal to the original "executed"
 * relation, then keep the original representation since the intersection
 * may have unnecessarily broken up the relation into a greater number
 * of disjuncts.
 */
static __isl_give isl_ast_graft_list *build_ast_from_filter(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_ctx *ctx;
	isl_union_set *filter;
	isl_union_map *orig;
	isl_ast_graft_list *list;
	isl_bool unchanged, empty;
	isl_size n1, n2;

	orig = isl_union_map_copy(executed);
	if (!build || !node || !executed)
		goto error;

	filter = isl_schedule_node_filter_get_filter(node);
	filter = isl_union_set_align_params(filter,
				isl_union_map_get_space(executed));
	n1 = isl_union_map_dim(executed, isl_dim_param);
	executed = isl_union_map_intersect_range(executed, filter);
	n2 = isl_union_map_dim(executed, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		goto error;
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"filter node is not allowed to introduce "
			"new parameters", goto error);

	unchanged = isl_union_map_is_subset(orig, executed);
	empty = isl_union_map_is_empty(executed);
	if (unchanged < 0 || empty < 0)
		goto error;
	if (unchanged) {
		isl_union_map_free(executed);
		return build_ast_from_child(build, node, orig);
	}

	isl_union_map_free(orig);
	if (!empty)
		return build_ast_from_child(build, node, executed);

	ctx = isl_ast_build_get_ctx(build);
	list = isl_ast_graft_list_alloc(ctx, 0);
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	return list;
error:
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	isl_union_map_free(orig);
	return NULL;
}
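
/* As an illustration of build_ast_from_filter above (made-up example
 * relations): if the inverse schedule is
 *
 *	{ [i] -> A[i]; [i] -> B[i] }
 *
 * and the filter node selects { A[x] }, then intersecting the range of
 * the inverse schedule with the filter leaves
 *
 *	{ [i] -> A[i] }
 *
 * If the filter had selected both A[x] and B[x], the intersection would
 * be equal to the original relation and the original representation
 * would be kept.
 */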
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the guard node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Ensure that the associated guard is enforced by the outer AST
 * constructs by adding it to the guard of the graft.
 * Since we know that we will enforce the guard, we can also include it
 * in the generated constraints used to construct an AST for
 * the descendant nodes.
 */
static __isl_give isl_ast_graft_list *build_ast_from_guard(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_space *space;
	isl_set *guard, *hoisted;
	isl_basic_set *enforced;
	isl_ast_build *sub_build;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_size n1, n2;

	space = isl_ast_build_get_space(build, 1);
	guard = isl_schedule_node_guard_get_guard(node);
	n1 = isl_space_dim(space, isl_dim_param);
	guard = isl_set_align_params(guard, space);
	n2 = isl_set_dim(guard, isl_dim_param);
	if (n1 < 0 || n2 < 0)
		guard = isl_set_free(guard);
	else if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"guard node is not allowed to introduce "
			"new parameters", guard = isl_set_free(guard));
	guard = isl_set_preimage_multi_aff(guard,
			isl_multi_aff_copy(build->internal2input));
	guard = isl_ast_build_specialize(build, guard);
	guard = isl_set_gist(guard, isl_set_copy(build->generated));

	sub_build = isl_ast_build_copy(build);
	sub_build = isl_ast_build_restrict_generated(sub_build,
					isl_set_copy(guard));

	list = build_ast_from_child(isl_ast_build_copy(sub_build),
							node, executed);

	hoisted = isl_ast_graft_list_extract_hoistable_guard(list, sub_build);
	if (isl_set_n_basic_set(hoisted) > 1)
		list = isl_ast_graft_list_gist_guards(list,
						isl_set_copy(hoisted));
	guard = isl_set_intersect(guard, hoisted);
	enforced = extract_shared_enforced(list, build);
	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
						build, sub_build);

	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
}
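
/* As an illustration of build_ast_from_guard above (made-up example):
 * a guard node carrying the set
 *
 *	{ : n > 0 }
 *
 * results in this set being attached as the guard of the single graft
 * that is returned, so that the outer AST constructs enforce it, while
 * the descendants of the guard node are generated with n > 0 added to
 * the generated constraints and can therefore be simplified with
 * respect to it.
 */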
/* Call the before_each_mark callback, if requested by the user.
 *
 * Return 0 on success and -1 on error.
 *
 * The caller is responsible for recording the current inverse schedule
 * in the build.
 */
static isl_stat before_each_mark(__isl_keep isl_id *mark,
	__isl_keep isl_ast_build *build)
{
	if (!build)
		return isl_stat_error;
	if (!build->before_each_mark)
		return isl_stat_ok;
	return build->before_each_mark(mark, build,
					build->before_each_mark_user);
}
/* Call the after_each_mark callback, if requested by the user.
 *
 * The caller is responsible for recording the current inverse schedule
 * in the build.
 */
static __isl_give isl_ast_graft *after_each_mark(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_mark)
		return graft;
	graft->node = build->after_each_mark(graft->node, build,
						build->after_each_mark_user);
	if (!graft->node)
		return isl_ast_graft_free(graft);
	return graft;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the mark node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Since we may be calling before_each_mark and after_each_mark
 * callbacks, we record the current inverse schedule in the build.
 *
 * We generate an AST for the child of the mark node, combine
 * the graft list into a single graft and then insert the mark
 * in the AST of that single graft.
 */
static __isl_give isl_ast_graft_list *build_ast_from_mark(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_id *mark;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_size n;

	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	mark = isl_schedule_node_mark_get_id(node);
	if (before_each_mark(mark, build) < 0)
		node = isl_schedule_node_free(node);

	list = build_ast_from_child(isl_ast_build_copy(build), node, executed);
	list = isl_ast_graft_list_fuse(list, build);
	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		list = isl_ast_graft_list_free(list);
	if (n == 0) {
		isl_id_free(mark);
	} else {
		graft = isl_ast_graft_list_get_ast_graft(list, 0);
		graft = isl_ast_graft_insert_mark(graft, mark);
		graft = after_each_mark(graft, build);
		list = isl_ast_graft_list_set_ast_graft(list, 0, graft);
	}
	isl_ast_build_free(build);

	return list;
}
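
/* As an illustration of build_ast_from_mark above (made-up example):
 * for a mark node with identifier "kernel", the grafts generated for
 * its child are fused into a single graft and the AST of that graft is
 * wrapped in an isl_ast_node_mark carrying "kernel".  If the user has
 * installed before_each_mark or after_each_mark callbacks, they are
 * invoked before the child is visited and after the mark has been
 * inserted, respectively.
 */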
static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed);
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the sequence (or set) node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We simply generate an AST for each of the children and concatenate
 * the results.
 */
static __isl_give isl_ast_graft_list *build_ast_from_sequence(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	int i, n;
	isl_ctx *ctx;
	isl_ast_graft_list *list;

	ctx = isl_ast_build_get_ctx(build);
	list = isl_ast_graft_list_alloc(ctx, 0);

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_ast_graft_list *list_i;

		child = isl_schedule_node_get_child(node, i);
		list_i = build_ast_from_schedule_node(isl_ast_build_copy(build),
					child, isl_union_map_copy(executed));
		list = isl_ast_graft_list_concat(list, list_i);
	}
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);

	return list;
}
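
/* As an illustration of build_ast_from_sequence above (made-up example):
 * for a sequence node with two filter children selecting A[x] and B[x],
 * an AST is generated for each child separately and the two graft lists
 * are concatenated, so that the statements of A appear before those of B
 * in the generated AST.
 */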
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the node "node" and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * If the node is a leaf, then we pass control to generate_inner_level.
 * Note that the current build does not refer to any band node, so
 * that generate_inner_level will not try to visit the child of
 * the band node.
 *
 * The other node types are handled in separate functions.
 * Set nodes are currently treated in the same way as sequence nodes.
 * The children of a set node may be executed in any order,
 * including the order in which they appear.
 */
static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	enum isl_schedule_node_type type;

	type = isl_schedule_node_get_type(node);

	switch (type) {
	case isl_schedule_node_error:
		goto error;
	case isl_schedule_node_leaf:
		isl_schedule_node_free(node);
		return generate_inner_level(executed, build);
	case isl_schedule_node_band:
		return build_ast_from_band(build, node, executed);
	case isl_schedule_node_context:
		return build_ast_from_context(build, node, executed);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"unexpected internal domain node", goto error);
	case isl_schedule_node_expansion:
		return build_ast_from_expansion(build, node, executed);
	case isl_schedule_node_extension:
		return build_ast_from_extension(build, node, executed);
	case isl_schedule_node_filter:
		return build_ast_from_filter(build, node, executed);
	case isl_schedule_node_guard:
		return build_ast_from_guard(build, node, executed);
	case isl_schedule_node_mark:
		return build_ast_from_mark(build, node, executed);
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		return build_ast_from_sequence(build, node, executed);
	}

	isl_die(isl_ast_build_get_ctx(build), isl_error_internal,
		"unhandled type", goto error);
error:
	isl_union_map_free(executed);
	isl_schedule_node_free(node);
	isl_ast_build_free(build);

	return NULL;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the (single) child of "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * This function is never called on a leaf, set or sequence node,
 * so the node always has exactly one child.
 */
static __isl_give isl_ast_graft_list *build_ast_from_child(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	node = isl_schedule_node_child(node, 0);
	return build_ast_from_schedule_node(build, node, executed);
}
/* Generate an AST that visits the elements in the domain of the domain
 * node "node" in the relative order specified by its descendants.
 *
 * An initial inverse schedule is created that maps a zero-dimensional
 * schedule space to the node domain.
 * The input "build" is assumed to have a parametric domain and
 * is replaced by the same zero-dimensional schedule space.
 *
 * We also add some of the parameter constraints of the build domain
 * to the executed relation.  Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary.  We therefore approximate
 * the constraints on the parameters by a single disjunct set.
 */
static __isl_give isl_ast_node *build_ast_from_domain(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_union_set *domain, *schedule_domain;
	isl_union_map *executed;
	isl_space *space;
	isl_set *set;
	isl_ast_graft_list *list;
	isl_ast_node *ast;
	int is_params;

	if (!build || !node)
		goto error;

	ctx = isl_ast_build_get_ctx(build);
	space = isl_ast_build_get_space(build, 1);
	is_params = isl_space_is_params(space);
	isl_space_free(space);
	if (is_params < 0)
		goto error;
	if (!is_params)
		isl_die(ctx, isl_error_unsupported,
			"expecting parametric initial context", goto error);

	domain = isl_schedule_node_domain_get_domain(node);
	domain = isl_union_set_coalesce(domain);

	space = isl_union_set_get_space(domain);
	space = isl_space_set_from_params(space);
	build = isl_ast_build_product(build, space);

	set = isl_ast_build_get_domain(build);
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	schedule_domain = isl_union_set_from_set(set);

	executed = isl_union_map_from_domain_and_range(schedule_domain, domain);
	list = build_ast_from_child(isl_ast_build_copy(build), node, executed);
	ast = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return ast;
error:
	isl_schedule_node_free(node);
	isl_ast_build_free(build);
	return NULL;
}
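
/* As an illustration of build_ast_from_domain above (made-up example):
 * for a domain node with domain
 *
 *	{ S[i] : 0 <= i < n }
 *
 * and a build with parametric domain { : n <= 100 }, the initial inverse
 * schedule becomes
 *
 *	{ [] -> S[i] : 0 <= i < n and n <= 100 }
 *
 * mapping a zero-dimensional schedule space to the domain elements,
 * after which code generation continues with the descendants of the
 * domain node.
 */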
/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the schedule tree.
 *
 * "build" is an isl_ast_build that has been created using
 * isl_ast_build_alloc or isl_ast_build_from_context based
 * on a parametric set.
 *
 * The construction starts at the root node of the schedule,
 * which is assumed to be a domain node.
 */
__isl_give isl_ast_node *isl_ast_build_node_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_node *node;

	if (!build || !schedule)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	node = isl_schedule_get_root(schedule);
	if (!node)
		goto error;
	isl_schedule_free(schedule);

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	if (isl_schedule_node_get_type(node) != isl_schedule_node_domain)
		isl_die(ctx, isl_error_unsupported,
			"expecting root domain node",
			build = isl_ast_build_free(build));
	return build_ast_from_domain(build, node);
error:
	isl_schedule_free(schedule);