/*
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2014      INRIA Rocquencourt
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
 * B.P. 105 - 78153 Le Chesnay, France
 */

#include <isl/constraint.h>
#include <isl/union_set.h>
#include <isl/union_map.h>
#include <isl/schedule_node.h>
#include <isl_tarjan.h>
#include <isl_ast_private.h>
#include <isl_ast_build_expr.h>
#include <isl_ast_build_private.h>
#include <isl_ast_graft_private.h>
/* Data used in generate_domain.
 *
 * "build" is the input build.
 * "list" collects the results.
 */
struct isl_generate_domain_data {
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed,
	__isl_take isl_ast_build *build);
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal);
/* Generate an AST for a single domain based on
 * the (non single valued) inverse schedule "executed".
 *
 * We extend the schedule with the iteration domain
 * and continue generating through a call to generate_code.
 *
 * In particular, if executed has the form
 *
 *	S -> D
 *
 * then we continue generating code on
 *
 *	[S -> D] -> D
 *
 * The extended inverse schedule is clearly single valued,
 * ensuring that the nested generate_code will not reach this function,
 * but will instead create calls to all elements of D that need
 * to be executed from the current schedule domain.
 */
static isl_stat generate_non_single_valued(__isl_take isl_map *executed,
	struct isl_generate_domain_data *data)
	isl_ast_graft_list *list;

	build = isl_ast_build_copy(data->build);

	identity = isl_set_identity(isl_map_range(isl_map_copy(executed)));
	executed = isl_map_domain_product(executed, identity);
	build = isl_ast_build_set_single_valued(build, 1);

	list = generate_code(isl_union_map_from_map(executed), build, 1);

	data->list = isl_ast_graft_list_concat(data->list, list);
/* Call the at_each_domain callback, if requested by the user,
 * after recording the current inverse schedule in the build.
 */
static __isl_give isl_ast_graft *at_each_domain(__isl_take isl_ast_graft *graft,
	__isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
		return isl_ast_graft_free(graft);
	if (!build->at_each_domain)

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_executed(build,
		isl_union_map_from_map(isl_map_copy(executed)));
		return isl_ast_graft_free(graft);

	graft->node = build->at_each_domain(graft->node,
		build, build->at_each_domain_user);
	isl_ast_build_free(build);

		graft = isl_ast_graft_free(graft);
/* Generate a call expression for the single executed
 * domain element "map" and put a guard around it based on its (simplified)
 * domain. "executed" is the original inverse schedule from which "map"
 * has been derived. In particular, "map" is either identical to "executed"
 * or it is the result of gisting "executed" with respect to the build domain.
 * "executed" is only used if there is an at_each_domain callback.
 *
 * At this stage, any pending constraints in the build can no longer
 * be simplified with respect to any enforced constraints since
 * the call node does not have any enforced constraints.
 * Since all pending constraints not covered by any enforced constraints
 * will be added as a guard to the graft in create_node_scaled,
 * even in the eliminated case, the pending constraints
 * can be considered to have been generated by outer constructs.
 *
 * If the user has set an at_each_domain callback, it is called
 * on the constructed call expression node.
 */
static isl_stat add_domain(__isl_take isl_map *executed,
	__isl_take isl_map *map, struct isl_generate_domain_data *data)
	isl_ast_build *build;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_set *guard, *pending;

	build = isl_ast_build_copy(data->build);
	pending = isl_ast_build_get_pending(build);
	build = isl_ast_build_replace_pending_by_guard(build, pending);

	guard = isl_map_domain(isl_map_copy(map));
	guard = isl_set_compute_divs(guard);
	guard = isl_set_coalesce(guard);
	guard = isl_set_gist(guard, isl_ast_build_get_generated(build));
	guard = isl_ast_build_specialize(build, guard);

	graft = isl_ast_graft_alloc_domain(map, build);
	graft = at_each_domain(graft, executed, build);
	isl_ast_build_free(build);
	isl_map_free(executed);
	graft = isl_ast_graft_add_guard(graft, guard, data->build);

	list = isl_ast_graft_list_from_ast_graft(graft);
	data->list = isl_ast_graft_list_concat(data->list, list);
/* Generate an AST for a single domain based on
 * the inverse schedule "executed" and add it to data->list.
 *
 * If there is more than one domain element associated to the current
 * schedule "time", then we need to continue the generation process
 * in generate_non_single_valued.
 * Note that the inverse schedule being single-valued may depend
 * on constraints that are only available in the original context
 * domain specified by the user. We therefore first introduce
 * some of the constraints of data->build->domain. In particular,
 * we intersect with a single-disjunct approximation of this set.
 * We perform this approximation to avoid further splitting up
 * the executed relation, possibly introducing a disjunctive guard
 * on the corresponding code.
 *
 * On the other hand, we only perform the test after having taken the gist
 * of the domain as the resulting map is the one from which the call
 * expression is constructed. Using this map to construct the call
 * expression usually yields simpler results in cases where the original
 * map is not obviously single-valued.
 * If the original map is obviously single-valued, then the gist
 * operation is skipped.
 *
 * Because we perform the single-valuedness test on the gisted map,
 * we may in rare cases fail to recognize that the inverse schedule
 * is single-valued. This becomes problematic if this happens
 * from the recursive call through generate_non_single_valued
 * as we would then end up in an infinite recursion.
 * We therefore check if we are inside a call to generate_non_single_valued
 * and revert to the ungisted map if the gisted map turns out not to be
 * single-valued.
 *
 * Otherwise, call add_domain to generate a call expression (with guard) and
 * to call the at_each_domain callback, if any.
 */
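/* Editor's note: a hypothetical illustration (not taken from the isl
 * sources) of why the single-valuedness test is performed on the gisted
 * map.  Take
 *
 *	executed = { S[i] -> D[j] : i <= j <= i + n }
 *
 * with a build domain that implies n = 0.  The map is not plainly
 * single-valued on its own, but after gisting with respect to the build
 * domain it simplifies to { S[i] -> D[i] }, which is single-valued, so a
 * plain call expression can be generated instead of recursing through
 * generate_non_single_valued.
 */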
static isl_stat generate_domain(__isl_take isl_map *executed, void *user)
	struct isl_generate_domain_data *data = user;

	domain = isl_ast_build_get_domain(data->build);
	domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
	executed = isl_map_intersect_domain(executed, domain);
	empty = isl_map_is_empty(executed);
		isl_map_free(executed);

	sv = isl_map_plain_is_single_valued(executed);
		return add_domain(executed, isl_map_copy(executed), data);

	executed = isl_map_coalesce(executed);
	map = isl_map_copy(executed);
	map = isl_ast_build_compute_gist_map_domain(data->build, map);
	sv = isl_map_is_single_valued(map);
		if (data->build->single_valued)
			map = isl_map_copy(executed);
			return generate_non_single_valued(executed, data);

	return add_domain(executed, map, data);
	isl_map_free(executed);
	return isl_stat_error;
/* Call build->create_leaf to create a "leaf" node in the AST,
 * encapsulate the result in an isl_ast_graft and return the result
 * as a 1-element list.
 *
 * Note that the node returned by the user may be an entire tree.
 *
 * Since the node itself cannot enforce any constraints, we turn
 * all pending constraints into guards and add them to the resulting
 * graft to ensure that they will be generated.
 *
 * Before we pass control to the user, we first clear some information
 * from the build that is (presumably) only meaningful
 * for the current code generation.
 * This includes the create_leaf callback itself, so we make a copy
 * of the build first.
 */
static __isl_give isl_ast_graft_list *call_create_leaf(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
	isl_ast_graft *graft;
	isl_ast_build *user_build;

	guard = isl_ast_build_get_pending(build);
	user_build = isl_ast_build_copy(build);
	user_build = isl_ast_build_replace_pending_by_guard(user_build,
		isl_set_copy(guard));
	user_build = isl_ast_build_set_executed(user_build, executed);
	user_build = isl_ast_build_clear_local_info(user_build);

	node = build->create_leaf(user_build, build->create_leaf_user);
	graft = isl_ast_graft_alloc(node, build);
	graft = isl_ast_graft_add_guard(graft, guard, build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
static __isl_give isl_ast_graft_list *build_ast_from_child(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed);
/* Generate an AST after having handled the complete schedule
 * of this call to the code generator or the complete band
 * if we are generating an AST from a schedule tree.
 *
 * If we are inside a band node, then move on to the child of the band.
 *
 * If the user has specified a create_leaf callback, control
 * is passed to the user in call_create_leaf.
 *
 * Otherwise, we generate one or more calls for each individual
 * domain in generate_domain.
 */
static __isl_give isl_ast_graft_list *generate_inner_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
	struct isl_generate_domain_data data = { build };

	if (!build || !executed)

	if (isl_ast_build_has_schedule_node(build)) {
		isl_schedule_node *node;
		node = isl_ast_build_get_schedule_node(build);
		build = isl_ast_build_reset_schedule_node(build);
		return build_ast_from_child(build, node, executed);

	if (build->create_leaf)
		return call_create_leaf(executed, build);

	ctx = isl_union_map_get_ctx(executed);
	data.list = isl_ast_graft_list_alloc(ctx, 0);
	if (isl_union_map_foreach_map(executed, &generate_domain, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

error:	data.list = NULL;
	isl_ast_build_free(build);
	isl_union_map_free(executed);
/* Call the before_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_node *before_each_for(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build)
		return isl_ast_node_free(node);
	if (!build->before_each_for)

	id = build->before_each_for(build, build->before_each_for_user);
	node = isl_ast_node_set_annotation(node, id);
/* Call the after_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_graft *after_each_for(__isl_take isl_ast_graft *graft,
	__isl_keep isl_ast_build *build)
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_for)

	graft->node = build->after_each_for(graft->node, build,
		build->after_each_for_user);
		return isl_ast_graft_free(graft);
/* Plug in all the known values of the current and outer dimensions
 * in the domain of "executed". In principle, we only need to plug
 * in the known value of the current dimension since the values of
 * outer dimensions have been plugged in already.
 * However, it turns out to be easier to just plug in all known values.
 */
static __isl_give isl_union_map *plug_in_values(
	__isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
	return isl_ast_build_substitute_values_union_map_domain(build,
		executed);
/* Check if the constraint "c" is a lower bound on dimension "pos",
 * an upper bound, or independent of dimension "pos".
 */
static int constraint_type(isl_constraint *c, int pos)
	if (isl_constraint_is_lower_bound(c, isl_dim_set, pos))
	if (isl_constraint_is_upper_bound(c, isl_dim_set, pos))
/* Compare the types of the constraints "a" and "b",
 * resulting in constraints that are independent of "depth"
 * to be sorted before the lower bounds on "depth", which in
 * turn are sorted before the upper bounds on "depth".
 */
static int cmp_constraint(__isl_keep isl_constraint *a,
	__isl_keep isl_constraint *b, void *user)
	int t1 = constraint_type(a, *depth);
	int t2 = constraint_type(b, *depth);
/* Extract a lower bound on dimension "pos" from constraint "c".
 *
 * If the constraint is of the form
 *
 *	a x + f(...) >= 0
 *
 * then we essentially return
 *
 *	l = ceil(-f(...)/a)
 *
 * However, if the current dimension is strided, then we need to make
 * sure that the lower bound we construct is of the form
 *
 *	f + s * a
 *
 * with f the offset and s the stride.
 * We therefore compute
 *
 *	f + s * ceil((l - f)/s)
 */
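/* Editor's note: a small worked example of the formula above, not part of
 * the original source.  With stride s = 4, offset f = 1 and a raw lower
 * bound l = ceil(-f(...)/a) = 7, the adjusted bound is
 *
 *	f + s * ceil((l - f)/s) = 1 + 4 * ceil(6/4) = 1 + 4 * 2 = 9
 *
 * i.e., the smallest value of the form f + s * a that is at least 7.
 */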
static __isl_give isl_aff *lower_bound(__isl_keep isl_constraint *c,
	int pos, __isl_keep isl_ast_build *build)
	aff = isl_constraint_get_bound(c, isl_dim_set, pos);
	aff = isl_aff_ceil(aff);

	if (isl_ast_build_has_stride(build, pos)) {
		offset = isl_ast_build_get_offset(build, pos);
		stride = isl_ast_build_get_stride(build, pos);

		aff = isl_aff_sub(aff, isl_aff_copy(offset));
		aff = isl_aff_scale_down_val(aff, isl_val_copy(stride));
		aff = isl_aff_ceil(aff);
		aff = isl_aff_scale_val(aff, stride);
		aff = isl_aff_add(aff, offset);

	aff = isl_ast_build_compute_gist_aff(build, aff);
/* Return the exact lower bound (or upper bound if "upper" is set)
 * of "domain" as a piecewise affine expression.
 *
 * If we are computing a lower bound (of a strided dimension), then
 * we need to make sure it is of the form
 *
 *	f + s * a
 *
 * where f is the offset and s is the stride.
 * We therefore need to include the stride constraint before computing
 * the minimum.
 */
static __isl_give isl_pw_aff *exact_bound(__isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build, int upper)
	isl_pw_multi_aff *pma;

	domain = isl_set_copy(domain);
	stride = isl_ast_build_get_stride_constraint(build);
	domain = isl_set_intersect(domain, stride);
	it_map = isl_ast_build_map_to_iterator(build, domain);
		pma = isl_map_lexmax_pw_multi_aff(it_map);
		pma = isl_map_lexmin_pw_multi_aff(it_map);
	pa = isl_pw_multi_aff_get_pw_aff(pma, 0);
	isl_pw_multi_aff_free(pma);
	pa = isl_ast_build_compute_gist_pw_aff(build, pa);
	pa = isl_pw_aff_coalesce(pa);
/* Callback for sorting the isl_pw_aff_list passed to reduce_list and
 * remove_redundant_lower_bounds.
 */
static int reduce_list_cmp(__isl_keep isl_pw_aff *a, __isl_keep isl_pw_aff *b,
	void *user)
	return isl_pw_aff_plain_cmp(a, b);
/* Given a list of lower bounds "list", remove those that are redundant
 * with respect to the other bounds in "list" and the domain of "build".
 *
 * We first sort the bounds in the same way as they would be sorted
 * by set_for_node_expressions so that we can try and remove the last
 * bounds first.
 *
 * For a lower bound to be effective, there needs to be at least
 * one domain element for which it is larger than all other lower bounds.
 * For each lower bound we therefore intersect the domain with
 * the conditions that it is larger than all other bounds and
 * check whether the result is empty. If so, the bound can be removed.
 */
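/* Editor's note: a hypothetical example of such a redundant bound, not part
 * of the original source.  Suppose the lower bounds are { 0, n } and the
 * build domain implies n <= 0.  There is no domain element for which n is
 * strictly larger than 0, so the bound n can be dropped and the loop can
 * simply start at 0.
 */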
static __isl_give isl_pw_aff_list *remove_redundant_lower_bounds(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
	list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);

	n = isl_pw_aff_list_n_pw_aff(list);

	domain = isl_ast_build_get_domain(build);

	for (i = n - 1; i >= 0; --i) {
		domain_i = isl_set_copy(domain);
		pa_i = isl_pw_aff_list_get_pw_aff(list, i);

		for (j = 0; j < n; ++j) {
			pa_j = isl_pw_aff_list_get_pw_aff(list, j);
			better = isl_pw_aff_gt_set(isl_pw_aff_copy(pa_i), pa_j);
			domain_i = isl_set_intersect(domain_i, better);

		empty = isl_set_is_empty(domain_i);

		isl_set_free(domain_i);
		isl_pw_aff_free(pa_i);

		list = isl_pw_aff_list_drop(list, i, 1);

	isl_set_free(domain);

	isl_set_free(domain);
	return isl_pw_aff_list_free(list);
/* Extract a lower bound on dimension "pos" from each constraint
 * in "constraints" and return the list of lower bounds.
 * If "constraints" has zero elements, then we extract a lower bound
 * from "domain" instead.
 *
 * If the current dimension is strided, then the lower bound
 * is adjusted by lower_bound to match the stride information.
 * This modification may make one or more lower bounds redundant
 * with respect to the other lower bounds. We therefore check
 * for this condition and remove the redundant lower bounds.
 */
static __isl_give isl_pw_aff_list *lower_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
	isl_pw_aff_list *list;

	n = isl_constraint_list_n_constraint(constraints);
		pa = exact_bound(domain, build, 0);
		return isl_pw_aff_list_from_pw_aff(pa);

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		c = isl_constraint_list_get_constraint(constraints, i);
		aff = lower_bound(c, pos, build);
		isl_constraint_free(c);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));

	if (isl_ast_build_has_stride(build, pos))
		list = remove_redundant_lower_bounds(list, build);
/* Extract an upper bound on dimension "pos" from each constraint
 * in "constraints" and return the list of upper bounds.
 * If "constraints" has zero elements, then we extract an upper bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *upper_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
	isl_pw_aff_list *list;

	n = isl_constraint_list_n_constraint(constraints);
		pa = exact_bound(domain, build, 1);
		return isl_pw_aff_list_from_pw_aff(pa);

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		c = isl_constraint_list_get_constraint(constraints, i);
		aff = isl_constraint_get_bound(c, isl_dim_set, pos);
		isl_constraint_free(c);
		aff = isl_aff_floor(aff);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
/* Return an isl_ast_expr that performs the reduction of type "type"
 * on AST expressions corresponding to the elements in "list".
 *
 * The list is assumed to contain at least one element.
 * If the list contains exactly one element, then the returned isl_ast_expr
 * simply computes that affine expression.
 * If the list contains more than one element, then we sort it
 * using a fairly arbitrary but hopefully reasonably stable order.
 */
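/* Editor's note: illustration added by the editor, not part of the original
 * source.  For example, with type isl_ast_op_min and a list containing the
 * expressions n and 10, reduce_list produces the AST expression min(n, 10);
 * for a singleton list it simply returns the AST expression for that single
 * element.
 */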
static __isl_give isl_ast_expr *reduce_list(enum isl_ast_op_type type,
	__isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
	n = isl_pw_aff_list_n_pw_aff(list);
		return isl_ast_build_expr_from_pw_aff_internal(build,
			isl_pw_aff_list_get_pw_aff(list, 0));

	ctx = isl_pw_aff_list_get_ctx(list);
	expr = isl_ast_expr_alloc_op(ctx, type, n);

	list = isl_pw_aff_list_copy(list);
	list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
		return isl_ast_expr_free(expr);

	for (i = 0; i < n; ++i) {
		isl_ast_expr *expr_i;

		expr_i = isl_ast_build_expr_from_pw_aff_internal(build,
			isl_pw_aff_list_get_pw_aff(list, i));
		expr->u.op.args[i] = expr_i;

	isl_pw_aff_list_free(list);

	isl_pw_aff_list_free(list);
	isl_ast_expr_free(expr);
/* Add guards implied by the "generated constraints",
 * but not (necessarily) enforced by the generated AST to "guard".
 * In particular, if there are any stride constraints,
 * then add the guard implied by those constraints.
 * If we have generated a degenerate loop, then add the guard
 * implied by "bounds" on the outer dimensions, i.e., the guard
 * that ensures that the single value actually exists.
 * Since there may also be guards implied by a combination
 * of these constraints, we first combine them before
 * deriving the implied constraints.
 */
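/* Editor's note: a hypothetical illustration, not from the original source.
 * If the current dimension i is known to satisfy i = 2 a (stride 2,
 * offset 0) and the loop is degenerate with single value i = n, then the
 * combination of these two constraints implies the guard (n mod 2) = 0 on
 * the outer dimensions, which is exactly the kind of guard obtained here
 * by eliminating i from the combined set.
 */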
static __isl_give isl_set *add_implied_guards(__isl_take isl_set *guard,
	int degenerate, __isl_keep isl_basic_set *bounds,
	__isl_keep isl_ast_build *build)
	int depth, has_stride;

	depth = isl_ast_build_get_depth(build);
	has_stride = isl_ast_build_has_stride(build, depth);
	if (!has_stride && !degenerate)

	space = isl_basic_set_get_space(bounds);
	dom = isl_set_universe(space);

		bounds = isl_basic_set_copy(bounds);
		bounds = isl_basic_set_drop_constraints_not_involving_dims(
			bounds, isl_dim_set, depth, 1);
		set = isl_set_from_basic_set(bounds);
		dom = isl_set_intersect(dom, set);

		set = isl_ast_build_get_stride_constraint(build);
		dom = isl_set_intersect(dom, set);

	dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
	dom = isl_ast_build_compute_gist(build, dom);
	guard = isl_set_intersect(guard, dom);
/* Update "graft" based on "sub_build" for the degenerate case.
 *
 * "build" is the build in which graft->node was created
 * "sub_build" contains information about the current level itself,
 * including the single value attained.
 *
 * We set the initialization part of the for loop to the single
 * value attained by the current dimension.
 * The increment and condition are not strictly needed as they are known
 * to be "1" and "iterator <= value" respectively.
 */
static __isl_give isl_ast_graft *refine_degenerate(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
	if (!graft || !sub_build)
		return isl_ast_graft_free(graft);

	value = isl_pw_aff_copy(sub_build->value);

	graft->node->u.f.init = isl_ast_build_expr_from_pw_aff_internal(build,
		value);
	if (!graft->node->u.f.init)
		return isl_ast_graft_free(graft);
/* Return the intersection of constraints in "list" as a set.
 */
static __isl_give isl_set *intersect_constraints(
	__isl_keep isl_constraint_list *list)
	n = isl_constraint_list_n_constraint(list);
		isl_die(isl_constraint_list_get_ctx(list), isl_error_internal,
			"expecting at least one constraint", return NULL);

	bset = isl_basic_set_from_constraint(
		isl_constraint_list_get_constraint(list, 0));
	for (i = 1; i < n; ++i) {
		isl_basic_set *bset_i;

		bset_i = isl_basic_set_from_constraint(
			isl_constraint_list_get_constraint(list, i));
		bset = isl_basic_set_intersect(bset, bset_i);

	return isl_set_from_basic_set(bset);
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as a set "upper".
 *
 * In particular, if l(...) is a lower bound in "lower", and
 *
 *	-a i + f(...) >= 0		or	a i <= f(...)
 *
 * is an upper bound constraint on the current dimension i,
 * then the for loop enforces the constraint
 *
 *	-a l(...) + f(...) >= 0		or	a l(...) <= f(...)
 *
 * We therefore simply take each lower bound in turn, plug it into
 * the upper bounds and compute the intersection over all lower bounds.
 *
 * If a lower bound is a rational expression, then
 * isl_basic_set_preimage_multi_aff will force this rational
 * expression to have only integer values. However, the loop
 * itself does not enforce this integrality constraint. We therefore
 * use the ceil of the lower bounds instead of the lower bounds themselves.
 * Other constraints will make sure that the for loop is only executed
 * when each of the lower bounds attains an integral value.
 * In particular, potentially rational values only occur in
 * lower_bound if the offset is a (seemingly) rational expression,
 * but then outer conditions will make sure that this rational expression
 * only attains integer values.
 */
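/* Editor's note: a hypothetical example of the computation described above,
 * not part of the original source.  If the only lower bound is l(n) = n and
 * the upper bound set is { [i] : 2 i <= m }, then plugging (the ceil of)
 * the lower bound into the upper bound yields the enforced constraint
 * 2 n <= m on the outer dimensions.
 */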
static __isl_give isl_ast_graft *set_enforced_from_set(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
	isl_basic_set *enforced;
	isl_pw_multi_aff *pma;

	if (!graft || !lower)
		return isl_ast_graft_free(graft);

	space = isl_set_get_space(upper);
	enforced = isl_basic_set_universe(isl_space_copy(space));

	space = isl_space_map_from_set(space);
	pma = isl_pw_multi_aff_identity(space);

	n = isl_pw_aff_list_n_pw_aff(lower);
	for (i = 0; i < n; ++i) {
		isl_pw_multi_aff *pma_i;

		pa = isl_pw_aff_list_get_pw_aff(lower, i);
		pa = isl_pw_aff_ceil(pa);
		pma_i = isl_pw_multi_aff_copy(pma);
		pma_i = isl_pw_multi_aff_set_pw_aff(pma_i, pos, pa);
		enforced_i = isl_set_copy(upper);
		enforced_i = isl_set_preimage_pw_multi_aff(enforced_i, pma_i);
		hull = isl_set_simple_hull(enforced_i);
		enforced = isl_basic_set_intersect(enforced, hull);

	isl_pw_multi_aff_free(pma);

	graft = isl_ast_graft_enforce(graft, enforced);
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as
 * a list of affine expressions "upper".
 *
 * The enforced condition is that each lower bound expression is less
 * than or equal to each upper bound expression.
 */
static __isl_give isl_ast_graft *set_enforced_from_list(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
	isl_basic_set *enforced;

	lower = isl_pw_aff_list_copy(lower);
	upper = isl_pw_aff_list_copy(upper);
	cond = isl_pw_aff_list_le_set(lower, upper);
	enforced = isl_set_simple_hull(cond);
	graft = isl_ast_graft_enforce(graft, enforced);
/* Does "aff" have a negative constant term?
 */
static isl_stat aff_constant_is_negative(__isl_take isl_set *set,
	__isl_take isl_aff *aff, void *user)
	v = isl_aff_get_constant_val(aff);
	*neg = isl_val_is_neg(v);

	return *neg ? isl_stat_ok : isl_stat_error;
/* Does "pa" have a negative constant term over its entire domain?
 */
static isl_stat pw_aff_constant_is_negative(__isl_take isl_pw_aff *pa,
	void *user)
	r = isl_pw_aff_foreach_piece(pa, &aff_constant_is_negative, user);

	return (*neg && r >= 0) ? isl_stat_ok : isl_stat_error;
/* Does each element in "list" have a negative constant term?
 *
 * The callback terminates the iteration as soon as an element has been
 * found that does not have a negative constant term.
 */
static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
	if (isl_pw_aff_list_foreach(list,
			&pw_aff_constant_is_negative, &neg) < 0 && neg)
/* Add 1 to each of the elements in "list", where each of these elements
 * is defined over the internal schedule space of "build".
 */
static __isl_give isl_pw_aff_list *list_add_one(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
	space = isl_ast_build_get_space(build, 1);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_constant_si(aff, 1);
	one = isl_pw_aff_from_aff(aff);

	n = isl_pw_aff_list_n_pw_aff(list);
	for (i = 0; i < n; ++i) {
		pa = isl_pw_aff_list_get_pw_aff(list, i);
		pa = isl_pw_aff_add(pa, isl_pw_aff_copy(one));
		list = isl_pw_aff_list_set_pw_aff(list, i, pa);

	isl_pw_aff_free(one);
/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a list of piecewise affine expressions.
 *
 * In particular, set the condition to
 *
 *	iterator <= min(list of upper bounds)
 *
 * If each of the upper bounds has a negative constant term, then
 * set the condition to
 *
 *	iterator < min(list of (upper bound + 1)s)
 */
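/* Editor's note: example added by the editor, not part of the original
 * source.  If the upper bounds are n - 1 and m - 1 (both with constant
 * term -1 < 0), then instead of
 *
 *	iterator <= min(n - 1, m - 1)
 *
 * the condition is generated as
 *
 *	iterator < min(n, m)
 *
 * which tends to produce more idiomatic loop bounds.
 */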
static __isl_give isl_ast_graft *set_for_cond_from_list(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list,
	__isl_keep isl_ast_build *build)
	isl_ast_expr *bound, *iterator, *cond;
	enum isl_ast_op_type type = isl_ast_op_le;

		return isl_ast_graft_free(graft);

	neg = list_constant_is_negative(list);
		return isl_ast_graft_free(graft);
	list = isl_pw_aff_list_copy(list);
		list = list_add_one(list, build);
		type = isl_ast_op_lt;

	bound = reduce_list(isl_ast_op_min, list, build);
	iterator = isl_ast_expr_copy(graft->node->u.f.iterator);
	cond = isl_ast_expr_alloc_binary(type, iterator, bound);
	graft->node->u.f.cond = cond;

	isl_pw_aff_list_free(list);
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);
/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a set.
 */
static __isl_give isl_ast_graft *set_for_cond_from_set(
	__isl_take isl_ast_graft *graft, __isl_keep isl_set *set,
	__isl_keep isl_ast_build *build)
	cond = isl_ast_build_expr_from_set_internal(build, isl_set_copy(set));
	graft->node->u.f.cond = cond;
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);
/* Construct an isl_ast_expr for the increment (i.e., stride) of
 * the current dimension.
 */
static __isl_give isl_ast_expr *for_inc(__isl_keep isl_ast_build *build)
	ctx = isl_ast_build_get_ctx(build);
	depth = isl_ast_build_get_depth(build);

	if (!isl_ast_build_has_stride(build, depth))
		return isl_ast_expr_alloc_int_si(ctx, 1);

	v = isl_ast_build_get_stride(build, depth);
	return isl_ast_expr_from_val(v);
/* Should we express the loop condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints?
 *
 * The first is constructed from a list of upper bounds.
 * The second is constructed from a set.
 *
 * If there are no upper bounds in "constraints", then this could mean
 * that "domain" simply doesn't have an upper bound or that we didn't
 * pick any upper bound. In the first case, we want to generate the
 * loop condition as a(n empty) conjunction of constraints.
 * In the second case, we will compute
 * a single upper bound from "domain" and so we use the list form.
 *
 * If there are upper bounds in "constraints",
 * then we use the list form iff the atomic_upper_bound option is set.
 */
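/* Editor's note: illustration added by the editor, not part of the original
 * source.  With upper bound constraints i <= n and i <= m, the list form
 * produces the condition i <= min(n, m), while the set form produces the
 * conjunction i <= n && i <= m; the choice between the two is controlled
 * by the atomic_upper_bound option as described above.
 */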
static int use_upper_bound_list(isl_ctx *ctx, int n_upper,
	__isl_keep isl_set *domain, int depth)
		return isl_options_get_ast_build_atomic_upper_bound(ctx);
		return isl_set_dim_has_upper_bound(domain, isl_dim_set, depth);
/* Fill in the expressions of the for node in graft->node.
 *
 * - set the initialization part of the loop to the maximum of the lower bounds
 * - extract the increment from the stride of the current dimension
 * - construct the for condition either based on a list of upper bounds
 *   or on a set of upper bound constraints.
 */
static __isl_give isl_ast_graft *set_for_node_expressions(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower,
	int use_list, __isl_keep isl_pw_aff_list *upper_list,
	__isl_keep isl_set *upper_set, __isl_keep isl_ast_build *build)
	build = isl_ast_build_copy(build);

	node->u.f.init = reduce_list(isl_ast_op_max, lower, build);
	node->u.f.inc = for_inc(build);

	if (!node->u.f.init || !node->u.f.inc)
		graft = isl_ast_graft_free(graft);

		graft = set_for_cond_from_list(graft, upper_list, build);
		graft = set_for_cond_from_set(graft, upper_set, build);

	isl_ast_build_free(build);
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "c_lower" and "c_upper" contain the lower and upper bounds
 * that the loop node should express.
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 *
 * There may be zero lower bounds or zero upper bounds in "constraints"
 * in case the list of constraints was created
 * based on the atomic option or based on separation with explicit bounds.
 * In that case, we use "domain" to derive lower and/or upper bounds.
 *
 * We first compute a list of one or more lower bounds.
 *
 * Then we decide if we want to express the condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints.
 *
 * The set of enforced constraints is then computed either based on
 * a list of upper bounds or on a set of upper bound constraints.
 * We do not compute any enforced constraints if we were forced
 * to compute a lower or upper bound using exact_bound. The domains
 * of the resulting expressions may imply some bounds on outer dimensions
 * that we do not want to appear in the enforced constraints since
 * they are not actually enforced by the corresponding code.
 *
 * Finally, we fill in the expressions of the for node.
 */
static __isl_give isl_ast_graft *refine_generic_bounds(
	__isl_take isl_ast_graft *graft,
	__isl_take isl_constraint_list *c_lower,
	__isl_take isl_constraint_list *c_upper,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
	isl_pw_aff_list *lower;
	isl_set *upper_set = NULL;
	isl_pw_aff_list *upper_list = NULL;
	int n_lower, n_upper;

	if (!graft || !c_lower || !c_upper || !build)

	depth = isl_ast_build_get_depth(build);
	ctx = isl_ast_graft_get_ctx(graft);

	n_lower = isl_constraint_list_n_constraint(c_lower);
	n_upper = isl_constraint_list_n_constraint(c_upper);

	use_list = use_upper_bound_list(ctx, n_upper, domain, depth);

	lower = lower_bounds(c_lower, depth, domain, build);

		upper_list = upper_bounds(c_upper, depth, domain, build);
	else if (n_upper > 0)
		upper_set = intersect_constraints(c_upper);
		upper_set = isl_set_universe(isl_set_get_space(domain));

	if (n_lower == 0 || n_upper == 0)
		graft = set_enforced_from_list(graft, lower, upper_list);
		graft = set_enforced_from_set(graft, lower, depth, upper_set);

	graft = set_for_node_expressions(graft, lower, use_list, upper_list,
		upper_set, build);

	isl_pw_aff_list_free(lower);
	isl_pw_aff_list_free(upper_list);
	isl_set_free(upper_set);
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);

	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);
	return isl_ast_graft_free(graft);
/* Internal data structure used inside count_constraints to keep
 * track of the number of constraints that are independent of dimension "pos",
 * the lower bounds in "pos" and the upper bounds in "pos".
 */
struct isl_ast_count_constraints_data {
/* Increment data->n_indep, data->lower or data->upper depending
 * on whether "c" is independent of dimension data->pos,
 * a lower bound or an upper bound.
 */
static isl_stat count_constraints(__isl_take isl_constraint *c, void *user)
	struct isl_ast_count_constraints_data *data = user;

	if (isl_constraint_is_lower_bound(c, isl_dim_set, data->pos))
	else if (isl_constraint_is_upper_bound(c, isl_dim_set, data->pos))

	isl_constraint_free(c);
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "list" represents the list of bounds that need to be encoded by
 * the for loop. Only the constraints that involve the iterator
 * are relevant here. The other constraints are taken care of by
 * the caller and are included in the generated constraints of "build".
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We separate lower bounds, upper bounds and constraints that
 * are independent of the loop iterator.
 *
 * The actual for loop bounds are generated in refine_generic_bounds.
 */
static __isl_give isl_ast_graft *refine_generic_split(
	__isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
	struct isl_ast_count_constraints_data data;
	isl_constraint_list *lower;
	isl_constraint_list *upper;

		return isl_ast_graft_free(graft);

	data.pos = isl_ast_build_get_depth(build);

	list = isl_constraint_list_sort(list, &cmp_constraint, &data.pos);
		return isl_ast_graft_free(graft);

	data.n_indep = data.n_lower = data.n_upper = 0;
	if (isl_constraint_list_foreach(list, &count_constraints, &data) < 0) {
		isl_constraint_list_free(list);
		return isl_ast_graft_free(graft);

	lower = isl_constraint_list_drop(list, 0, data.n_indep);
	upper = isl_constraint_list_copy(lower);
	lower = isl_constraint_list_drop(lower, data.n_lower, data.n_upper);
	upper = isl_constraint_list_drop(upper, 0, data.n_lower);

	return refine_generic_bounds(graft, lower, upper, domain, build);
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "bounds" represents the bounds that need to be encoded by
 * the for loop (or a guard around the for loop).
 * "domain" is the subset of "bounds" for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We break up "bounds" into a list of constraints and continue with
 * refine_generic_split.
 */
static __isl_give isl_ast_graft *refine_generic(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_basic_set *bounds, __isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build)
	isl_constraint_list *list;

	if (!build || !graft)
		return isl_ast_graft_free(graft);

	list = isl_basic_set_get_constraint_list(bounds);

	graft = refine_generic_split(graft, list, domain, build);
/* Create a for node for the current level.
 *
 * Mark the for node degenerate if "degenerate" is set.
 */
static __isl_give isl_ast_node *create_for(__isl_keep isl_ast_build *build,
	int degenerate)
	depth = isl_ast_build_get_depth(build);
	id = isl_ast_build_get_iterator_id(build, depth);
	node = isl_ast_node_alloc_for(id);
		node = isl_ast_node_for_mark_degenerate(node);
/* If the ast_build_exploit_nested_bounds option is set, then return
 * the constraints enforced by all elements in "list".
 * Otherwise, return the universe.
 */
static __isl_give isl_basic_set *extract_shared_enforced(
	__isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
	ctx = isl_ast_graft_list_get_ctx(list);
	if (isl_options_get_ast_build_exploit_nested_bounds(ctx))
		return isl_ast_graft_list_extract_shared_enforced(list, build);

	space = isl_ast_build_get_space(build, 1);
	return isl_basic_set_universe(space);
/* Return the pending constraints of "build" that are not already taken
 * care of (by a combination of "enforced" and the generated constraints
 * of "build").
 */
static __isl_give isl_set *extract_pending(__isl_keep isl_ast_build *build,
	__isl_keep isl_basic_set *enforced)
	isl_set *guard, *context;

	guard = isl_ast_build_get_pending(build);
	context = isl_set_from_basic_set(isl_basic_set_copy(enforced));
	context = isl_set_intersect(context,
		isl_ast_build_get_generated(build));
	return isl_set_gist(guard, context);
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 * It may be a strict subset of "bounds" in case "bounds" was created
 * based on the atomic option or based on separation with explicit bounds.
 *
 * "domain" may satisfy additional equalities that result
 * from intersecting "executed" with "bounds" in add_node.
 * It may also satisfy some global constraints that were dropped out because
 * we performed separation with explicit bounds.
 * The very first step is then to copy these constraints to "bounds".
 *
 * Since we may be calling before_each_for and after_each_for
 * callbacks, we record the current inverse schedule in the build.
 *
 * We consider three builds,
 * "build" is the one in which the current level is created,
 * "body_build" is the build in which the next level is created,
 * "sub_build" is essentially the same as "body_build", except that
 * the depth has not been increased yet.
 *
 * "build" already contains information (in strides and offsets)
 * about the strides at the current level, but this information is not
 * reflected in the build->domain.
 * We first add this information and the "bounds" to the sub_build->domain.
 * isl_ast_build_set_loop_bounds adds the stride information and
 * checks whether the current dimension attains
 * only a single value and whether this single value can be represented using
 * a single affine expression.
 * In the first case, the current level is considered "degenerate".
 * In the second sub-case, the current level is considered "eliminated".
 * Eliminated levels don't need to be reflected in the AST since we can
 * simply plug in the affine expression. For degenerate, but non-eliminated,
 * levels, we do introduce a for node, but mark it as degenerate so that
 * it can be printed as an assignment of the single value to the loop
 * iterator.
 *
 * If the current level is eliminated, we explicitly plug in the value
 * for the current level found by isl_ast_build_set_loop_bounds in the
 * inverse schedule. This ensures that if we are working on a slice
 * of the domain based on information available in the inverse schedule
 * and the build domain, then this information is also reflected
 * in the inverse schedule. This operation also eliminates the current
 * dimension from the inverse schedule, making sure no inner dimensions depend
 * on the current dimension. Otherwise, we create a for node, marking
 * it degenerate if appropriate. The initial for node is still incomplete
 * and will be completed in either refine_degenerate or refine_generic.
 *
 * We then generate a sequence of grafts for the next level,
 * create a surrounding graft for the current level and insert
 * the for node we created (if the current level is not eliminated).
 * Before creating a graft for the current level, we first extract
 * hoistable constraints from the child guards and combine them
 * with the pending constraints in the build. These constraints
 * are used to simplify the child guards and then added to the guard
 * of the current graft to ensure that they will be generated.
 * If the hoisted guard is a disjunction, then we use it directly
 * to gist the guards on the children before intersecting it with the
 * pending constraints. We do so because this disjunction is typically
 * identical to the guards on the children such that these guards
 * can be effectively removed completely. After the intersection,
 * the gist operation would have a harder time figuring this out.
 *
 * Finally, we set the bounds of the for loop in either
 * refine_degenerate or refine_generic.
 * We do so in a context where the pending constraints of the build
 * have been replaced by the guard of the current graft.
 */
static __isl_give isl_ast_graft *create_node_scaled(
	__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
	int degenerate, eliminated;
	isl_basic_set *hull;
	isl_basic_set *enforced;
	isl_set *guard, *hoisted;
	isl_ast_node *node = NULL;
	isl_ast_graft *graft;
	isl_ast_graft_list *children;
	isl_ast_build *sub_build;
	isl_ast_build *body_build;

	domain = isl_ast_build_eliminate_divs(build, domain);
	domain = isl_set_detect_equalities(domain);
	hull = isl_set_unshifted_simple_hull(isl_set_copy(domain));
	bounds = isl_basic_set_intersect(bounds, hull);
	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	depth = isl_ast_build_get_depth(build);
	sub_build = isl_ast_build_copy(build);
	bounds = isl_basic_set_remove_redundancies(bounds);
	bounds = isl_ast_build_specialize_basic_set(sub_build, bounds);
	sub_build = isl_ast_build_set_loop_bounds(sub_build,
		isl_basic_set_copy(bounds));
	degenerate = isl_ast_build_has_value(sub_build);
	eliminated = isl_ast_build_has_affine_value(sub_build, depth);
	if (degenerate < 0 || eliminated < 0)
		executed = isl_union_map_free(executed);
		bounds = isl_ast_build_compute_gist_basic_set(build, bounds);
	sub_build = isl_ast_build_set_pending_generated(sub_build,
		isl_basic_set_copy(bounds));
		executed = plug_in_values(executed, sub_build);
		node = create_for(build, degenerate);

	body_build = isl_ast_build_copy(sub_build);
	body_build = isl_ast_build_increase_depth(body_build);
		node = before_each_for(node, body_build);
	children = generate_next_level(executed,
		isl_ast_build_copy(body_build));

	enforced = extract_shared_enforced(children, build);
	guard = extract_pending(sub_build, enforced);
	hoisted = isl_ast_graft_list_extract_hoistable_guard(children, build);
	if (isl_set_n_basic_set(hoisted) > 1)
		children = isl_ast_graft_list_gist_guards(children,
			isl_set_copy(hoisted));
	guard = isl_set_intersect(guard, hoisted);
		guard = add_implied_guards(guard, degenerate, bounds, build);

	graft = isl_ast_graft_alloc_from_children(children,
		isl_set_copy(guard), enforced, build, sub_build);

		isl_ast_build *for_build;

		graft = isl_ast_graft_insert_for(graft, node);
		for_build = isl_ast_build_copy(build);
		for_build = isl_ast_build_replace_pending_by_guard(for_build,
			isl_set_copy(guard));
			graft = refine_degenerate(graft, for_build, sub_build);
			graft = refine_generic(graft, bounds,
				domain, for_build);
		isl_ast_build_free(for_build);

	isl_set_free(guard);

	graft = after_each_for(graft, body_build);

	isl_ast_build_free(body_build);
	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	isl_basic_set_free(bounds);
	isl_set_free(domain);
/* Internal data structure for checking if all constraints involving
 * the input dimension "depth" are such that the other coefficients
 * are multiples of "m", reducing "m" if they are not.
 * If "m" is reduced all the way down to "1", then the check has failed
 * and we break out of the iteration.
 */
struct isl_check_scaled_data {
/* If constraint "c" involves the input dimension data->depth,
 * then make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat constraint_check_scaled(__isl_take isl_constraint *c,
	void *user)
	struct isl_check_scaled_data *data = user;

	enum isl_dim_type t[] = { isl_dim_param, isl_dim_in, isl_dim_out,
				isl_dim_div };

	if (!isl_constraint_involves_dims(c, isl_dim_in, data->depth, 1)) {
		isl_constraint_free(c);

	for (i = 0; i < 4; ++i) {
		n = isl_constraint_dim(c, t[i]);
		for (j = 0; j < n; ++j) {
			if (t[i] == isl_dim_in && j == data->depth)
			if (!isl_constraint_involves_dims(c, t[i], j, 1))

			d = isl_constraint_get_coefficient_val(c, t[i], j);
			data->m = isl_val_gcd(data->m, d);
			if (isl_val_is_one(data->m))

	isl_constraint_free(c);

	return i < 4 ? isl_stat_error : isl_stat_ok;
/* For each constraint of "bmap" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat basic_map_check_scaled(__isl_take isl_basic_map *bmap,
	void *user)
	r = isl_basic_map_foreach_constraint(bmap,
		&constraint_check_scaled, user);
	isl_basic_map_free(bmap);
/* For each constraint of "map" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static isl_stat map_check_scaled(__isl_take isl_map *map, void *user)
	r = isl_map_foreach_basic_map(map, &basic_map_check_scaled, user);
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 *
 * Before moving on to the actual AST node construction in create_node_scaled,
 * we first check if the current dimension is strided and if we can scale
 * down this stride. Note that we only do this if the ast_build_scale_strides
 * option is set.
 *
 * In particular, let the current dimension take on values
 *
 *	f + s a
 *
 * with a an integer. We check if we can find an integer m that (obviously)
 * divides both f and s.
 *
 * If so, we check if the current dimension only appears in constraints
 * where the coefficients of the other variables are multiples of m.
 * We perform this extra check to avoid the risk of introducing
 * divisions by scaling down the current dimension.
 *
 * If so, we scale the current dimension down by a factor of m.
 * That is, we plug in
 *
 *	(1)	i = m i'
 *
 * Note that in principle we could always scale down strided loops
 * by plugging in
 *
 *	i = f + s i'
 *
 * but this may result in i' taking on larger values than the original i,
 * due to the shift by "f".
 * By contrast, the scaling in (1) can only reduce the (absolute) value "i".
 */
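/* Editor's note: a small worked example of the scaling step, added by the
 * editor and not part of the original source.  If the current dimension i
 * only takes on values i = 9 + 6 a (offset f = 9, stride s = 6), then
 * m = 3 divides both f and s.  Provided every constraint involving i only
 * has coefficients that are multiples of 3 for the other variables,
 * substituting i = 3 i' as in (1) yields a dimension i' = 3 + 2 a with the
 * smaller stride 2, without introducing any divisions.
 */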
static __isl_give isl_ast_graft *create_node(__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
	struct isl_check_scaled_data data;

	ctx = isl_ast_build_get_ctx(build);
	if (!isl_options_get_ast_build_scale_strides(ctx))
		return create_node_scaled(executed, bounds, domain, build);

	data.depth = isl_ast_build_get_depth(build);
	if (!isl_ast_build_has_stride(build, data.depth))
		return create_node_scaled(executed, bounds, domain, build);

	offset = isl_ast_build_get_offset(build, data.depth);
	data.m = isl_ast_build_get_stride(build, data.depth);
		offset = isl_aff_free(offset);
	offset = isl_aff_scale_down_val(offset, isl_val_copy(data.m));
	d = isl_aff_get_denominator_val(offset);
		executed = isl_union_map_free(executed);

	if (executed && isl_val_is_divisible_by(data.m, d))
		data.m = isl_val_div(data.m, d);
		data.m = isl_val_set_si(data.m, 1);

	if (!isl_val_is_one(data.m)) {
		if (isl_union_map_foreach_map(executed, &map_check_scaled,
						&data) < 0 &&
		    !isl_val_is_one(data.m))
			executed = isl_union_map_free(executed);

	if (!isl_val_is_one(data.m)) {
		isl_union_map *umap;

		space = isl_ast_build_get_space(build, 1);
		space = isl_space_map_from_set(space);
		ma = isl_multi_aff_identity(space);
		aff = isl_multi_aff_get_aff(ma, data.depth);
		aff = isl_aff_scale_val(aff, isl_val_copy(data.m));
		ma = isl_multi_aff_set_aff(ma, data.depth, aff);

		bounds = isl_basic_set_preimage_multi_aff(bounds,
			isl_multi_aff_copy(ma));
		domain = isl_set_preimage_multi_aff(domain,
			isl_multi_aff_copy(ma));
		map = isl_map_reverse(isl_map_from_multi_aff(ma));
		umap = isl_union_map_from_map(map);
		executed = isl_union_map_apply_domain(executed,
			isl_union_map_copy(umap));
		build = isl_ast_build_scale_down(build, isl_val_copy(data.m),
			umap);

	isl_aff_free(offset);
	isl_val_free(data.m);

	return create_node_scaled(executed, bounds, domain, build);
/* Add the basic set to the list that "user" points to.
 */
static isl_stat collect_basic_set(__isl_take isl_basic_set *bset, void *user)
	isl_basic_set_list **list = user;

	*list = isl_basic_set_list_add(*list, bset);
/* Extract the basic sets of "set" and collect them in an isl_basic_set_list.
 */
static __isl_give isl_basic_set_list *isl_basic_set_list_from_set(
	__isl_take isl_set *set)
	isl_basic_set_list *list;

	ctx = isl_set_get_ctx(set);

	n = isl_set_n_basic_set(set);
	list = isl_basic_set_list_alloc(ctx, n);
	if (isl_set_foreach_basic_set(set, &collect_basic_set, &list) < 0)
		list = isl_basic_set_list_free(list);
/* Generate code for the schedule domain "bounds"
 * and add the result to "list".
 *
 * We mainly detect strides here and check if the bounds do not
 * conflict with the current build domain
 * and then pass over control to create_node.
 *
 * "bounds" reflects the bounds on the current dimension and possibly
 * some extra conditions on outer dimensions.
 * It does not, however, include any divs involving the current dimension,
 * so it does not capture any stride constraints.
 * We therefore need to compute that part of the schedule domain that
 * intersects with "bounds" and derive the strides from the result.
 */
static __isl_give isl_ast_graft_list *add_node(
	__isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_ast_build *build)
	isl_ast_graft *graft;
	isl_set *domain = NULL;
	isl_union_set *uset;
	int empty, disjoint;

	uset = isl_union_set_from_basic_set(isl_basic_set_copy(bounds));
	executed = isl_union_map_intersect_domain(executed, uset);
	empty = isl_union_map_is_empty(executed);

	uset = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(uset);
	domain = isl_ast_build_specialize(build, domain);

	domain = isl_set_compute_divs(domain);
	domain = isl_ast_build_eliminate_inner(build, domain);
	disjoint = isl_set_is_disjoint(domain, build->domain);

	build = isl_ast_build_detect_strides(build, isl_set_copy(domain));

	graft = create_node(executed, bounds, domain,
		isl_ast_build_copy(build));
	list = isl_ast_graft_list_add(list, graft);
	isl_ast_build_free(build);

	list = isl_ast_graft_list_free(list);
	isl_set_free(domain);
	isl_basic_set_free(bounds);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
/* Does any element of i follow or coincide with any element of j
 * at the current depth for equal values of the outer dimensions?
 */
static isl_bool domain_follows_at_depth(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
	int depth = *(int *) user;
	isl_basic_map *test;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
		isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
			isl_dim_out, l);
	test = isl_basic_map_order_ge(test, isl_dim_in, depth,
		isl_dim_out, depth);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return empty < 0 ? isl_bool_error : !empty;
/* Split up each element of "list" into a part that is related to "bset"
 * according to "gt" and a part that is not.
 * Return a list that consists of "bset" and all the pieces.
 */
static __isl_give isl_basic_set_list *add_split_on(
    __isl_take isl_basic_set_list *list, __isl_take isl_basic_set *bset,
    __isl_keep isl_basic_map *gt)
{
    int i, n;
    isl_basic_set_list *res;

    if (!list)
        bset = isl_basic_set_free(bset);

    gt = isl_basic_map_copy(gt);
    gt = isl_basic_map_intersect_domain(gt, isl_basic_set_copy(bset));
    n = isl_basic_set_list_n_basic_set(list);
    res = isl_basic_set_list_from_basic_set(bset);
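    /* For each element of the input list, split off the part that lies
     * after "bset" at the current depth (the range of the restricted "gt")
     * and keep the remainder as separate disjoint pieces.
     */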
    for (i = 0; res && i < n; ++i) {
        isl_basic_set *bset;
        isl_set *set1, *set2;
        isl_basic_map *bmap;
        int empty;

        bset = isl_basic_set_list_get_basic_set(list, i);
        bmap = isl_basic_map_copy(gt);
        bmap = isl_basic_map_intersect_range(bmap, bset);
        bset = isl_basic_map_range(bmap);
        empty = isl_basic_set_is_empty(bset);
        if (empty < 0)
            res = isl_basic_set_list_free(res);
        if (empty) {
            isl_basic_set_free(bset);
            bset = isl_basic_set_list_get_basic_set(list, i);
            res = isl_basic_set_list_add(res, bset);
            continue;
        }

        res = isl_basic_set_list_add(res, isl_basic_set_copy(bset));
        set1 = isl_set_from_basic_set(bset);
        bset = isl_basic_set_list_get_basic_set(list, i);
        set2 = isl_set_from_basic_set(bset);
        set1 = isl_set_subtract(set2, set1);
        set1 = isl_set_make_disjoint(set1);

        res = isl_basic_set_list_concat(res,
            isl_basic_set_list_from_set(set1));
    }
    isl_basic_map_free(gt);
    isl_basic_set_list_free(list);

    return res;
}
static __isl_give isl_ast_graft_list *generate_sorted_domains(
    __isl_keep isl_basic_set_list *domain_list,
    __isl_keep isl_union_map *executed,
    __isl_keep isl_ast_build *build);
/* Internal data structure for add_nodes.
 *
 * "executed" and "build" are extra arguments to be passed to add_node.
 * "list" collects the results.
 */
struct isl_add_nodes_data {
    isl_union_map *executed;
    isl_ast_build *build;

    isl_ast_graft_list *list;
};
/* Generate code for the schedule domains in "scc"
 * and add the results to "list".
 *
 * The domains in "scc" form a strongly connected component in the ordering.
 * If the number of domains in "scc" is larger than 1, then this means
 * that we cannot determine a valid ordering for the domains in the component.
 * This should be fairly rare because the individual domains
 * have been made disjoint first.
 * The problem is that the domains may be integrally disjoint but not
 * rationally disjoint.  For example, we may have domains
 *
 *	{ [i,i] : 0 <= i <= 1 } and { [i,1-i] : 0 <= i <= 1 }
 *
 * These two domains have an empty intersection, but their rational
 * relaxations do intersect.  It is impossible to order these domains
 * in the second dimension because the first should be ordered before
 * the second for outer dimension equal to 0, while it should be ordered
 * after for outer dimension equal to 1.
 *
 * This may happen in particular in case of unrolling since the domain
 * of each slice is replaced by its simple hull.
 *
 * For each basic set i in "scc" and for each of the following basic sets j,
 * we split off that part of the basic set i that shares the outer dimensions
 * with j and lies before j in the current dimension.
 * We collect all the pieces in a new list that replaces "scc".
 *
 * While the elements in "scc" should be disjoint, we double-check
 * this property to avoid running into an infinite recursion in case
 * they intersect due to some internal error.
 */
static isl_stat add_nodes(__isl_take isl_basic_set_list *scc, void *user)
{
    struct isl_add_nodes_data *data = user;
    int i, n, depth;
    isl_basic_set *bset, *first;
    isl_basic_set_list *list;
    isl_space *space;
    isl_basic_map *gt;

    n = isl_basic_set_list_n_basic_set(scc);
    bset = isl_basic_set_list_get_basic_set(scc, 0);
    if (n == 1) {
        isl_basic_set_list_free(scc);
        data->list = add_node(data->list,
            isl_union_map_copy(data->executed), bset,
            isl_ast_build_copy(data->build));
        return data->list ? isl_stat_ok : isl_stat_error;
    }
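    /* Construct a relation "gt" on the schedule space that connects
     * elements sharing the first "depth" dimensions and orders them
     * by the current dimension.
     */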
    depth = isl_ast_build_get_depth(data->build);
    space = isl_basic_set_get_space(bset);
    space = isl_space_map_from_set(space);
    gt = isl_basic_map_universe(space);
    for (i = 0; i < depth; ++i)
        gt = isl_basic_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
    gt = isl_basic_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);

    first = isl_basic_set_copy(bset);
    list = isl_basic_set_list_from_basic_set(bset);
    for (i = 1; i < n; ++i) {
        int disjoint;

        bset = isl_basic_set_list_get_basic_set(scc, i);

        disjoint = isl_basic_set_is_disjoint(bset, first);
        if (disjoint < 0)
            list = isl_basic_set_list_free(list);
        else if (!disjoint)
            isl_die(isl_basic_set_list_get_ctx(scc),
                isl_error_internal,
                "basic sets in scc are assumed to be disjoint",
                list = isl_basic_set_list_free(list));

        list = add_split_on(list, bset, gt);
    }
    isl_basic_set_free(first);
    isl_basic_map_free(gt);
    isl_basic_set_list_free(scc);
    scc = list;
    data->list = isl_ast_graft_list_concat(data->list,
        generate_sorted_domains(scc, data->executed, data->build));
    isl_basic_set_list_free(scc);

    return data->list ? isl_stat_ok : isl_stat_error;
}
/* Sort the domains in "domain_list" according to the execution order
 * at the current depth (for equal values of the outer dimensions),
 * generate code for each of them, collecting the results in a list.
 * If no code is generated (because the intersection of the inverse schedule
 * with the domains turns out to be empty), then an empty list is returned.
 *
 * The caller is responsible for ensuring that the basic sets in "domain_list"
 * are pair-wise disjoint.  It can, however, in principle happen that
 * two basic sets should be ordered one way for one value of the outer
 * dimensions and the other way for some other value of the outer dimensions.
 * We therefore play safe and look for strongly connected components.
 * The function add_nodes takes care of handling non-trivial components.
 */
static __isl_give isl_ast_graft_list *generate_sorted_domains(
    __isl_keep isl_basic_set_list *domain_list,
    __isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
    isl_ctx *ctx;
    struct isl_add_nodes_data data;
    int depth;
    int n;

    if (!domain_list)
        return NULL;

    ctx = isl_basic_set_list_get_ctx(domain_list);
    n = isl_basic_set_list_n_basic_set(domain_list);
    data.list = isl_ast_graft_list_alloc(ctx, n);
    if (n == 0)
        return data.list;
    if (n == 1)
        return add_node(data.list, isl_union_map_copy(executed),
            isl_basic_set_list_get_basic_set(domain_list, 0),
            isl_ast_build_copy(build));

    depth = isl_ast_build_get_depth(build);
    data.executed = executed;
    data.build = build;
    if (isl_basic_set_list_foreach_scc(domain_list,
            &domain_follows_at_depth, &depth,
            &add_nodes, &data) < 0)
        data.list = isl_ast_graft_list_free(data.list);

    return data.list;
}
/* Do i and j share any values for the outer dimensions?
 */
static isl_bool shared_outer(__isl_keep isl_basic_set *i,
    __isl_keep isl_basic_set *j, void *user)
{
    int depth = *(int *) user;
    isl_basic_map *test;
    isl_bool empty;
    int l;

    test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
        isl_basic_set_copy(j));
    for (l = 0; l < depth; ++l)
        test = isl_basic_map_equate(test, isl_dim_in, l,
            isl_dim_out, l);
    empty = isl_basic_map_is_empty(test);
    isl_basic_map_free(test);

    return empty < 0 ? isl_bool_error : !empty;
}
/* Internal data structure for generate_sorted_domains_wrap.
 *
 * "n" is the total number of basic sets
 * "executed" and "build" are extra arguments to be passed
 * to generate_sorted_domains.
 *
 * "single" is set to 1 by generate_sorted_domains_wrap if there
 * is only a single component.
 * "list" collects the results.
 */
struct isl_ast_generate_parallel_domains_data {
    int n;
    isl_union_map *executed;
    isl_ast_build *build;

    int single;
    isl_ast_graft_list *list;
};
/* Call generate_sorted_domains on "scc", fuse the result into a list
 * with either zero or one graft and collect these single element
 * lists into data->list.
 *
 * If there is only one component, i.e., if the number of basic sets
 * in the current component is equal to the total number of basic sets,
 * then data->single is set to 1 and the result of generate_sorted_domains
 * is not fused.
 */
static isl_stat generate_sorted_domains_wrap(__isl_take isl_basic_set_list *scc,
    void *user)
{
    struct isl_ast_generate_parallel_domains_data *data = user;
    isl_ast_graft_list *list;

    list = generate_sorted_domains(scc, data->executed, data->build);
    data->single = isl_basic_set_list_n_basic_set(scc) == data->n;
    if (!data->single)
        list = isl_ast_graft_list_fuse(list, data->build);
    if (!data->list)
        data->list = list;
    else
        data->list = isl_ast_graft_list_concat(data->list, list);

    isl_basic_set_list_free(scc);
    if (!data->list)
        return isl_stat_error;

    return isl_stat_ok;
}
/* Look for any (weakly connected) components in the "domain_list"
 * of domains that share some values of the outer dimensions.
 * That is, domains in different components do not share any values
 * of the outer dimensions.  This means that these components
 * can be freely reordered.
 * Within each of the components, we sort the domains according
 * to the execution order at the current depth.
 *
 * If there is more than one component, then generate_sorted_domains_wrap
 * fuses the result of each call to generate_sorted_domains
 * into a list with either zero or one graft and collects these (at most)
 * single element lists into a bigger list.  This means that the elements of
 * the final list can be freely reordered.  In particular, we sort them
 * according to an arbitrary but fixed ordering to ease merging of
 * graft lists from different components.
 */
static __isl_give isl_ast_graft_list *generate_parallel_domains(
    __isl_keep isl_basic_set_list *domain_list,
    __isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
    int depth;
    struct isl_ast_generate_parallel_domains_data data;

    if (!domain_list)
        return NULL;

    data.n = isl_basic_set_list_n_basic_set(domain_list);
    if (data.n <= 1)
        return generate_sorted_domains(domain_list, executed, build);

    depth = isl_ast_build_get_depth(build);
    data.list = NULL;
    data.executed = executed;
    data.build = build;
    data.single = 0;
    if (isl_basic_set_list_foreach_scc(domain_list, &shared_outer, &depth,
            &generate_sorted_domains_wrap,
            &data) < 0)
        data.list = isl_ast_graft_list_free(data.list);
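    /* If everything ended up in a single component, the list order already
     * reflects the execution order.  Otherwise, the (at most) single-element
     * lists can be reordered freely, so sort them by guard to obtain a
     * fixed ordering.
     */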
    if (!data.single)
        data.list = isl_ast_graft_list_sort_guard(data.list);

    return data.list;
}
/* Internal data for separate_domain.
 *
 * "explicit" is set if we only want to use explicit bounds.
 *
 * "domain" collects the separated domains.
 */
struct isl_separate_domain_data {
    isl_ast_build *build;
    int explicit;
    isl_set *domain;
};
/* Extract implicit bounds on the current dimension for the executed "map".
 *
 * The domain of "map" may involve inner dimensions, so we
 * need to eliminate them.
 */
static __isl_give isl_set *implicit_bounds(__isl_take isl_map *map,
    __isl_keep isl_ast_build *build)
{
    isl_set *domain;

    domain = isl_map_domain(map);
    domain = isl_ast_build_eliminate(build, domain);

    return domain;
}
/* Extract explicit bounds on the current dimension for the executed "map".
 *
 * Rather than eliminating the inner dimensions as in implicit_bounds,
 * we simply drop any constraints involving those inner dimensions.
 * The idea is that most bounds that are implied by constraints on the
 * inner dimensions will be enforced by for loops and not by explicit guards.
 * There is then no need to separate along those bounds.
 */
static __isl_give isl_set *explicit_bounds(__isl_take isl_map *map,
    __isl_keep isl_ast_build *build)
{
    isl_set *domain;
    int depth;
    unsigned dim;

    dim = isl_map_dim(map, isl_dim_out);
    map = isl_map_drop_constraints_involving_dims(map, isl_dim_out, 0, dim);

    domain = isl_map_domain(map);
    depth = isl_ast_build_get_depth(build);
    dim = isl_set_dim(domain, isl_dim_set);
    domain = isl_set_detect_equalities(domain);
    domain = isl_set_drop_constraints_involving_dims(domain,
        isl_dim_set, depth + 1, dim - (depth + 1));
    domain = isl_set_remove_divs_involving_dims(domain,
        isl_dim_set, depth, 1);
    domain = isl_set_remove_unknown_divs(domain);

    return domain;
}
/* Split data->domain into pieces that intersect with the range of "map"
 * and pieces that do not intersect with the range of "map"
 * and then add that part of the range of "map" that does not intersect
 * with data->domain.
 */
static isl_stat separate_domain(__isl_take isl_map *map, void *user)
{
    struct isl_separate_domain_data *data = user;
    isl_set *domain;
    isl_set *d1, *d2;

    if (data->explicit)
        domain = explicit_bounds(map, data->build);
    else
        domain = implicit_bounds(map, data->build);

    domain = isl_set_coalesce(domain);
    domain = isl_set_make_disjoint(domain);
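    /* "d1" is the part of the new bounds not yet covered by data->domain and
     * "d2" is the part of data->domain not covered by the new bounds.
     * Adding both back to the common part keeps data->domain split along
     * the boundaries of both.
     */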
    d1 = isl_set_subtract(isl_set_copy(domain), isl_set_copy(data->domain));
    d2 = isl_set_subtract(isl_set_copy(data->domain), isl_set_copy(domain));
    data->domain = isl_set_intersect(data->domain, domain);
    data->domain = isl_set_union(data->domain, d1);
    data->domain = isl_set_union(data->domain, d2);

    return isl_stat_ok;
}
/* Separate the schedule domains of "executed".
 *
 * That is, break up the domain of "executed" into basic sets,
 * such that for each basic set S, every element in S is associated with
 * the same domain spaces.
 *
 * "space" is the (single) domain space of "executed".
 */
static __isl_give isl_set *separate_schedule_domains(
    __isl_take isl_space *space, __isl_take isl_union_map *executed,
    __isl_keep isl_ast_build *build)
{
    struct isl_separate_domain_data data = { build };
    isl_ctx *ctx;

    ctx = isl_ast_build_get_ctx(build);
    data.explicit = isl_options_get_ast_build_separation_bounds(ctx) ==
        ISL_AST_BUILD_SEPARATION_BOUNDS_EXPLICIT;
    data.domain = isl_set_empty(space);
    if (isl_union_map_foreach_map(executed, &separate_domain, &data) < 0)
        data.domain = isl_set_free(data.domain);

    isl_union_map_free(executed);
    return data.domain;
}
/* Temporary data used during the search for a lower bound for unrolling.
 *
 * "build" is the build in which the unrolling will be performed
 * "domain" is the original set for which to find a lower bound
 * "depth" is the dimension for which to find a lower bound
 * "expansion" is the expansion that needs to be applied to "domain"
 * in the unrolling that will be performed
 *
 * "lower" is the best lower bound found so far.  It is NULL if we have not
 * found any yet.
 * "n" is the corresponding size.  If lower is NULL, then the value of n
 * is undefined.
 * "n_div" is the maximal number of integer divisions in the first
 * unrolled iteration (after expansion).  It is set to -1 if it hasn't
 * been computed yet.
 */
struct isl_find_unroll_data {
    isl_ast_build *build;
    isl_set *domain;
    int depth;
    isl_basic_map *expansion;

    isl_aff *lower;
    int *n;
    int n_div;
};
/* Return the constraint
 *
 *	i_"depth" = aff + offset
 */
static __isl_give isl_constraint *at_offset(int depth, __isl_keep isl_aff *aff,
    int offset)
{
    aff = isl_aff_copy(aff);
    aff = isl_aff_add_coefficient_si(aff, isl_dim_in, depth, -1);
    aff = isl_aff_add_constant_si(aff, offset);
    return isl_equality_from_aff(aff);
}
/* Update *user to the number of integer divisions in the first element
 * of "ma", if it is larger than the current value.
 */
static isl_stat update_n_div(__isl_take isl_set *set,
    __isl_take isl_multi_aff *ma, void *user)
{
    isl_aff *aff;
    int *n = user;
    int n_div;

    aff = isl_multi_aff_get_aff(ma, 0);
    n_div = isl_aff_dim(aff, isl_dim_div);
    isl_aff_free(aff);
    isl_multi_aff_free(ma);
    isl_set_free(set);

    if (n_div > *n)
        *n = n_div;

    return aff ? isl_stat_ok : isl_stat_error;
}
/* Get the number of integer divisions in the expression for the iterator
 * value at the first slice in the unrolling based on lower bound "lower",
 * taking into account the expansion that needs to be performed on this slice.
 */
static int get_expanded_n_div(struct isl_find_unroll_data *data,
    __isl_keep isl_aff *lower)
{
    isl_constraint *c;
    isl_set *set;
    isl_map *it_map, *expansion;
    isl_pw_multi_aff *pma;
    int n;

    c = at_offset(data->depth, lower, 0);
    set = isl_set_copy(data->domain);
    set = isl_set_add_constraint(set, c);
    expansion = isl_map_from_basic_map(isl_basic_map_copy(data->expansion));
    set = isl_set_apply(set, expansion);
    it_map = isl_ast_build_map_to_iterator(data->build, set);
    pma = isl_pw_multi_aff_from_map(it_map);
    n = 0;
    if (isl_pw_multi_aff_foreach_piece(pma, &update_n_div, &n) < 0)
        n = -1;
    isl_pw_multi_aff_free(pma);

    return n;
}
/* Is the lower bound "lower" with corresponding iteration count "n"
 * better than the one stored in "data"?
 * If there is no upper bound on the iteration count ("n" is infinity) or
 * if the count is too large, then we cannot use this lower bound.
 * Otherwise, if there was no previous lower bound or
 * if the iteration count of the new lower bound is smaller than
 * the iteration count of the previous lower bound, then we consider
 * the new lower bound to be better.
 * If the iteration count is the same, then compare the number
 * of integer divisions that would be needed to express
 * the iterator value at the first slice in the unrolling
 * according to the lower bound.  If we end up computing this
 * number, then store the lowest value in data->n_div.
 */
static int is_better_lower_bound(struct isl_find_unroll_data *data,
    __isl_keep isl_aff *lower, __isl_keep isl_val *n)
{
    int cmp;
    int n_div;

    if (!n)
        return -1;
    if (isl_val_is_infty(n))
        return 0;
    if (isl_val_cmp_si(n, INT_MAX) > 0)
        return 0;
    if (!data->lower)
        return 1;
    cmp = isl_val_cmp_si(n, *data->n);
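    /* A smaller iteration count wins; on a tie, prefer the bound that
     * requires fewer integer divisions for the first unrolled slice.
     */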
    if (cmp < 0)
        return 1;
    if (cmp > 0)
        return 0;
    if (data->n_div < 0)
        data->n_div = get_expanded_n_div(data, data->lower);
    if (data->n_div < 0)
        return -1;
    if (data->n_div == 0)
        return 0;
    n_div = get_expanded_n_div(data, lower);
    if (n_div < 0)
        return -1;
    if (n_div >= data->n_div)
        return 0;
    data->n_div = n_div;

    return 1;
}
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 *
 * If "c" does not involve the dimension at the current depth,
 * then we cannot use it.
 * Otherwise, let "c" be of the form
 *
 *	i >= f(j)/a
 *
 * We compute the maximal value of
 *
 *	-ceil(f(j)/a)) + i + 1
 *
 * over the domain.  If there is such a value "n", then we know
 *
 *	-ceil(f(j)/a)) + i + 1 <= n
 *
 * or
 *
 *	i < ceil(f(j)/a)) + n
 *
 * meaning that we can use ceil(f(j)/a)) as a lower bound for unrolling.
 * We just need to check if we have found any lower bound before and
 * if the new lower bound is better (smaller n or fewer integer divisions)
 * than the previously found lower bounds.
 */
static isl_stat update_unrolling_lower_bound(struct isl_find_unroll_data *data,
    __isl_keep isl_constraint *c)
{
    isl_aff *aff, *lower;
    isl_val *max;
    int better;

    if (!isl_constraint_is_lower_bound(c, isl_dim_set, data->depth))
        return isl_stat_ok;

    lower = isl_constraint_get_bound(c, isl_dim_set, data->depth);
    lower = isl_aff_ceil(lower);
    aff = isl_aff_copy(lower);
    aff = isl_aff_neg(aff);
    aff = isl_aff_add_coefficient_si(aff, isl_dim_in, data->depth, 1);
    aff = isl_aff_add_constant_si(aff, 1);
    max = isl_set_max_val(data->domain, aff);
    isl_aff_free(aff);

    better = is_better_lower_bound(data, lower, max);
    if (better < 0 || !better) {
        isl_val_free(max);
        isl_aff_free(lower);
        return better < 0 ? isl_stat_error : isl_stat_ok;
    }

    isl_aff_free(data->lower);
    data->lower = lower;
    *data->n = isl_val_get_num_si(max);
    isl_val_free(max);

    return isl_stat_ok;
}
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 */
static isl_stat constraint_find_unroll(__isl_take isl_constraint *c, void *user)
{
    struct isl_find_unroll_data *data;
    isl_stat r;

    data = (struct isl_find_unroll_data *) user;
    r = update_unrolling_lower_bound(data, c);
    isl_constraint_free(c);

    return r;
}
/* Look for a lower bound l(i) on the dimension at "depth"
 * and a size n such that "domain" is a subset of
 *
 *	{ [i] : l(i) <= i_d < l(i) + n }
 *
 * where d is "depth" and l(i) depends only on earlier dimensions.
 * Furthermore, try and find a lower bound such that n is as small as possible.
 * In particular, "n" needs to be finite.
 * "build" is the build in which the unrolling will be performed.
 * "expansion" is the expansion that needs to be applied to "domain"
 * in the unrolling that will be performed.
 *
 * Inner dimensions have been eliminated from "domain" by the caller.
 *
 * We first construct a collection of lower bounds on the input set
 * by computing its simple hull.  We then iterate through them,
 * discarding those that we cannot use (either because they do not
 * involve the dimension at "depth" or because they have no corresponding
 * upper bound, meaning that "n" would be unbounded) and pick out the
 * best from the remaining ones.
 *
 * If we cannot find a suitable lower bound, then we consider that
 * to be an error.
 */
static __isl_give isl_aff *find_unroll_lower_bound(
    __isl_keep isl_ast_build *build, __isl_keep isl_set *domain,
    int depth, __isl_keep isl_basic_map *expansion, int *n)
{
    struct isl_find_unroll_data data =
        { build, domain, depth, expansion, NULL, n, -1 };
    isl_basic_set *hull;

    hull = isl_set_simple_hull(isl_set_copy(domain));

    if (isl_basic_set_foreach_constraint(hull,
            &constraint_find_unroll, &data) < 0)
        goto error;

    isl_basic_set_free(hull);

    if (!data.lower)
        isl_die(isl_set_get_ctx(domain), isl_error_invalid,
            "cannot find lower bound for unrolling", return NULL);

    return data.lower;
error:
    isl_basic_set_free(hull);
    return isl_aff_free(data.lower);
}
/* Call "fn" on each iteration of the current dimension of "domain".
 * If "init" is not NULL, then it is called with the number of
 * iterations before any call to "fn".
 * Return -1 on failure.
 *
 * Since we are going to be iterating over the individual values,
 * we first check if there are any strides on the current dimension.
 * If there is, we rewrite the current dimension i as
 *
 *	i = stride i' + offset
 *
 * and then iterate over individual values of i' instead.
 *
 * We then look for a lower bound on i' and a size such that the domain
 * is a subset of
 *
 *	{ [j,i'] : l(j) <= i' < l(j) + n }
 *
 * and then take slices of the domain at values of i'
 * between l(j) and l(j) + n - 1.
 *
 * We compute the unshifted simple hull of each slice to ensure that
 * we have a single basic set per offset.  The slicing constraint
 * may get simplified away before the unshifted simple hull is taken
 * and may therefore in some rare cases disappear from the result.
 * We therefore explicitly add the constraint back after computing
 * the unshifted simple hull to ensure that the basic sets
 * remain disjoint.  The constraints that are dropped by taking the hull
 * will be taken into account at the next level, as in the case of the
 * atomic option.
 *
 * Finally, we map i' back to i and call "fn".
 */
static int foreach_iteration(__isl_take isl_set *domain,
    __isl_keep isl_ast_build *build, int (*init)(int n, void *user),
    int (*fn)(__isl_take isl_basic_set *bset, void *user), void *user)
{
    int i, n;
    int empty;
    int depth;
    isl_multi_aff *expansion;
    isl_basic_map *bmap;
    isl_aff *lower = NULL;
    isl_ast_build *stride_build;

    depth = isl_ast_build_get_depth(build);

    domain = isl_ast_build_eliminate_inner(build, domain);
    domain = isl_set_intersect(domain, isl_ast_build_get_domain(build));
    stride_build = isl_ast_build_copy(build);
    stride_build = isl_ast_build_detect_strides(stride_build,
        isl_set_copy(domain));
    expansion = isl_ast_build_get_stride_expansion(stride_build);

    domain = isl_set_preimage_multi_aff(domain,
        isl_multi_aff_copy(expansion));
    domain = isl_ast_build_eliminate_divs(stride_build, domain);
    isl_ast_build_free(stride_build);

    bmap = isl_basic_map_from_multi_aff(expansion);

    empty = isl_set_is_empty(domain);
    if (empty < 0) {
        n = -1;
    } else if (empty) {
        n = 0;
    } else {
        lower = find_unroll_lower_bound(build, domain, depth, bmap, &n);
        if (!lower)
            n = -1;
    }
    if (n >= 0 && init && init(n, user) < 0)
        n = -1;
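    /* Each slice fixes the (possibly stride-compressed) iterator to
     * lower + i, is turned into a single basic set by the unshifted
     * simple hull and is then mapped back to the original iterator
     * through "bmap" before being passed to "fn".
     */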
    for (i = 0; i < n; ++i) {
        isl_set *set;
        isl_basic_set *bset;
        isl_constraint *slice;

        slice = at_offset(depth, lower, i);
        set = isl_set_copy(domain);
        set = isl_set_add_constraint(set, isl_constraint_copy(slice));
        bset = isl_set_unshifted_simple_hull(set);
        bset = isl_basic_set_add_constraint(bset, slice);
        bset = isl_basic_set_apply(bset, isl_basic_map_copy(bmap));

        if (fn(bset, user) < 0)
            break;
    }

    isl_aff_free(lower);
    isl_set_free(domain);
    isl_basic_map_free(bmap);

    return n < 0 || i < n ? -1 : 0;
}
/* Data structure for storing the results and the intermediate objects
 * of compute_domains.
 *
 * "list" is the main result of the function and contains a list
 * of disjoint basic sets for which code should be generated.
 *
 * "executed" and "build" are inputs to compute_domains.
 * "schedule_domain" is the domain of "executed".
 *
 * "option" contains the domains at the current depth that should be
 * atomic, separated or unrolled.  These domains are as specified by
 * the user, except that inner dimensions have been eliminated and
 * that they have been made pair-wise disjoint.
 *
 * "sep_class" contains the user-specified split into separation classes
 * specialized to the current depth.
 * "done" contains the union of the separation domains that have already
 * been handled.
 */
struct isl_codegen_domains {
    isl_basic_set_list *list;

    isl_union_map *executed;
    isl_ast_build *build;
    isl_set *schedule_domain;

    isl_set *option[4];

    isl_map *sep_class;
    isl_set *done;
};
/* Internal data structure for do_unroll.
 *
 * "domains" stores the results of compute_domains.
 * "class_domain" is the original class domain passed to do_unroll.
 * "unroll_domain" collects the unrolled iterations.
 */
struct isl_ast_unroll_data {
    struct isl_codegen_domains *domains;
    isl_set *class_domain;
    isl_set *unroll_domain;
};
/* Given an iteration of an unrolled domain represented by "bset",
 * add it to data->domains->list.
 * Since we may have dropped some constraints, we intersect with
 * the class domain again to ensure that each element in the list
 * is disjoint from the other class domains.
 */
static int do_unroll_iteration(__isl_take isl_basic_set *bset, void *user)
{
    struct isl_ast_unroll_data *data = user;
    isl_set *set;
    isl_basic_set_list *list;

    set = isl_set_from_basic_set(bset);
    data->unroll_domain = isl_set_union(data->unroll_domain,
        isl_set_copy(set));
    set = isl_set_intersect(set, isl_set_copy(data->class_domain));
    set = isl_set_make_disjoint(set);
    list = isl_basic_set_list_from_set(set);
    data->domains->list = isl_basic_set_list_concat(data->domains->list,
        list);

    return 0;
}
/* Extend domains->list with a list of basic sets, one for each value
 * of the current dimension in "domain" and remove the corresponding
 * sets from the class domain.  Return the updated class domain.
 * The divs that involve the current dimension have not been projected out
 * from this domain.
 *
 * We call foreach_iteration to iterate over the individual values and
 * in do_unroll_iteration we collect the individual basic sets in
 * domains->list and their union in data->unroll_domain, which is then
 * used to update the class domain.
 */
static __isl_give isl_set *do_unroll(struct isl_codegen_domains *domains,
    __isl_take isl_set *domain, __isl_take isl_set *class_domain)
{
    struct isl_ast_unroll_data data;

    if (!domain)
        return isl_set_free(class_domain);
    if (!class_domain)
        return isl_set_free(domain);

    data.domains = domains;
    data.class_domain = class_domain;
    data.unroll_domain = isl_set_empty(isl_set_get_space(domain));

    if (foreach_iteration(domain, domains->build, NULL,
            &do_unroll_iteration, &data) < 0)
        data.unroll_domain = isl_set_free(data.unroll_domain);

    class_domain = isl_set_subtract(class_domain, data.unroll_domain);

    return class_domain;
}
/* Add domains to domains->list for each individual value of the current
 * dimension, for that part of the schedule domain that lies in the
 * intersection of the option domain and the class domain.
 * Remove the corresponding sets from the class domain and
 * return the updated class domain.
 *
 * We first break up the unroll option domain into individual pieces
 * and then handle each of them separately.  The unroll option domain
 * has been made disjoint in compute_domains_init_options,
 *
 * Note that we actively want to combine different pieces of the
 * schedule domain that have the same value at the current dimension.
 * We therefore need to break up the unroll option domain before
 * intersecting with class and schedule domain, hoping that the
 * unroll option domain specified by the user is relatively simple.
 */
static __isl_give isl_set *compute_unroll_domains(
    struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
    isl_set *unroll_domain;
    isl_basic_set_list *unroll_list;
    int i, n;
    int empty;

    empty = isl_set_is_empty(domains->option[isl_ast_loop_unroll]);
    if (empty < 0)
        return isl_set_free(class_domain);
    if (empty)
        return class_domain;

    unroll_domain = isl_set_copy(domains->option[isl_ast_loop_unroll]);
    unroll_list = isl_basic_set_list_from_set(unroll_domain);

    n = isl_basic_set_list_n_basic_set(unroll_list);
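    /* Handle each piece of the unroll option domain separately,
     * intersecting it with the class and schedule domains and
     * skipping pieces for which this intersection is empty.
     */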
    for (i = 0; i < n; ++i) {
        isl_basic_set *bset;

        bset = isl_basic_set_list_get_basic_set(unroll_list, i);
        unroll_domain = isl_set_from_basic_set(bset);
        unroll_domain = isl_set_intersect(unroll_domain,
            isl_set_copy(class_domain));
        unroll_domain = isl_set_intersect(unroll_domain,
            isl_set_copy(domains->schedule_domain));

        empty = isl_set_is_empty(unroll_domain);
        if (empty >= 0 && empty) {
            isl_set_free(unroll_domain);
            continue;
        }

        class_domain = do_unroll(domains, unroll_domain, class_domain);
    }

    isl_basic_set_list_free(unroll_list);

    return class_domain;
}
/* Try and construct a single basic set that includes the intersection of
 * the schedule domain, the atomic option domain and the class domain.
 * Add the resulting basic set(s) to domains->list and remove them
 * from class_domain.  Return the updated class domain.
 *
 * We construct a single domain rather than trying to combine
 * the schedule domains of individual domains because we are working
 * within a single component so that non-overlapping schedule domains
 * should already have been separated.
 * We do however need to make sure that this single domain is a subset
 * of the class domain so that it would not intersect with any other
 * class domains.  This means that we may end up splitting up the atomic
 * domain in case separation classes are being used.
 *
 * "domain" is the intersection of the schedule domain and the class domain,
 * with inner dimensions projected out.
 */
static __isl_give isl_set *compute_atomic_domain(
    struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
    isl_basic_set *bset;
    isl_basic_set_list *list;
    isl_set *domain, *atomic_domain;
    int empty;

    domain = isl_set_copy(domains->option[isl_ast_loop_atomic]);
    domain = isl_set_intersect(domain, isl_set_copy(class_domain));
    domain = isl_set_intersect(domain,
        isl_set_copy(domains->schedule_domain));
    empty = isl_set_is_empty(domain);
    if (empty < 0)
        class_domain = isl_set_free(class_domain);
    if (empty) {
        isl_set_free(domain);
        return class_domain;
    }
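    /* Combine what is left into a single basic set and remove the possibly
     * larger result from the class domain so that it cannot be picked up
     * again by other options or other classes.
     */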
    domain = isl_ast_build_eliminate(domains->build, domain);
    domain = isl_set_coalesce(domain);
    bset = isl_set_unshifted_simple_hull(domain);
    domain = isl_set_from_basic_set(bset);
    atomic_domain = isl_set_copy(domain);
    domain = isl_set_intersect(domain, isl_set_copy(class_domain));
    class_domain = isl_set_subtract(class_domain, atomic_domain);
    domain = isl_set_make_disjoint(domain);
    list = isl_basic_set_list_from_set(domain);
    domains->list = isl_basic_set_list_concat(domains->list, list);

    return class_domain;
}
/* Split up the schedule domain into uniform basic sets,
 * in the sense that each element in a basic set is associated to
 * elements of the same domains, and add the result to domains->list.
 * Do this for that part of the schedule domain that lies in the
 * intersection of "class_domain" and the separate option domain.
 *
 * "class_domain" may or may not include the constraints
 * of the schedule domain, but this does not make a difference
 * since we are going to intersect it with the domain of the inverse schedule.
 * If it includes schedule domain constraints, then they may involve
 * inner dimensions, but we will eliminate them in separation_domain.
 */
static int compute_separate_domain(struct isl_codegen_domains *domains,
    __isl_keep isl_set *class_domain)
{
    isl_space *space;
    isl_set *domain;
    isl_union_map *executed;
    isl_basic_set_list *list;
    int empty;

    domain = isl_set_copy(domains->option[isl_ast_loop_separate]);
    domain = isl_set_intersect(domain, isl_set_copy(class_domain));
    executed = isl_union_map_copy(domains->executed);
    executed = isl_union_map_intersect_domain(executed,
        isl_union_set_from_set(domain));
    empty = isl_union_map_is_empty(executed);
    if (empty < 0 || empty) {
        isl_union_map_free(executed);
        return empty < 0 ? -1 : 0;
    }

    space = isl_set_get_space(class_domain);
    domain = separate_schedule_domains(space, executed, domains->build);

    list = isl_basic_set_list_from_set(domain);
    domains->list = isl_basic_set_list_concat(domains->list, list);

    return 0;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the given separation class domain.
 *
 * If any separation classes have been defined, then "class_domain"
 * is the domain of the current class and does not refer to inner dimensions.
 * Otherwise, "class_domain" is the universe domain.
 *
 * We first make sure that the class domain is disjoint from
 * previously considered class domains.
 *
 * The separate domains can be computed directly from the "class_domain".
 *
 * The unroll, atomic and remainder domains need the constraints
 * from the schedule domain.
 *
 * For unrolling, the actual schedule domain is needed (with divs that
 * may refer to the current dimension) so that stride detection can be
 * performed.
 *
 * For atomic and remainder domains, inner dimensions and divs involving
 * the current dimensions should be eliminated.
 * In case we are working within a separation class, we need to intersect
 * the result with the current "class_domain" to ensure that the domains
 * are disjoint from those generated from other class domains.
 *
 * The domain that has been made atomic may be larger than specified
 * by the user since it needs to be representable as a single basic set.
 * This possibly larger domain is removed from class_domain by
 * compute_atomic_domain.  It is computed first so that the extended domain
 * would not overlap with any domains computed before.
 * Similarly, the unrolled domains may have some constraints removed and
 * may therefore also be larger than specified by the user.
 *
 * If anything is left after handling separate, unroll and atomic,
 * we split it up into basic sets and append the basic sets to domains->list.
 */
static isl_stat compute_partial_domains(struct isl_codegen_domains *domains,
    __isl_take isl_set *class_domain)
{
    isl_basic_set_list *list;
    isl_set *domain;

    class_domain = isl_set_subtract(class_domain,
        isl_set_copy(domains->done));
    domains->done = isl_set_union(domains->done,
        isl_set_copy(class_domain));

    class_domain = compute_atomic_domain(domains, class_domain);
    class_domain = compute_unroll_domains(domains, class_domain);

    domain = isl_set_copy(class_domain);

    if (compute_separate_domain(domains, domain) < 0)
        goto error;
    domain = isl_set_subtract(domain,
        isl_set_copy(domains->option[isl_ast_loop_separate]));

    domain = isl_set_intersect(domain,
        isl_set_copy(domains->schedule_domain));

    domain = isl_ast_build_eliminate(domains->build, domain);
    domain = isl_set_intersect(domain, isl_set_copy(class_domain));

    domain = isl_set_coalesce(domain);
    domain = isl_set_make_disjoint(domain);

    list = isl_basic_set_list_from_set(domain);
    domains->list = isl_basic_set_list_concat(domains->list, list);

    isl_set_free(class_domain);

    return isl_stat_ok;
error:
    isl_set_free(domain);
    isl_set_free(class_domain);
    return isl_stat_error;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the separation class identified by "pnt".
 *
 * We extract the corresponding class domain from domains->sep_class,
 * eliminate inner dimensions and pass control to compute_partial_domains.
 */
static isl_stat compute_class_domains(__isl_take isl_point *pnt, void *user)
{
    struct isl_codegen_domains *domains = user;
    isl_set *class_set;
    isl_set *domain;
    int disjoint;

    class_set = isl_set_from_point(pnt);
    domain = isl_map_domain(isl_map_intersect_range(
        isl_map_copy(domains->sep_class), class_set));
    domain = isl_ast_build_compute_gist(domains->build, domain);
    domain = isl_ast_build_eliminate(domains->build, domain);

    disjoint = isl_set_plain_is_disjoint(domain, domains->schedule_domain);
    if (disjoint < 0)
        return isl_stat_error;
    if (disjoint) {
        isl_set_free(domain);
        return isl_stat_ok;
    }

    return compute_partial_domains(domains, domain);
}
/* Extract the domains at the current depth that should be atomic,
 * separated or unrolled and store them in option.
 *
 * The domains specified by the user might overlap, so we make
 * them disjoint by subtracting earlier domains from later domains.
 */
static void compute_domains_init_options(isl_set *option[4],
    __isl_keep isl_ast_build *build)
{
    enum isl_ast_loop_type type, type2;
    isl_set *unroll;

    for (type = isl_ast_loop_atomic;
            type <= isl_ast_loop_separate; ++type) {
        option[type] = isl_ast_build_get_option_domain(build, type);
        for (type2 = isl_ast_loop_atomic; type2 < type; ++type2)
            option[type] = isl_set_subtract(option[type],
                isl_set_copy(option[type2]));
    }

    unroll = option[isl_ast_loop_unroll];
    unroll = isl_set_coalesce(unroll);
    unroll = isl_set_make_disjoint(unroll);
    option[isl_ast_loop_unroll] = unroll;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately,
 * based on the user-specified options.
 * Return the list of disjoint basic sets.
 *
 * There are three kinds of domains that we need to keep track of.
 * - the "schedule domain" is the domain of "executed"
 * - the "class domain" is the domain corresponding to the current
 *	separation class
 * - the "option domain" is the domain corresponding to one of the options
 *	atomic, unroll or separate
 *
 * We first consider the individual values of the separation classes
 * and split up the domain for each of them separately.
 * Finally, we consider the remainder.  If no separation classes were
 * specified, then we call compute_partial_domains with the universe
 * "class_domain".  Otherwise, we take the "schedule_domain" as "class_domain",
 * with inner dimensions removed.  We do this because we want to
 * avoid computing the complement of the class domains (i.e., the difference
 * between the universe and domains->done).
 */
static __isl_give isl_basic_set_list *compute_domains(
    __isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
    struct isl_codegen_domains domains;
    isl_ctx *ctx;
    isl_set *domain;
    isl_union_set *schedule_domain;
    isl_set *classes;
    isl_space *space;
    int n_param;
    enum isl_ast_loop_type type;
    int empty;

    if (!executed)
        return NULL;

    ctx = isl_union_map_get_ctx(executed);
    domains.list = isl_basic_set_list_alloc(ctx, 0);

    schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
    domain = isl_set_from_union_set(schedule_domain);

    compute_domains_init_options(domains.option, build);

    domains.sep_class = isl_ast_build_get_separation_class(build);
    classes = isl_map_range(isl_map_copy(domains.sep_class));
    n_param = isl_set_dim(classes, isl_dim_param);
    classes = isl_set_project_out(classes, isl_dim_param, 0, n_param);

    space = isl_set_get_space(domain);
    domains.build = build;
    domains.schedule_domain = isl_set_copy(domain);
    domains.executed = executed;
    domains.done = isl_set_empty(space);

    if (isl_set_foreach_point(classes, &compute_class_domains, &domains) < 0)
        domains.list = isl_basic_set_list_free(domains.list);
    isl_set_free(classes);
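    /* If no separation classes were involved (domains.done is still empty),
     * use the universe as the remaining class domain; otherwise use the
     * schedule domain with inner dimensions eliminated, to avoid computing
     * the complement of the class domains handled above.
     */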
    empty = isl_set_is_empty(domains.done);
    if (empty < 0) {
        domains.list = isl_basic_set_list_free(domains.list);
        domain = isl_set_free(domain);
    } else if (empty) {
        isl_set_free(domain);
        domain = isl_set_universe(isl_set_get_space(domains.done));
    } else {
        domain = isl_ast_build_eliminate(build, domain);
    }
    if (compute_partial_domains(&domains, domain) < 0)
        domains.list = isl_basic_set_list_free(domains.list);

    isl_set_free(domains.schedule_domain);
    isl_set_free(domains.done);
    isl_map_free(domains.sep_class);
    for (type = isl_ast_loop_atomic; type <= isl_ast_loop_separate; ++type)
        isl_set_free(domains.option[type]);

    return domains.list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a union map.
 *
 * We first split up the domain at the current depth into disjoint
 * basic sets based on the user-specified options.
 * Then we generate code for each of them and concatenate the results.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_flat(
    __isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
    isl_basic_set_list *domain_list;
    isl_ast_graft_list *list = NULL;

    domain_list = compute_domains(executed, build);
    list = generate_parallel_domains(domain_list, executed, build);

    isl_basic_set_list_free(domain_list);
    isl_union_map_free(executed);
    isl_ast_build_free(build);

    return list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree
 * and the separate option was specified.
 *
 * We perform separation on the domain of "executed" and then generate
 * an AST for each of the resulting disjoint basic sets.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_separate(
    __isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
    isl_space *space;
    isl_set *domain;
    isl_basic_set_list *domain_list;
    isl_ast_graft_list *list;

    space = isl_ast_build_get_space(build, 1);
    domain = separate_schedule_domains(space,
        isl_union_map_copy(executed), build);
    domain_list = isl_basic_set_list_from_set(domain);

    list = generate_parallel_domains(domain_list, executed, build);

    isl_basic_set_list_free(domain_list);
    isl_union_map_free(executed);
    isl_ast_build_free(build);

    return list;
}
/* Internal data structure for generate_shifted_component_tree_unroll.
 *
 * "executed" and "build" are inputs to generate_shifted_component_tree_unroll.
 * "list" collects the constructed grafts.
 */
struct isl_ast_unroll_tree_data {
    isl_union_map *executed;
    isl_ast_build *build;
    isl_ast_graft_list *list;
};
/* Initialize data->list to a list of "n" elements.
 */
static int init_unroll_tree(int n, void *user)
{
    struct isl_ast_unroll_tree_data *data = user;
    isl_ctx *ctx;

    ctx = isl_ast_build_get_ctx(data->build);
    data->list = isl_ast_graft_list_alloc(ctx, n);

    return 0;
}
/* Given an iteration of an unrolled domain represented by "bset",
 * generate the corresponding AST and add the result to data->list.
 */
static int do_unroll_tree_iteration(__isl_take isl_basic_set *bset, void *user)
{
    struct isl_ast_unroll_tree_data *data = user;

    data->list = add_node(data->list, isl_union_map_copy(data->executed),
        bset, isl_ast_build_copy(data->build));

    return 0;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree
 * and the unroll option was specified.
 *
 * We call foreach_iteration to iterate over the individual values and
 * construct and collect the corresponding grafts in do_unroll_tree_iteration.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_unroll(
    __isl_take isl_union_map *executed, __isl_take isl_set *domain,
    __isl_take isl_ast_build *build)
{
    struct isl_ast_unroll_tree_data data = { executed, build, NULL };

    if (foreach_iteration(domain, build, &init_unroll_tree,
            &do_unroll_tree_iteration, &data) < 0)
        data.list = isl_ast_graft_list_free(data.list);

    isl_union_map_free(executed);
    isl_ast_build_free(build);
    return data.list;
}
/* Does "domain" involve a disjunction that is purely based on
 * constraints involving only outer dimensions?
 *
 * In particular, is there a disjunction such that the constraints
 * involving the current and later dimensions are the same over
 * all the disjuncts?
 */
static isl_bool has_pure_outer_disjunction(__isl_keep isl_set *domain,
    __isl_keep isl_ast_build *build)
{
    isl_basic_set *hull;
    isl_set *shared, *inner;
    isl_bool equal;
    int depth;
    unsigned dim;

    if (isl_set_n_basic_set(domain) <= 1)
        return isl_bool_false;

    inner = isl_set_copy(domain);
    depth = isl_ast_build_get_depth(build);
    dim = isl_set_dim(inner, isl_dim_set);
    inner = isl_set_drop_constraints_not_involving_dims(inner,
        isl_dim_set, depth, dim - depth);
    hull = isl_set_plain_unshifted_simple_hull(isl_set_copy(inner));
    shared = isl_set_from_basic_set(hull);
    equal = isl_set_plain_is_equal(inner, shared);
    isl_set_free(inner);
    isl_set_free(shared);

    return equal;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, handle the base case where there is either no isolated
 * set or we are within the isolated set (in which case "isolated" is set)
 * or the iterations that precede or follow the isolated set.
 *
 * The schedule domain is broken up or combined into basic sets
 * according to the AST generation option specified in the current
 * schedule node, which may be either atomic, separate, unroll or
 * unspecified.  If the option is unspecified, then we currently simply
 * split the schedule domain into disjoint basic sets.
 *
 * In case the separate option is specified, the AST generation is
 * handled by generate_shifted_component_tree_separate.
 * In the other cases, we need the global schedule domain.
 * In the unroll case, the AST generation is then handled by
 * generate_shifted_component_tree_unroll which needs the actual
 * schedule domain (with divs that may refer to the current dimension)
 * so that stride detection can be performed.
 * In the atomic or unspecified case, inner dimensions and divs involving
 * the current dimensions should be eliminated.
 * The result is then either combined into a single basic set or
 * split up into disjoint basic sets.
 * Finally an AST is generated for each basic set and the results are
 * concatenated.
 *
 * If the schedule domain involves a disjunction that is purely based on
 * constraints involving only outer dimensions, then it is treated as
 * if atomic was specified.  This ensures that only a single loop
 * is generated instead of a sequence of identical loops with
 * different guards.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_base(
    __isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
    int isolated)
{
    isl_bool outer_disjunction;
    isl_union_set *schedule_domain;
    isl_set *domain;
    isl_basic_set_list *domain_list;
    isl_ast_graft_list *list;
    enum isl_ast_loop_type type;

    type = isl_ast_build_get_loop_type(build, isolated);
    if (type < 0)
        goto error;

    if (type == isl_ast_loop_separate)
        return generate_shifted_component_tree_separate(executed,
            build);

    schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
    domain = isl_set_from_union_set(schedule_domain);

    if (type == isl_ast_loop_unroll)
        return generate_shifted_component_tree_unroll(executed, domain,
            build);

    domain = isl_ast_build_eliminate(build, domain);
    domain = isl_set_coalesce(domain);

    outer_disjunction = has_pure_outer_disjunction(domain, build);
    if (outer_disjunction < 0)
        domain = isl_set_free(domain);

    if (outer_disjunction || type == isl_ast_loop_atomic) {
        isl_basic_set *hull;
        hull = isl_set_unshifted_simple_hull(domain);
        domain_list = isl_basic_set_list_from_basic_set(hull);
    } else {
        domain = isl_set_make_disjoint(domain);
        domain_list = isl_basic_set_list_from_set(domain);
    }

    list = generate_parallel_domains(domain_list, executed, build);

    isl_basic_set_list_free(domain_list);
    isl_union_map_free(executed);
    isl_ast_build_free(build);

    return list;
error:
    isl_union_map_free(executed);
    isl_ast_build_free(build);
    return NULL;
}
/* Extract out the disjunction imposed by "domain" on the outer
 * schedule dimensions.
 *
 * In particular, remove all inner dimensions from "domain" (including
 * the current dimension) and then remove the constraints that are shared
 * by all disjuncts in the result.
 */
static __isl_give isl_set *extract_disjunction(__isl_take isl_set *domain,
    __isl_keep isl_ast_build *build)
{
    isl_set *hull;
    int depth;
    unsigned dim;

    domain = isl_ast_build_specialize(build, domain);
    depth = isl_ast_build_get_depth(build);
    dim = isl_set_dim(domain, isl_dim_set);
    domain = isl_set_eliminate(domain, isl_dim_set, depth, dim - depth);
    domain = isl_set_remove_unknown_divs(domain);
    hull = isl_set_copy(domain);
    hull = isl_set_from_basic_set(isl_set_unshifted_simple_hull(hull));
    domain = isl_set_gist(domain, hull);

    return domain;
}
/* Add "guard" to the grafts in "list".
 * "build" is the outer AST build, while "sub_build" includes "guard"
 * in its generated domain.
 *
 * First combine the grafts into a single graft and then add the guard.
 * If the list is empty, or if some error occurred, then simply return
 * the list.
 */
static __isl_give isl_ast_graft_list *list_add_guard(
    __isl_take isl_ast_graft_list *list, __isl_keep isl_set *guard,
    __isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
{
    isl_ast_graft *graft;

    list = isl_ast_graft_list_fuse(list, sub_build);

    if (isl_ast_graft_list_n_ast_graft(list) != 1)
        return list;

    graft = isl_ast_graft_list_get_ast_graft(list, 0);
    graft = isl_ast_graft_add_guard(graft, isl_set_copy(guard), build);
    list = isl_ast_graft_list_set_ast_graft(list, 0, graft);

    return list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so for the specified subset of the schedule domain.
 *
 * If we are outside of the isolated part, then "domain" may include
 * a disjunction.  Explicitly generate this disjunction at this point
 * instead of relying on the disjunction getting hoisted back up
 * to this level.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree_part(
    __isl_keep isl_union_map *executed, __isl_take isl_set *domain,
    __isl_keep isl_ast_build *build, int isolated)
{
    isl_union_set *uset;
    isl_ast_graft_list *list;
    isl_ast_build *sub_build;
    int empty;

    uset = isl_union_set_from_set(isl_set_copy(domain));
    executed = isl_union_map_copy(executed);
    executed = isl_union_map_intersect_domain(executed, uset);
    empty = isl_union_map_is_empty(executed);
    if (empty < 0)
        goto error;
    if (empty) {
        isl_ctx *ctx;

        isl_union_map_free(executed);
        isl_set_free(domain);
        ctx = isl_ast_build_get_ctx(build);
        return isl_ast_graft_list_alloc(ctx, 0);
    }

    sub_build = isl_ast_build_copy(build);
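    /* Outside the isolated part, generate the disjunction imposed on the
     * outer dimensions explicitly: restrict the sub-build to it here and
     * add it back as a guard around the fused grafts below.
     */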
    if (!isolated) {
        domain = extract_disjunction(domain, build);
        sub_build = isl_ast_build_restrict_generated(sub_build,
            isl_set_copy(domain));
    }
    list = generate_shifted_component_tree_base(executed,
        isl_ast_build_copy(sub_build), isolated);
    if (!isolated)
        list = list_add_guard(list, domain, build, sub_build);
    isl_ast_build_free(sub_build);
    isl_set_free(domain);
    return list;
error:
    isl_union_map_free(executed);
    isl_set_free(domain);
    return NULL;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so for the specified sequence of subsets
 * of the schedule domain, "before", "isolated", "after" and "other",
 * where only the "isolated" part is considered to be isolated.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_parts(
    __isl_take isl_union_map *executed, __isl_take isl_set *before,
    __isl_take isl_set *isolated, __isl_take isl_set *after,
    __isl_take isl_set *other, __isl_take isl_ast_build *build)
{
    isl_ast_graft_list *list, *res;

    res = generate_shifted_component_tree_part(executed, before, build, 0);
    list = generate_shifted_component_tree_part(executed, isolated,
        build, 1);
    res = isl_ast_graft_list_concat(res, list);
    list = generate_shifted_component_tree_part(executed, after, build, 0);
    res = isl_ast_graft_list_concat(res, list);
    list = generate_shifted_component_tree_part(executed, other, build, 0);
    res = isl_ast_graft_list_concat(res, list);

    isl_union_map_free(executed);
    isl_ast_build_free(build);

    return res;
}
/* Does "set" intersect "first", but not "second"?
 */
static isl_bool only_intersects_first(__isl_keep isl_set *set,
    __isl_keep isl_set *first, __isl_keep isl_set *second)
{
    isl_bool disjoint;

    disjoint = isl_set_is_disjoint(set, first);
    if (disjoint < 0)
        return isl_bool_error;
    if (disjoint)
        return isl_bool_false;

    return isl_set_is_disjoint(set, second);
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 * In particular, do so in case of isolation where there is
 * only an "isolated" part and an "after" part.
 * "dead1" and "dead2" are freed by this function in order to simplify
 * the caller.
 *
 * The "before" and "other" parts are set to empty sets.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_only_after(
    __isl_take isl_union_map *executed, __isl_take isl_set *isolated,
    __isl_take isl_set *after, __isl_take isl_ast_build *build,
    __isl_take isl_set *dead1, __isl_take isl_set *dead2)
{
    isl_set *empty;

    empty = isl_set_empty(isl_set_get_space(after));
    isl_set_free(dead1);
    isl_set_free(dead2);
    return generate_shifted_component_parts(executed, isl_set_copy(empty),
        isolated, after, empty, build);
}
/* Generate code for a single component, after shifting (if any)
 * has been applied, in case the schedule was specified as a schedule tree.
 *
 * We first check if the user has specified an isolated schedule domain
 * and that we are not already outside of this isolated schedule domain.
 * If so, we break up the schedule domain into iterations that
 * precede the isolated domain, the isolated domain itself,
 * the iterations that follow the isolated domain and
 * the remaining iterations (those that are incomparable
 * to the isolated domain).
 * We generate an AST for each piece and concatenate the results.
 *
 * In the special case where at least one element of the schedule
 * domain that does not belong to the isolated domain needs
 * to be scheduled after this isolated domain, but none of those
 * elements need to be scheduled before, break up the schedule domain
 * in only two parts, the isolated domain, and a part that will be
 * scheduled after the isolated domain.
 *
 * If no isolated set has been specified, then we generate an
 * AST for the entire inverse schedule.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_tree(
    __isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
    int i, depth;
    int empty, has_isolate;
    isl_space *space;
    isl_union_set *schedule_domain;
    isl_set *domain;
    isl_basic_set *hull;
    isl_set *isolated, *before, *after, *test;
    isl_map *gt, *lt;
    isl_bool pure;

    build = isl_ast_build_extract_isolated(build);
    has_isolate = isl_ast_build_has_isolated(build);
    if (has_isolate < 0)
        executed = isl_union_map_free(executed);
    else if (!has_isolate)
        return generate_shifted_component_tree_base(executed, build, 0);

    schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
    domain = isl_set_from_union_set(schedule_domain);

    isolated = isl_ast_build_get_isolated(build);
    isolated = isl_set_intersect(isolated, isl_set_copy(domain));
    test = isl_ast_build_specialize(build, isl_set_copy(isolated));
    empty = isl_set_is_empty(test);
    isl_set_free(test);
    if (empty < 0)
        goto error;
    if (empty) {
        isl_set_free(isolated);
        isl_set_free(domain);
        return generate_shifted_component_tree_base(executed, build, 0);
    }
    isolated = isl_ast_build_eliminate(build, isolated);
    hull = isl_set_unshifted_simple_hull(isolated);
    isolated = isl_set_from_basic_set(hull);
= isl_ast_build_get_depth(build
);
3574 space
= isl_space_map_from_set(isl_set_get_space(isolated
));
3575 gt
= isl_map_universe(space
);
3576 for (i
= 0; i
< depth
; ++i
)
3577 gt
= isl_map_equate(gt
, isl_dim_in
, i
, isl_dim_out
, i
);
3578 gt
= isl_map_order_gt(gt
, isl_dim_in
, depth
, isl_dim_out
, depth
);
3579 lt
= isl_map_reverse(isl_map_copy(gt
));
3580 before
= isl_set_apply(isl_set_copy(isolated
), gt
);
3581 after
= isl_set_apply(isl_set_copy(isolated
), lt
);
3583 domain
= isl_set_subtract(domain
, isl_set_copy(isolated
));
3584 pure
= only_intersects_first(domain
, after
, before
);
3586 executed
= isl_union_map_free(executed
);
3588 return generate_shifted_component_only_after(executed
, isolated
,
3589 domain
, build
, before
, after
);
3590 domain
= isl_set_subtract(domain
, isl_set_copy(before
));
3591 domain
= isl_set_subtract(domain
, isl_set_copy(after
));
3592 after
= isl_set_subtract(after
, isl_set_copy(isolated
));
3593 after
= isl_set_subtract(after
, isl_set_copy(before
));
3594 before
= isl_set_subtract(before
, isl_set_copy(isolated
));
3596 return generate_shifted_component_parts(executed
, before
, isolated
,
3597 after
, domain
, build
);
3599 isl_set_free(domain
);
3600 isl_set_free(isolated
);
3601 isl_union_map_free(executed
);
3602 isl_ast_build_free(build
);
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * Call generate_shifted_component_tree or generate_shifted_component_flat
 * depending on whether the schedule was specified as a schedule tree.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	if (isl_ast_build_has_schedule_node(build))
		return generate_shifted_component_tree(executed, build);

	return generate_shifted_component_flat(executed, build);
}
struct isl_set_map_pair {
	isl_set *set;
	isl_map *map;
};
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * return the union of the "map" fields of the elements
 * indexed by the first "n" elements of "order".
 */
static __isl_give isl_union_map *construct_component_executed(
	struct isl_set_map_pair *domain, int *order, int n)
{
	int i;
	isl_map *map;
	isl_union_map *executed;

	map = isl_map_copy(domain[order[0]].map);
	executed = isl_union_map_from_map(map);
	for (i = 1; i < n; ++i) {
		map = isl_map_copy(domain[order[i]].map);
		executed = isl_union_map_add_map(executed, map);
	}

	return executed;
}

/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_from_list(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	isl_union_map *executed;

	executed = construct_component_executed(domain, order, n);
	return generate_shifted_component(executed, build);
}
/* Does set dimension "pos" of "set" have an obviously fixed value?
 */
static int dim_is_fixed(__isl_keep isl_set *set, int pos)
{
	isl_val *v;
	int fixed;

	v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, pos);
	if (!v)
		return -1;
	fixed = !isl_val_is_nan(v);
	isl_val_free(v);

	return fixed;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * do all (except for at most one) of the "set" fields of the elements
 * indexed by the first "n" elements of "order" have a fixed value
 * at position "depth"?
 */
static int at_most_one_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth)
{
	int i;
	int non_fixed = -1;

	for (i = 0; i < n; ++i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		if (non_fixed >= 0)
			return 0;
		non_fixed = i;
	}

	return 1;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * eliminate the inner dimensions from the "set" field of the elements
 * indexed by the first "n" elements of "order", provided the current
 * dimension does not have a fixed value.
 *
 * Return the index of the first element in "order" with a corresponding
 * "set" field that does not have an (obviously) fixed value.
 */
static int eliminate_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth, __isl_keep isl_ast_build *build)
{
	int i;
	int base = -1;

	for (i = n - 1; i >= 0; --i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		domain[order[i]].set = isl_ast_build_eliminate_inner(build,
							domain[order[i]].set);
		base = i;
	}

	return base;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * find the element of "domain" (amongst those indexed by the first "n"
 * elements of "order") with the "set" field that has the smallest
 * value for the current iterator.
 *
 * Note that the domain with the smallest value may depend on the parameters
 * and/or outer loop dimension.  Since the result of this function is only
 * used as a heuristic, we only make a reasonable attempt at finding the best
 * domain, one that should work in case a single domain provides the smallest
 * value for the current dimension over all values of the parameters
 * and outer dimensions.
 *
 * In particular, we compute the smallest value of the first domain
 * and replace it by that of any later domain if that later domain
 * has a smallest value that is smaller for at least some value
 * of the parameters and outer dimensions.
 */
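/* Illustrative note (editor's addition): with two domains whose
 * current-dimension lower bounds are
 *
 *	A: { [i] : i >= 0 }	and	B: { [i] : i >= n }
 *
 * the heuristic keeps A as reference as long as its minimum is not
 * provably larger, but switches to B as soon as B's minimum is smaller
 * for some value of the parameter n (here, any n < 0).  The choice is
 * only a best effort, as explained above.
 */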
static int first_offset(struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_ast_build *build)
{
	int i;
	int first = 0;
	isl_map *min_first;

	min_first = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[0]].set));
	min_first = isl_map_lexmin(min_first);

	for (i = 1; i < n; ++i) {
		isl_map *min, *test;
		int empty;

		min = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[i]].set));
		min = isl_map_lexmin(min);
		test = isl_map_copy(min);
		test = isl_map_apply_domain(isl_map_copy(min_first), test);
		test = isl_map_order_lt(test, isl_dim_in, 0, isl_dim_out, 0);
		empty = isl_map_is_empty(test);
		isl_map_free(test);

		if (empty >= 0 && !empty) {
			isl_map_free(min_first);
			first = i;
			min_first = min;
		} else
			isl_map_free(min);

		if (empty < 0)
			break;
	}

	isl_map_free(min_first);

	return i < n ? -1 : first;
}
/* Construct a shifted inverse schedule based on the original inverse schedule,
 * the stride and the offset.
 *
 * The original inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * "stride" and "offset" are such that the difference
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * Moreover, 0 <= offset[i] < stride.
 *
 * For each domain, we create a map
 *
 *	{ [..., j, ...] -> [..., j - offset[i], offset[i], ....] }
 *
 * where j refers to the current dimension and the other dimensions are
 * unchanged, and apply this map to the original schedule domain.
 *
 * For example, for the original schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * and assuming the offset is 0 for the A domain and 1 for the B domain,
 * we apply the mapping
 *
 *	{ [j] -> [j, 0] }
 *
 * to the schedule of the "A" domain and the mapping
 *
 *	{ [j - 1] -> [j, 1] }
 *
 * to the schedule of the "B" domain.
 *
 * Note that after the transformation, the differences between pairs
 * of values of the current dimension over all domains are multiples
 * of stride and that we have therefore exposed the stride.
 *
 * To see that the mapping preserves the lexicographic order,
 * first note that each of the individual maps above preserves the order.
 * If the value of the current iterator is j1 in one domain and j2 in another,
 * then if j1 = j2, we know that the same map is applied to both domains
 * and the order is preserved.
 * Otherwise, let us assume, without loss of generality, that j1 < j2.
 * If c1 >= c2 (with c1 and c2 the corresponding offsets), then
 *
 *	j1 - c1 < j2 - c2
 *
 * and the order is preserved.
 * If c1 < c2, then we know
 *
 *	0 <= c2 - c1 < s
 *
 * (with s the stride).  We also have
 *
 *	j2 - j1 = n * s + r
 *
 * with n >= 0 and 0 <= r < s.
 * In other words, r = c2 - c1.
 * If n > 0, then
 *
 *	j1 - c1 < j2 - c2
 *
 * If n = 0, then
 *
 *	j1 - c1 = j2 - c2
 *
 * and since c1 < c2, we obtain
 *
 *	(j1 - c1, c1) << (j2 - c2, c2)
 *
 * with "<<" the lexicographic order, proving that the order is preserved
 * in all cases.
 */
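/* Worked example (editor's addition, not part of the original comment):
 * take stride s = 2, offsets c_A = 0 and c_B = 1, and compare the schedule
 * values j1 = 4 from the A domain and j2 = 7 from the B domain.  Then
 *
 *	j2 - j1 = 3 = 1 * 2 + 1,	so n = 1 > 0 and r = c_B - c_A = 1,
 *	j1 - c_A = 4 < 6 = j2 - c_B,
 *
 * so (4, 0) << (6, 1) and the original order j1 < j2 is preserved by the
 * transformed schedule, as claimed above.
 */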
static __isl_give isl_union_map *contruct_shifted_executed(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	int i;
	int depth;
	isl_union_map *executed;
	isl_space *space;
	isl_map *map;
	isl_constraint *c;

	depth = isl_ast_build_get_depth(build);
	space = isl_ast_build_get_space(build, 1);
	executed = isl_union_map_empty(isl_space_copy(space));
	space = isl_space_map_from_set(space);
	map = isl_map_identity(isl_space_copy(space));
	map = isl_map_eliminate(map, isl_dim_out, depth, 1);
	map = isl_map_insert_dims(map, isl_dim_out, depth + 1, 1);
	space = isl_space_insert_dims(space, isl_dim_out, depth + 1, 1);

	c = isl_constraint_alloc_equality(isl_local_space_from_space(space));
	c = isl_constraint_set_coefficient_si(c, isl_dim_in, depth, 1);
	c = isl_constraint_set_coefficient_si(c, isl_dim_out, depth, -1);

	for (i = 0; i < n; ++i) {
		isl_map *map_i;
		isl_val *v;

		v = isl_multi_val_get_val(offset, i);
		if (!v)
			break;
		map_i = isl_map_copy(map);
		map_i = isl_map_fix_val(map_i, isl_dim_out, depth + 1,
					isl_val_copy(v));
		v = isl_val_neg(v);
		c = isl_constraint_set_constant_val(c, v);
		map_i = isl_map_add_constraint(map_i, isl_constraint_copy(c));

		map_i = isl_map_apply_domain(isl_map_copy(domain[order[i]].map),
						map_i);
		executed = isl_union_map_add_map(executed, map_i);
	}

	isl_constraint_free(c);
	isl_map_free(map);

	if (i < n)
		executed = isl_union_map_free(executed);

	return executed;
}
/* Generate code for a single component, after exposing the stride,
 * given that the schedule domain is "shifted strided".
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * The schedule domain being "shifted strided" means that the differences
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * We first look for the domain with the "smallest" value for the current
 * dimension and adjust the offsets such that the offset of the "smallest"
 * domain is equal to zero.  The other offsets are reduced modulo stride.
 *
 * Based on this information, we construct a new inverse schedule in
 * contruct_shifted_executed that exposes the stride.
 * Since this involves the introduction of a new schedule dimension,
 * the build needs to be changed accordingly.
 * After computing the AST, the newly introduced dimension needs
 * to be removed again from the list of grafts.  We do this by plugging
 * in a mapping that represents the new schedule domain in terms of the
 * old schedule domain.
 */
static __isl_give isl_ast_graft_list *generate_shift_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list;
	int first;
	int depth;
	isl_val *val;
	isl_multi_val *mv;
	isl_space *space;
	isl_multi_aff *ma, *zero;
	isl_union_map *executed;

	depth = isl_ast_build_get_depth(build);

	first = first_offset(domain, order, n, build);
	if (first < 0)
		goto error;

	mv = isl_multi_val_copy(offset);
	val = isl_multi_val_get_val(offset, first);
	val = isl_val_neg(val);
	mv = isl_multi_val_add_val(mv, val);
	mv = isl_multi_val_mod_val(mv, isl_val_copy(stride));

	executed = contruct_shifted_executed(domain, order, n, stride, mv,
						build);
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_map_from_set(space);
	ma = isl_multi_aff_identity(isl_space_copy(space));
	space = isl_space_from_domain(isl_space_domain(space));
	space = isl_space_add_dims(space, isl_dim_out, 1);
	zero = isl_multi_aff_zero(space);
	ma = isl_multi_aff_range_splice(ma, depth + 1, zero);
	build = isl_ast_build_insert_dim(build, depth + 1);
	list = generate_shifted_component(executed, build);

	list = isl_ast_graft_list_preimage_multi_aff(list, ma);

	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
/* Does any node in the schedule tree rooted at the current schedule node
 * of "build" depend on outer schedule nodes?
 */
static int has_anchored_subtree(__isl_keep isl_ast_build *build)
{
	isl_schedule_node *node;
	int dependent;

	node = isl_ast_build_get_schedule_node(build);
	dependent = isl_schedule_node_is_subtree_anchored(node);
	isl_schedule_node_free(node);

	return dependent;
}
/* Generate code for a single component.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * This function may modify the "set" fields of "domain".
 *
 * Before proceeding with the actual code generation for the component,
 * we first check if there are any "shifted" strides, meaning that
 * the schedule domains of the individual domains are all strided,
 * but that they have different offsets, resulting in the union
 * of schedule domains not being strided anymore.
 *
 * The simplest example is the schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * Both schedule domains are strided, but their union is not.
 * This function detects such cases and then rewrites the schedule to
 *
 *	{ A[i] -> [2i, 0]: 0 <= i < 10; B[i] -> [2i, 1] : 0 <= i < 10 }
 *
 * In the new schedule, the schedule domains have the same offset (modulo
 * the stride), ensuring that the union of schedule domains is also strided.
 *
 * If there is only a single domain in the component, then there is
 * nothing to do.  Similarly, if the current schedule dimension has
 * a fixed value for almost all domains then there is nothing to be done.
 * In particular, we need at least two domains where the current schedule
 * dimension does not have a fixed value.
 * Finally, in case of a schedule map input,
 * if any of the options refer to the current schedule dimension,
 * then we bail out as well.  It would be possible to reformulate the options
 * in terms of the new schedule domain, but that would introduce constraints
 * that separate the domains in the options and that is something we would
 * like to avoid.
 * In the case of a schedule tree input, we bail out if any of
 * the descendants of the current schedule node refer to outer
 * schedule nodes in any way.
 *
 * To see if there is any shifted stride, we look at the differences
 * between the values of the current dimension in pairs of domains
 * for equal values of outer dimensions.  These differences should be
 * of the form
 *
 *	m x + r
 *
 * with "m" the stride and "r" a constant.  Note that we cannot perform
 * this analysis on individual domains as the lower bound in each domain
 * may depend on parameters or outer dimensions and so the current dimension
 * itself may not have a fixed remainder on division by the stride.
 *
 * In particular, we compare the first domain that does not have an
 * obviously fixed value for the current dimension to itself and all
 * other domains and collect the offsets and the gcd of the strides.
 * If the gcd becomes one, then we failed to find shifted strides.
 * If the gcd is zero, then the differences were all fixed, meaning
 * that some domains had non-obviously fixed values for the current dimension.
 * If all the offsets are the same (for those domains that do not have
 * an obviously fixed value for the current dimension), then we do not
 * apply the transformation.
 * If none of the domains were skipped, then there is nothing to do.
 * If some of them were skipped, then if we apply separation, the schedule
 * domain should get split in pieces with a (non-shifted) stride.
 *
 * Otherwise, we apply a shift to expose the stride in
 * generate_shift_component.
 */
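/* Illustrative note (editor's addition): for the example above, comparing
 * A[i] -> [2i] against B[i] -> [2i+1] for equal values of the outer
 * dimensions gives differences of the form 2 x + 1, so the collected gcd
 * of the strides is m = 2 with offsets 0 (for A, the reference domain)
 * and 1 (for B).  Since the gcd is neither 1 nor 0 and the offsets differ,
 * the shift is applied and the schedule is rewritten as shown, with the
 * offset exposed in an extra innermost schedule dimension.
 */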
static __isl_give isl_ast_graft_list *generate_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	int i, d;
	int depth;
	isl_ctx *ctx;
	isl_map *map;
	isl_set *deltas;
	isl_val *gcd = NULL;
	isl_multi_val *mv;
	int fixed, skip;
	int base;
	isl_ast_graft_list *list;
	int res = 0;

	depth = isl_ast_build_get_depth(build);

	skip = n == 1;
	if (skip >= 0 && !skip)
		skip = at_most_one_non_fixed(domain, order, n, depth);
	if (skip >= 0 && !skip) {
		if (isl_ast_build_has_schedule_node(build))
			skip = has_anchored_subtree(build);
		else
			skip = isl_ast_build_options_involve_depth(build);
	}
	if (skip < 0)
		goto error;
	if (skip)
		return generate_shifted_component_from_list(domain,
							    order, n, build);

	base = eliminate_non_fixed(domain, order, n, depth, build);
	if (base < 0)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	mv = isl_multi_val_zero(isl_space_set_alloc(ctx, 0, n));

	fixed = 1;
	for (i = 0; i < n; ++i) {
		isl_val *r, *m;

		map = isl_map_from_domain_and_range(
					isl_set_copy(domain[order[base]].set),
					isl_set_copy(domain[order[i]].set));
		for (d = 0; d < depth; ++d)
			map = isl_map_equate(map, isl_dim_in, d,
						isl_dim_out, d);
		deltas = isl_map_deltas(map);
		res = isl_set_dim_residue_class_val(deltas, depth, &m, &r);
		isl_set_free(deltas);
		if (res < 0)
			break;

		if (i == 0)
			gcd = m;
		else
			gcd = isl_val_gcd(gcd, m);
		if (isl_val_is_one(gcd)) {
			isl_val_free(r);
			break;
		}
		mv = isl_multi_val_set_val(mv, i, r);

		res = dim_is_fixed(domain[order[i]].set, depth);
		if (res < 0)
			break;
		if (res)
			continue;

		if (fixed && i > base) {
			isl_val *a, *b;
			a = isl_multi_val_get_val(mv, i);
			b = isl_multi_val_get_val(mv, base);
			if (isl_val_ne(a, b))
				fixed = 0;
			isl_val_free(a);
			isl_val_free(b);
		}
	}

	if (res < 0 || !gcd) {
		isl_ast_build_free(build);
		list = NULL;
	} else if (i < n || fixed || isl_val_is_zero(gcd)) {
		list = generate_shifted_component_from_list(domain,
							    order, n, build);
	} else {
		list = generate_shift_component(domain, order, n, gcd, mv,
						build);
	}

	isl_val_free(gcd);
	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
/* Store both "map" itself and its domain in the
 * structure pointed to by *next and advance to the next array element.
 */
static isl_stat extract_domain(__isl_take isl_map *map, void *user)
{
	struct isl_set_map_pair **next = user;

	(*next)->map = isl_map_copy(map);
	(*next)->set = isl_map_domain(map);
	(*next)++;

	return isl_stat_ok;
}
static int after_in_tree(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node);

/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the child of "node"?
 */
static int after_in_child(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_schedule_node *child;
	int after;

	child = isl_schedule_node_get_child(node, 0);
	after = after_in_tree(umap, child);
	isl_schedule_node_free(child);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the band node "node"?
 *
 * We first check if any domain element is scheduled after any
 * of the corresponding image elements by the band node itself.
 * If not, we restrict "map" to those pairs of elements that
 * are scheduled together by the band node and continue with
 * the child of the band node.
 * If there are no such pairs then the map passed to after_in_child
 * will be empty causing it to return 0.
 */
static int after_in_band(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_multi_union_pw_aff *mupa;
	isl_union_map *partial, *test, *gt, *universe, *umap1, *umap2;
	isl_union_set *domain, *range;
	isl_space *space;
	int empty;
	int after;

	if (isl_schedule_node_band_n_member(node) == 0)
		return after_in_child(umap, node);

	mupa = isl_schedule_node_band_get_partial_schedule(node);
	space = isl_multi_union_pw_aff_get_space(mupa);
	partial = isl_union_map_from_multi_union_pw_aff(mupa);
	test = isl_union_map_copy(umap);
	test = isl_union_map_apply_domain(test, isl_union_map_copy(partial));
	test = isl_union_map_apply_range(test, isl_union_map_copy(partial));
	gt = isl_union_map_from_map(isl_map_lex_gt(space));
	test = isl_union_map_intersect(test, gt);
	empty = isl_union_map_is_empty(test);
	isl_union_map_free(test);

	if (empty < 0 || !empty) {
		isl_union_map_free(partial);
		return empty < 0 ? -1 : 1;
	}

	universe = isl_union_map_universe(isl_union_map_copy(umap));
	domain = isl_union_map_domain(isl_union_map_copy(universe));
	range = isl_union_map_range(universe);
	umap1 = isl_union_map_copy(partial);
	umap1 = isl_union_map_intersect_domain(umap1, domain);
	umap2 = isl_union_map_intersect_domain(partial, range);
	test = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
	test = isl_union_map_intersect(test, isl_union_map_copy(umap));
	after = after_in_child(test, node);
	isl_union_map_free(test);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the context node "node"?
 *
 * The context constraints apply to the schedule domain,
 * so we cannot apply them directly to "umap", which contains
 * pairs of statement instances.  Instead, we add them
 * to the range of the prefix schedule for both domain and
 * range of "umap".
 */
static int after_in_context(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *prefix, *universe, *umap1, *umap2;
	isl_union_set *domain, *range;
	isl_set *context;
	int after;

	umap = isl_union_map_copy(umap);
	context = isl_schedule_node_context_get_context(node);
	prefix = isl_schedule_node_get_prefix_schedule_union_map(node);
	universe = isl_union_map_universe(isl_union_map_copy(umap));
	domain = isl_union_map_domain(isl_union_map_copy(universe));
	range = isl_union_map_range(universe);
	umap1 = isl_union_map_copy(prefix);
	umap1 = isl_union_map_intersect_domain(umap1, domain);
	umap2 = isl_union_map_intersect_domain(prefix, range);
	umap1 = isl_union_map_intersect_range(umap1,
					isl_union_set_from_set(context));
	umap1 = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
	umap = isl_union_map_intersect(umap, umap1);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the expansion node "node"?
 *
 * We apply the expansion to domain and range of "umap" and
 * continue with its child.
 */
static int after_in_expansion(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *expansion;
	int after;

	expansion = isl_schedule_node_expansion_get_expansion(node);
	umap = isl_union_map_copy(umap);
	umap = isl_union_map_apply_domain(umap, isl_union_map_copy(expansion));
	umap = isl_union_map_apply_range(umap, expansion);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the extension node "node"?
 *
 * Since the extension node may add statement instances before or
 * after the pairs of statement instances in "umap", we return 1
 * to ensure that these pairs are not broken up.
 */
static int after_in_extension(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	return 1;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the filter node "node"?
 *
 * We intersect domain and range of "umap" with the filter and
 * continue with its child.
 */
static int after_in_filter(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	isl_union_set *filter;
	int after;

	umap = isl_union_map_copy(umap);
	filter = isl_schedule_node_filter_get_filter(node);
	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(filter));
	umap = isl_union_map_intersect_range(umap, filter);

	after = after_in_child(umap, node);

	isl_union_map_free(umap);

	return after;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the set node "node"?
 *
 * This is only the case if this condition holds in any
 * of the (filter) children of the set node.
 * In particular, if the domain and the range of "umap"
 * are contained in different children, then the condition
 * cannot hold.
 */
static int after_in_set(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int i, n;

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		int after;

		child = isl_schedule_node_get_child(node, i);
		after = after_in_tree(umap, child);
		isl_schedule_node_free(child);

		if (after < 0 || after)
			return after;
	}

	return 0;
}
/* Return the filter of child "i" of "node".
 */
static __isl_give isl_union_set *child_filter(
	__isl_keep isl_schedule_node *node, int i)
{
	isl_schedule_node *child;
	isl_union_set *filter;

	child = isl_schedule_node_get_child(node, i);
	filter = isl_schedule_node_filter_get_filter(child);
	isl_schedule_node_free(child);

	return filter;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at
 * the sequence node "node"?
 *
 * This happens in particular if any domain element is
 * contained in a later child than one containing a range element or
 * if the condition holds within a given child in the sequence.
 * The latter part of the condition is checked by after_in_set.
 */
static int after_in_sequence(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int i, j, n;
	isl_union_map *umap_i;
	int empty, after = 0;

	n = isl_schedule_node_n_children(node);
	for (i = 1; i < n; ++i) {
		isl_union_set *filter_i;

		umap_i = isl_union_map_copy(umap);
		filter_i = child_filter(node, i);
		umap_i = isl_union_map_intersect_domain(umap_i, filter_i);
		empty = isl_union_map_is_empty(umap_i);
		if (empty < 0)
			goto error;
		if (empty) {
			isl_union_map_free(umap_i);
			continue;
		}

		for (j = 0; j < i; ++j) {
			isl_union_set *filter_j;
			isl_union_map *umap_ij;

			umap_ij = isl_union_map_copy(umap_i);
			filter_j = child_filter(node, j);
			umap_ij = isl_union_map_intersect_range(umap_ij,
								filter_j);
			empty = isl_union_map_is_empty(umap_ij);
			isl_union_map_free(umap_ij);

			if (empty < 0)
				goto error;
			if (!empty)
				after = 1;
			if (after)
				break;
		}

		isl_union_map_free(umap_i);

		if (after)
			break;
	}

	if (after < 0 || after)
		return after;

	return after_in_set(umap, node);
error:
	isl_union_map_free(umap_i);
	return -1;
}
/* Is any domain element of "umap" scheduled after any of
 * the corresponding image elements by the tree rooted at "node"?
 *
 * If "umap" is empty, then clearly there is no such element.
 * Otherwise, consider the different types of nodes separately.
 */
static int after_in_tree(__isl_keep isl_union_map *umap,
	__isl_keep isl_schedule_node *node)
{
	int empty;
	enum isl_schedule_node_type type;

	empty = isl_union_map_is_empty(umap);
	if (empty < 0)
		return -1;
	if (empty)
		return 0;
	if (!node)
		return -1;

	type = isl_schedule_node_get_type(node);
	switch (type) {
	case isl_schedule_node_error:
		return -1;
	case isl_schedule_node_leaf:
		return 0;
	case isl_schedule_node_band:
		return after_in_band(umap, node);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
			"unexpected internal domain node", return -1);
	case isl_schedule_node_context:
		return after_in_context(umap, node);
	case isl_schedule_node_expansion:
		return after_in_expansion(umap, node);
	case isl_schedule_node_extension:
		return after_in_extension(umap, node);
	case isl_schedule_node_filter:
		return after_in_filter(umap, node);
	case isl_schedule_node_guard:
	case isl_schedule_node_mark:
		return after_in_child(umap, node);
	case isl_schedule_node_set:
		return after_in_set(umap, node);
	case isl_schedule_node_sequence:
		return after_in_sequence(umap, node);
	}

	return 1;
}
/* Is any domain element of "map1" scheduled after any domain
 * element of "map2" by the subtree underneath the current band node,
 * while at the same time being scheduled together by the current
 * band node, i.e., by "map1" and "map2"?
 *
 * If the child of the current band node is a leaf, then
 * no element can be scheduled after any other element.
 *
 * Otherwise, we construct a relation between domain elements
 * of "map1" and domain elements of "map2" that are scheduled
 * together and then check if the subtree underneath the current
 * band node determines their relative order.
 */
static int after_in_subtree(__isl_keep isl_ast_build *build,
	__isl_keep isl_map *map1, __isl_keep isl_map *map2)
{
	isl_schedule_node *node;
	isl_map *map;
	isl_union_map *umap;
	int after;

	node = isl_ast_build_get_schedule_node(build);
	if (!node)
		return -1;
	node = isl_schedule_node_child(node, 0);
	if (isl_schedule_node_get_type(node) == isl_schedule_node_leaf) {
		isl_schedule_node_free(node);
		return 0;
	}
	map = isl_map_copy(map2);
	map = isl_map_apply_domain(map, isl_map_copy(map1));
	umap = isl_union_map_from_map(map);
	after = after_in_tree(umap, node);
	isl_union_map_free(umap);
	isl_schedule_node_free(node);

	return after;
}
/* Internal data for any_scheduled_after.
 *
 * "build" is the build in which the AST is constructed.
 * "depth" is the number of loops that have already been generated.
 * "group_coscheduled" is a local copy of options->ast_build_group_coscheduled.
 * "domain" is an array of set-map pairs corresponding to the different
 * iteration domains.  The set is the schedule domain, i.e., the domain
 * of the inverse schedule, while the map is the inverse schedule itself.
 */
struct isl_any_scheduled_after_data {
	isl_ast_build *build;
	int depth;
	int group_coscheduled;
	struct isl_set_map_pair *domain;
};
/* Is any element of domain "i" scheduled after any element of domain "j"
 * (for a common iteration of the first data->depth loops)?
 *
 * data->domain[i].set contains the domain of the inverse schedule
 * for domain "i", i.e., elements in the schedule domain.
 *
 * If we are inside a band of a schedule tree and there is a pair
 * of elements in the two domains that is scheduled together by
 * the current band, then we check if any element of "i" may be scheduled
 * after an element of "j" by the descendants of the band node.
 *
 * If data->group_coscheduled is set, then we also return 1 if there
 * is any pair of elements in the two domains that are scheduled together.
 */
static isl_bool any_scheduled_after(int i, int j, void *user)
{
	struct isl_any_scheduled_after_data *data = user;
	int dim = isl_set_dim(data->domain[i].set, isl_dim_set);
	int pos;

	for (pos = data->depth; pos < dim; ++pos) {
		int follows;

		follows = isl_set_follows_at(data->domain[i].set,
						data->domain[j].set, pos);

		if (follows < -1)
			return isl_bool_error;
		if (follows > 0)
			return isl_bool_true;
		if (follows < 0)
			return isl_bool_false;
	}

	if (isl_ast_build_has_schedule_node(data->build)) {
		int after;

		after = after_in_subtree(data->build, data->domain[i].map,
					    data->domain[j].map);
		if (after < 0 || after)
			return after < 0 ? isl_bool_error : isl_bool_true;
	}

	return data->group_coscheduled;
}
/* Look for independent components at the current depth and generate code
 * for each component separately.  The resulting lists of grafts are
 * merged in an attempt to combine grafts with identical guards.
 *
 * Code for two domains can be generated separately if all the elements
 * of one domain are scheduled before (or together with) all the elements
 * of the other domain.  We therefore consider the graph with as nodes
 * the domains and an edge between two nodes if any element of the first
 * node is scheduled after any element of the second node.
 * If the ast_build_group_coscheduled option is set, then we also add an edge
 * if there is any pair of elements in the two domains that are scheduled
 * together.
 * Code is then generated (by generate_component)
 * for each of the strongly connected components in this graph
 * in their topological order.
 *
 * Since the test is performed on the domain of the inverse schedules of
 * the different domains, we precompute these domains and store
 * them in data.domain.
 */
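/* Illustrative note (editor's addition): for an inverse schedule such as
 *
 *	{ [i] -> A[i] : 0 <= i < 10; [i] -> B[i] : 5 <= i < 15 }
 *
 * some A instances are scheduled after some B instances and vice versa,
 * so the graph has edges in both directions, the two domains end up in a
 * single strongly connected component and are handled together by
 * generate_component.  If instead all B instances came strictly after all
 * A instances, the only edge would go from B to A and the two components
 * would be generated separately, A before B, in topological order.
 * The relations are made up only to sketch the decision procedure above.
 */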
static __isl_give isl_ast_graft_list *generate_components(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i;
	isl_ctx *ctx = isl_ast_build_get_ctx(build);
	int n = isl_union_map_n_map(executed);
	struct isl_any_scheduled_after_data data;
	struct isl_set_map_pair *next;
	struct isl_tarjan_graph *g = NULL;
	isl_ast_graft_list *list = NULL;
	int n_domain = 0;

	data.domain = isl_calloc_array(ctx, struct isl_set_map_pair, n);
	if (!data.domain)
		goto error;
	n_domain = n;

	next = data.domain;
	if (isl_union_map_foreach_map(executed, &extract_domain, &next) < 0)
		goto error;

	if (!build)
		goto error;
	data.build = build;
	data.depth = isl_ast_build_get_depth(build);
	data.group_coscheduled = isl_options_get_ast_build_group_coscheduled(ctx);
	g = isl_tarjan_graph_init(ctx, n, &any_scheduled_after, &data);
	if (!g)
		goto error;

	list = isl_ast_graft_list_alloc(ctx, 0);

	i = 0;
	while (list && n) {
		isl_ast_graft_list *list_c;
		int first = i;

		if (g->order[i] == -1)
			isl_die(ctx, isl_error_internal, "cannot happen",
				goto error);
		while (g->order[i] != -1) {
			++i;
			--n;
		}

		list_c = generate_component(data.domain,
					    g->order + first, i - first,
					    isl_ast_build_copy(build));
		list = isl_ast_graft_list_merge(list, list_c, build);

		++i;
	}

	if (0)
error:		list = isl_ast_graft_list_free(list);
	isl_tarjan_graph_free(g);
	for (i = 0; i < n_domain; ++i) {
		isl_map_free(data.domain[i].map);
		isl_set_free(data.domain[i].set);
	}
	free(data.domain);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
/* Generate code for the next level (and all inner levels).
 *
 * If "executed" is empty, i.e., no code needs to be generated,
 * then we return an empty list.
 *
 * If we have already generated code for all loop levels, then we pass
 * control to generate_inner_level.
 *
 * If "executed" lives in a single space, i.e., if code needs to be
 * generated for a single domain, then there can only be a single
 * component and we go directly to generate_shifted_component.
 * Otherwise, we call generate_components to detect the components
 * and to call generate_component on each of them separately.
 */
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int depth;

	if (!build || !executed)
		goto error;

	if (isl_union_map_is_empty(executed)) {
		isl_ctx *ctx = isl_ast_build_get_ctx(build);
		isl_union_map_free(executed);
		isl_ast_build_free(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	depth = isl_ast_build_get_depth(build);
	if (depth >= isl_ast_build_dim(build, isl_dim_set))
		return generate_inner_level(executed, build);

	if (isl_union_map_n_map(executed) == 1)
		return generate_shifted_component(executed, build);

	return generate_components(executed, build);
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Internal data structure used by isl_ast_build_node_from_schedule_map.
 * internal, executed and build are the inputs to generate_code.
 * list collects the output.
 */
struct isl_generate_code_data {
	int internal;
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
/* Given an inverse schedule in terms of the external build schedule, i.e.,
 *
 *	[E -> S] -> D
 *
 * with E the external build schedule and S the additional schedule "space",
 * reformulate the inverse schedule in terms of the internal schedule domain,
 * i.e., return
 *
 *	[I -> S] -> D
 *
 * We first obtain a mapping
 *
 *	E -> I
 *
 * take the inverse and the product with S -> S, resulting in
 *
 *	[I -> S] -> [E -> S]
 *
 * Applying the map to the input produces the desired result.
 */
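/* Illustrative note (editor's addition): with an external schedule domain
 * E = { [e] }, an internal domain I = { [i, j] } related to it by a
 * projection such as { [i, j] -> [e = i] }, and an extra space S = { [s] },
 * an input relation of the form
 *
 *	{ [[e] -> [s]] -> D[...] }
 *
 * is turned into
 *
 *	{ [[i, j] -> [s]] -> D[...] }
 *
 * by composing its domain with the (reversed) schedule map extended with
 * S -> S, as described above.  The concrete spaces are made up only for
 * the sake of the example.
 */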
static __isl_give isl_union_map *internal_executed(
	__isl_take isl_union_map *executed, __isl_keep isl_space *space,
	__isl_keep isl_ast_build *build)
{
	isl_map *id, *proj;

	proj = isl_ast_build_get_schedule_map(build);
	proj = isl_map_reverse(proj);
	space = isl_space_map_from_set(isl_space_copy(space));
	id = isl_map_identity(space);
	proj = isl_map_product(proj, id);
	executed = isl_union_map_apply_domain(executed,
						isl_union_map_from_map(proj));

	return executed;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 * "space" is the space of the additional part of the schedule.
 * It is equal to the space of "set" if build->domain is parametric.
 * Otherwise, it is equal to the range of the wrapped space of "set".
 *
 * If the build space is not parametric and
 * if isl_ast_build_node_from_schedule_map
 * was called from an outside user (data->internal not set), then
 * the (inverse) schedule refers to the external build domain and needs to
 * be transformed to refer to the internal build domain.
 *
 * If the build space is parametric, then we add some of the parameter
 * constraints to the executed relation.  Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary.  We therefore approximate
 * the constraints on the parameters by a single disjunct set.
 *
 * The build is extended to include the additional part of the schedule.
 * If the original build space was not parametric, then the options
 * in data->build refer only to the additional part of the schedule
 * and they need to be adjusted to refer to the complete AST build
 * domain.
 *
 * After having adjusted inverse schedule and build, we start generating
 * code with the outer loop of the current code generation
 * in generate_next_level.
 *
 * If the original build space was not parametric, we undo the embedding
 * on the resulting isl_ast_node_list so that it can be used within
 * the outer AST build.
 */
static isl_stat generate_code_in_space(struct isl_generate_code_data *data,
	__isl_take isl_set *set, __isl_take isl_space *space)
{
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
	int embed;

	executed = isl_union_map_copy(data->executed);
	executed = isl_union_map_intersect_domain(executed,
						isl_union_set_from_set(set));

	embed = !isl_set_is_params(data->build->domain);
	if (embed && !data->internal)
		executed = internal_executed(executed, space, data->build);
	if (!embed) {
		isl_set *domain;
		domain = isl_ast_build_get_domain(data->build);
		domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
		executed = isl_union_map_intersect_params(executed, domain);
	}

	build = isl_ast_build_copy(data->build);
	build = isl_ast_build_product(build, space);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, embed);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return isl_stat_ok;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 *
 * If the build space S is not parametric, then the space of "set"
 * needs to be a wrapped relation with S as domain.  That is, it needs
 * to be of the form
 *
 *	[S -> T]
 *
 * Check this property and pass control to generate_code_in_space
 * passing along T.
 * If the build space is parametric, then T is the space of "set".
 */
static isl_stat generate_code_set(__isl_take isl_set *set, void *user)
{
	struct isl_generate_code_data *data = user;
	isl_space *space, *build_space;
	int is_domain;

	space = isl_set_get_space(set);

	if (isl_set_is_params(data->build->domain))
		return generate_code_in_space(data, set, space);

	build_space = isl_ast_build_get_space(data->build, data->internal);
	space = isl_space_unwrap(space);
	is_domain = isl_space_is_domain(build_space, space);
	isl_space_free(build_space);
	space = isl_space_range(space);

	if (is_domain < 0)
		goto error;
	if (!is_domain)
		isl_die(isl_set_get_ctx(set), isl_error_invalid,
			"invalid nested schedule space", goto error);

	return generate_code_in_space(data, set, space);
error:
	isl_set_free(set);
	isl_space_free(space);
	return isl_stat_error;
}
/* Generate an AST that visits the elements in the range of "executed"
 * in the relative order specified by the corresponding domain element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the domain space(s) of "executed"
 * need to be wrapped relations with S as domain.
 *
 * If the domain of "executed" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * If "internal" is set, then the domain "S" above refers to the internal
 * schedule domain representation.  Otherwise, it refers to the external
 * representation, as returned by isl_ast_build_get_schedule_space.
 *
 * We essentially run over all the spaces in the domain of "executed"
 * and call generate_code_set on each of them.
 */
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal)
{
	isl_ctx *ctx;
	struct isl_generate_code_data data = { 0 };
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_union_map *universe;

	if (!build)
		goto error;
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_align_params(space,
				    isl_union_map_get_space(executed));
	space = isl_space_align_params(space,
				    isl_union_map_get_space(build->options));
	build = isl_ast_build_align_params(build, isl_space_copy(space));
	executed = isl_union_map_align_params(executed, space);
	if (!executed || !build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	data.internal = internal;
	data.executed = executed;
	data.build = build;
	data.list = isl_ast_graft_list_alloc(ctx, 0);

	universe = isl_union_map_universe(isl_union_map_copy(executed));
	schedule_domain = isl_union_map_domain(universe);
	if (isl_union_set_foreach_set(schedule_domain, &generate_code_set,
					&data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_set_free(schedule_domain);
	isl_union_map_free(executed);

	isl_ast_build_free(build);
	return data.list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the corresponding image element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the range space(s) of "schedule"
 * need to be wrapped relations with S as domain.
 *
 * If the range of "schedule" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * We first initialize the local copies of the relevant options.
 * We do this here rather than when the isl_ast_build is created
 * because the options may have changed between the construction
 * of the isl_ast_build and the call to isl_generate_code.
 *
 * The main computation is performed on an inverse schedule (with
 * the schedule domain in the domain and the elements to be executed
 * in the range) called "executed".
 */
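/* Usage sketch (editor's addition, not part of the library): a minimal
 * caller of this entry point could look as follows, assuming the public
 * headers isl/ctx.h, isl/set.h, isl/union_map.h, isl/ast.h and
 * isl/ast_build.h are available.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_set *context = isl_set_read_from_str(ctx, "[n] -> { : n > 0 }");
 *	isl_union_map *schedule = isl_union_map_read_from_str(ctx,
 *		"[n] -> { A[i] -> [i] : 0 <= i < n }");
 *	isl_ast_build *build = isl_ast_build_from_context(context);
 *	isl_ast_node *node =
 *		isl_ast_build_node_from_schedule_map(build, schedule);
 *	char *str = isl_ast_node_to_C_str(node);
 *	printf("%s\n", str);
 *	free(str);
 *	isl_ast_node_free(node);
 *	isl_ast_build_free(build);
 *	isl_ctx_free(ctx);
 *
 * The schedule and context strings are made-up examples; any valid
 * schedule map with the properties described above can be used.
 */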
__isl_give isl_ast_node *isl_ast_build_node_from_schedule_map(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	isl_ast_graft_list *list;
	isl_ast_node *node;
	isl_union_map *executed;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	schedule = isl_union_map_coalesce(schedule);
	schedule = isl_union_map_remove_redundancies(schedule);
	executed = isl_union_map_reverse(schedule);
	list = generate_code(executed, isl_ast_build_copy(build), 0);
	node = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return node;
}
/* The old name for isl_ast_build_node_from_schedule_map.
 * It is being kept for backward compatibility, but
 * it will be removed in the future.
 */
__isl_give isl_ast_node *isl_ast_build_ast_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	return isl_ast_build_node_from_schedule_map(build, schedule);
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the band node "node" and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * If the band is empty, we continue with its descendants.
 * Otherwise, we extend the build and the inverse schedule with
 * the additional space/partial schedule and continue generating
 * an AST in generate_next_level.
 * As soon as we have extended the inverse schedule with the additional
 * partial schedule, we look for equalities that may exist between
 * the old and the new part.
 */
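/* Illustrative note (editor's addition): if the outer loops have produced
 *
 *	executed = { [i] -> A[i, j] : ... }
 *
 * and the band carries the partial schedule { A[i, j] -> [j] }, then the
 * domain product extends the inverse schedule to
 *
 *	{ [[i] -> [j]] -> A[i, j] : ... }
 *
 * after which isl_union_map_detect_equalities can discover relations
 * between the new dimension j and the outer iterators.  The concrete
 * relations are only an example of the construction described above.
 */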
static __isl_give isl_ast_graft_list *build_ast_from_band(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_space *space;
	isl_multi_union_pw_aff *extra;
	isl_union_map *extra_umap;
	isl_ast_graft_list *list;
	unsigned n1, n2;

	if (!build || !node || !executed)
		goto error;

	if (isl_schedule_node_band_n_member(node) == 0)
		return build_ast_from_child(build, node, executed);

	extra = isl_schedule_node_band_get_partial_schedule(node);
	extra = isl_multi_union_pw_aff_align_params(extra,
				isl_ast_build_get_space(build, 1));
	space = isl_multi_union_pw_aff_get_space(extra);

	extra_umap = isl_union_map_from_multi_union_pw_aff(extra);
	extra_umap = isl_union_map_reverse(extra_umap);

	executed = isl_union_map_domain_product(executed, extra_umap);
	executed = isl_union_map_detect_equalities(executed);

	n1 = isl_ast_build_dim(build, isl_dim_param);
	build = isl_ast_build_product(build, space);
	n2 = isl_ast_build_dim(build, isl_dim_param);
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"band node is not allowed to introduce new parameters",
			build = isl_ast_build_free(build));
	build = isl_ast_build_set_schedule_node(build, node);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, 1);

	return list;
error:
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Hoist a list of grafts (in practice containing a single graft)
 * from "sub_build" (which includes extra context information)
 * to "build".
 *
 * In particular, project out all additional parameters introduced
 * by the context node from the enforced constraints and the guard
 * of the single graft.
 */
static __isl_give isl_ast_graft_list *hoist_out_of_context(
	__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
{
	isl_ast_graft *graft;
	isl_basic_set *enforced;
	isl_set *guard;
	unsigned n_param, extra_param;

	if (!build || !sub_build)
		return isl_ast_graft_list_free(list);

	n_param = isl_ast_build_dim(build, isl_dim_param);
	extra_param = isl_ast_build_dim(sub_build, isl_dim_param);

	if (extra_param == n_param)
		return list;

	extra_param -= n_param;
	enforced = isl_ast_graft_list_extract_shared_enforced(list, sub_build);
	enforced = isl_basic_set_project_out(enforced, isl_dim_param,
							n_param, extra_param);
	enforced = isl_basic_set_remove_unknown_divs(enforced);
	guard = isl_ast_graft_list_extract_hoistable_guard(list, sub_build);
	guard = isl_set_remove_divs_involving_dims(guard, isl_dim_param,
							n_param, extra_param);
	guard = isl_set_project_out(guard, isl_dim_param, n_param, extra_param);
	guard = isl_set_compute_divs(guard);
	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
							build, sub_build);
	list = isl_ast_graft_list_from_ast_graft(graft);

	return list;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the context node "node"
 * and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * The context node may introduce additional parameters as well as
 * constraints on the outer schedule dimensions or original parameters.
 *
 * We add the extra parameters to a new build and the context
 * constraints to both the build and (as a single disjunct)
 * to the domain of "executed".  Since the context constraints
 * are specified in terms of the input schedule, we first need
 * to map them to the internal schedule domain.
 *
 * After constructing the AST from the descendants of "node",
 * we combine the list of grafts into a single graft within
 * the new build, in order to be able to exploit the additional
 * context constraints during this combination.
 *
 * Additionally, if the current node is the outermost node in
 * the schedule tree (apart from the root domain node), we generate
 * all pending guards, again to be able to exploit the additional
 * context constraints.  We currently do not do this for internal
 * context nodes since we may still want to hoist conditions
 * to outer AST nodes.
 *
 * If the context node introduced any new parameters, then they
 * are removed from the set of enforced constraints and guard
 * in hoist_out_of_context.
 */
static __isl_give isl_ast_graft_list *build_ast_from_context(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_set *context;
	isl_space *space;
	isl_multi_aff *internal2input;
	isl_ast_build *sub_build;
	isl_ast_graft_list *list;
	int n, depth;

	depth = isl_schedule_node_get_tree_depth(node);
	space = isl_ast_build_get_space(build, 1);
	context = isl_schedule_node_context_get_context(node);
	context = isl_set_align_params(context, space);
	sub_build = isl_ast_build_copy(build);
	space = isl_set_get_space(context);
	sub_build = isl_ast_build_align_params(sub_build, space);
	internal2input = isl_ast_build_get_internal2input(sub_build);
	context = isl_set_preimage_multi_aff(context, internal2input);
	sub_build = isl_ast_build_restrict_generated(sub_build,
					isl_set_copy(context));
	context = isl_set_from_basic_set(isl_set_simple_hull(context));
	executed = isl_union_map_intersect_domain(executed,
					isl_union_set_from_set(context));

	list = build_ast_from_child(isl_ast_build_copy(sub_build),
					node, executed);
	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		list = isl_ast_graft_list_free(list);

	list = isl_ast_graft_list_fuse(list, sub_build);
	if (depth == 1)
		list = isl_ast_graft_list_insert_pending_guard_nodes(list,
								sub_build);
	else
		list = hoist_out_of_context(list, build, sub_build);

	isl_ast_build_free(build);
	isl_ast_build_free(sub_build);

	return list;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the expansion node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We expand the domain elements by the expansion and
 * continue with the descendants of the node.
 */
static __isl_give isl_ast_graft_list *build_ast_from_expansion(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_union_map *expansion;
	unsigned n1, n2;

	expansion = isl_schedule_node_expansion_get_expansion(node);
	expansion = isl_union_map_align_params(expansion,
				isl_union_map_get_space(executed));

	n1 = isl_union_map_dim(executed, isl_dim_param);
	executed = isl_union_map_apply_range(executed, expansion);
	n2 = isl_union_map_dim(executed, isl_dim_param);
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"expansion node is not allowed to introduce "
			"new parameters", goto error);

	return build_ast_from_child(build, node, executed);
error:
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	return NULL;
}
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the extension node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Extend the inverse schedule with the extension applied to the current
 * set of generated constraints.  Since the extension is formulated
 * in terms of the input schedule, it first needs to be transformed
 * to refer to the internal schedule.
 */
static __isl_give isl_ast_graft_list *build_ast_from_extension(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_union_set *schedule_domain;
	isl_union_map *extension;
	isl_set *set;

	set = isl_ast_build_get_generated(build);
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	schedule_domain = isl_union_set_from_set(set);

	extension = isl_schedule_node_extension_get_extension(node);

	extension = isl_union_map_preimage_domain_multi_aff(extension,
			isl_multi_aff_copy(build->internal2input));
	extension = isl_union_map_intersect_domain(extension, schedule_domain);
	extension = isl_ast_build_substitute_values_union_map_domain(build,
								    extension);
	executed = isl_union_map_union(executed, extension);

	return build_ast_from_child(build, node, executed);
}
5316 /* Generate an AST that visits the elements in the domain of "executed"
5317 * in the relative order specified by the filter node "node" and
5320 * The relation "executed" maps the outer generated loop iterators
5321 * to the domain elements executed by those iterations.
5323 * We simply intersect the iteration domain (i.e., the range of "executed")
5324 * with the filter and continue with the descendants of the node,
5325 * unless the resulting inverse schedule is empty, in which
5326 * case we return an empty list.
5328 * If the result of the intersection is equal to the original "executed"
5329 * relation, then keep the original representation since the intersection
5330 * may have unnecessarily broken up the relation into a greater number
static __isl_give isl_ast_graft_list *build_ast_from_filter(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_ctx *ctx;
	isl_union_set *filter;
	isl_union_map *orig;
	isl_ast_graft_list *list;
	int empty;
	int unchanged;
	unsigned n1, n2;

	orig = isl_union_map_copy(executed);
	if (!build || !node || !executed)
		goto error;

	filter = isl_schedule_node_filter_get_filter(node);
	filter = isl_union_set_align_params(filter,
				isl_union_map_get_space(executed));
	n1 = isl_union_map_dim(executed, isl_dim_param);
	executed = isl_union_map_intersect_range(executed, filter);
	n2 = isl_union_map_dim(executed, isl_dim_param);
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"filter node is not allowed to introduce "
			"new parameters", goto error);

	unchanged = isl_union_map_is_subset(orig, executed);
	empty = isl_union_map_is_empty(executed);
	if (unchanged < 0 || empty < 0)
		goto error;
	if (unchanged) {
		isl_union_map_free(executed);
		return build_ast_from_child(build, node, orig);
	}
	isl_union_map_free(orig);
	if (!empty)
		return build_ast_from_child(build, node, executed);

	ctx = isl_ast_build_get_ctx(build);
	list = isl_ast_graft_list_alloc(ctx, 0);
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	return list;
error:
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);
	isl_union_map_free(orig);
	return NULL;
}
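
/* A hypothetical illustration of the filter step above (not part of
 * the original code): if "executed" is
 *
 *	{ [i] -> S1[i]; [i] -> S2[i] }
 *
 * and the filter of the node is { S1[x] }, then intersecting the range
 * of "executed" with the filter yields
 *
 *	{ [i] -> S1[i] }
 *
 * and an AST is only generated for the S1 instances.  If the filter had
 * been { S1[x]; S2[x] }, the intersection would leave "executed" unchanged
 * and the original representation would be kept.
 */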
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the guard node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Ensure that the associated guard is enforced by the outer AST
 * constructs by adding it to the guard of the graft.
 * Since we know that we will enforce the guard, we can also include it
 * in the generated constraints used to construct an AST for
 * the descendant nodes.
 */
static __isl_give isl_ast_graft_list *build_ast_from_guard(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_space *space;
	isl_set *guard, *hoisted;
	isl_basic_set *enforced;
	isl_ast_build *sub_build;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	unsigned n1, n2;

	space = isl_ast_build_get_space(build, 1);
	guard = isl_schedule_node_guard_get_guard(node);
	n1 = isl_space_dim(space, isl_dim_param);
	guard = isl_set_align_params(guard, space);
	n2 = isl_set_dim(guard, isl_dim_param);
	if (n2 > n1)
		isl_die(isl_ast_build_get_ctx(build), isl_error_invalid,
			"guard node is not allowed to introduce "
			"new parameters", guard = isl_set_free(guard));
	guard = isl_set_preimage_multi_aff(guard,
			isl_multi_aff_copy(build->internal2input));
	guard = isl_ast_build_specialize(build, guard);
	guard = isl_set_gist(guard, isl_set_copy(build->generated));

	sub_build = isl_ast_build_copy(build);
	sub_build = isl_ast_build_restrict_generated(sub_build,
							isl_set_copy(guard));

	list = build_ast_from_child(isl_ast_build_copy(sub_build),
							node, executed);

	hoisted = isl_ast_graft_list_extract_hoistable_guard(list, sub_build);
	if (isl_set_n_basic_set(hoisted) > 1)
		list = isl_ast_graft_list_gist_guards(list,
						    isl_set_copy(hoisted));
	guard = isl_set_intersect(guard, hoisted);
	enforced = extract_shared_enforced(list, build);
	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
						    build, sub_build);

	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
}
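
/* A hypothetical illustration of the guard step above (not part of
 * the original code): for a guard node with guard { : n >= 1 }, the
 * constraint n >= 1 is attached to the guard of the resulting graft,
 * so that the outer AST constructs generate a corresponding if-condition
 * (unless it is already enforced), and the same constraint is added to
 * the generated constraints of "sub_build", so that the AST for the
 * descendants can be simplified under the assumption n >= 1.
 */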
/* Call the before_each_mark callback, if requested by the user.
 *
 * Return 0 on success and -1 on error.
 *
 * The caller is responsible for recording the current inverse schedule
 * in the build.
 */
static isl_stat before_each_mark(__isl_keep isl_id *mark,
	__isl_keep isl_ast_build *build)
{
	if (!build)
		return isl_stat_error;
	if (!build->before_each_mark)
		return isl_stat_ok;
	return build->before_each_mark(mark, build,
					build->before_each_mark_user);
}
/* Call the after_each_mark callback, if requested by the user.
 *
 * The caller is responsible for recording the current inverse schedule
 * in the build.
 */
static __isl_give isl_ast_graft *after_each_mark(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_mark)
		return graft;
	graft->node = build->after_each_mark(graft->node, build,
						build->after_each_mark_user);
	if (!graft->node)
		return isl_ast_graft_free(graft);
	return graft;
}
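
/* Sketch (not part of the original code) of how a user would typically
 * install the callbacks invoked above, using the public setters
 * isl_ast_build_set_before_each_mark and isl_ast_build_set_after_each_mark
 * declared in isl/ast_build.h:
 *
 *	static isl_stat note_mark(__isl_keep isl_id *mark,
 *		__isl_keep isl_ast_build *build, void *user)
 *	{
 *		printf("entering mark %s\n", isl_id_get_name(mark));
 *		return isl_stat_ok;
 *	}
 *
 *	build = isl_ast_build_set_before_each_mark(build, &note_mark, NULL);
 */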
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the mark node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * Since we may be calling before_each_mark and after_each_mark
 * callbacks, we record the current inverse schedule in the build.
 *
 * We generate an AST for the child of the mark node, combine
 * the graft list into a single graft and then insert the mark
 * in the AST of that single graft.
 */
static __isl_give isl_ast_graft_list *build_ast_from_mark(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	isl_id *mark;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	int n;

	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	mark = isl_schedule_node_mark_get_id(node);
	if (before_each_mark(mark, build) < 0)
		node = isl_schedule_node_free(node);

	list = build_ast_from_child(isl_ast_build_copy(build), node, executed);
	list = isl_ast_graft_list_fuse(list, build);
	n = isl_ast_graft_list_n_ast_graft(list);
	if (n < 0)
		list = isl_ast_graft_list_free(list);
	if (n == 0) {
		isl_id_free(mark);
	} else {
		graft = isl_ast_graft_list_get_ast_graft(list, 0);
		graft = isl_ast_graft_insert_mark(graft, mark);
		graft = after_each_mark(graft, build);
		list = isl_ast_graft_list_set_ast_graft(list, 0, graft);
	}
	isl_ast_build_free(build);

	return list;
}
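
/* A hypothetical illustration of the mark handling above (not part of
 * the original code): for a mark node with identifier "kernel" whose
 * child produces a single for loop, the graft list is fused into one
 * graft and isl_ast_graft_insert_mark wraps its AST in an
 * isl_ast_node_mark with id "kernel", so that users (e.g., through the
 * after_each_mark callback or by inspecting the final AST) can
 * recognize the marked subtree.
 */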
static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed);
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the sequence (or set) node "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * We simply generate an AST for each of the children and concatenate
 * the results.
 */
static __isl_give isl_ast_graft_list *build_ast_from_sequence(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	int i, n;
	isl_ctx *ctx;
	isl_ast_graft_list *list;

	ctx = isl_ast_build_get_ctx(build);
	list = isl_ast_graft_list_alloc(ctx, 0);

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		isl_schedule_node *child;
		isl_ast_graft_list *list_i;

		child = isl_schedule_node_get_child(node, i);
		list_i = build_ast_from_schedule_node(isl_ast_build_copy(build),
					child, isl_union_map_copy(executed));
		list = isl_ast_graft_list_concat(list, list_i);
	}
	isl_ast_build_free(build);
	isl_schedule_node_free(node);
	isl_union_map_free(executed);

	return list;
}
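
/* A hypothetical illustration of the sequence handling above (not part of
 * the original code): for a sequence node with two filter children,
 * selecting { S1[x] } and { S2[x] }, an AST is generated for each child
 * from its own copy of "executed" and the two graft lists are simply
 * concatenated, so that within this subtree the code for the S1 instances
 * precedes the code for the S2 instances.
 */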
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the node "node" and its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * If the node is a leaf, then we pass control to generate_inner_level.
 * Note that the current build does not refer to any band node, so
 * that generate_inner_level will not try to visit the child of
 * the leaf node.
 *
 * The other node types are handled in separate functions.
 * Set nodes are currently treated in the same way as sequence nodes.
 * This is valid because the children of a set node may be executed
 * in any order, so in particular in the order of the children.
 */
static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	enum isl_schedule_node_type type;

	type = isl_schedule_node_get_type(node);

	switch (type) {
	case isl_schedule_node_error:
		goto error;
	case isl_schedule_node_leaf:
		isl_schedule_node_free(node);
		return generate_inner_level(executed, build);
	case isl_schedule_node_band:
		return build_ast_from_band(build, node, executed);
	case isl_schedule_node_context:
		return build_ast_from_context(build, node, executed);
	case isl_schedule_node_domain:
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"unexpected internal domain node", goto error);
	case isl_schedule_node_expansion:
		return build_ast_from_expansion(build, node, executed);
	case isl_schedule_node_extension:
		return build_ast_from_extension(build, node, executed);
	case isl_schedule_node_filter:
		return build_ast_from_filter(build, node, executed);
	case isl_schedule_node_guard:
		return build_ast_from_guard(build, node, executed);
	case isl_schedule_node_mark:
		return build_ast_from_mark(build, node, executed);
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		return build_ast_from_sequence(build, node, executed);
	}

	isl_die(isl_ast_build_get_ctx(build), isl_error_internal,
		"unhandled type", goto error);
error:
	isl_union_map_free(executed);
	isl_schedule_node_free(node);
	isl_ast_build_free(build);

	return NULL;
}
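
/* A hypothetical illustration of the dispatch above (not part of the
 * original code): for a schedule tree of the form
 *
 *	domain: { S1[i]; S2[i] : 0 <= i < n }
 *	  band: [{ S1[i] -> [i]; S2[i] -> [i] }]
 *	    sequence
 *	      filter: { S1[i] }
 *	      filter: { S2[i] }
 *
 * the band node is handled by build_ast_from_band, the sequence node by
 * build_ast_from_sequence and each filter node by build_ast_from_filter,
 * with the leaves below the filters ending up in generate_inner_level.
 * The domain node at the root is handled separately by
 * build_ast_from_domain and is not expected here.
 */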
/* Generate an AST that visits the elements in the domain of "executed"
 * in the relative order specified by the (single) child of "node" and
 * its descendants.
 *
 * The relation "executed" maps the outer generated loop iterators
 * to the domain elements executed by those iterations.
 *
 * This function is never called on a leaf, set or sequence node,
 * so the node always has exactly one child.
 */
static __isl_give isl_ast_graft_list *build_ast_from_child(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node,
	__isl_take isl_union_map *executed)
{
	node = isl_schedule_node_child(node, 0);
	return build_ast_from_schedule_node(build, node, executed);
}
/* Generate an AST that visits the elements in the domain of the domain
 * node "node" in the relative order specified by its descendants.
 *
 * An initial inverse schedule is created that maps a zero-dimensional
 * schedule space to the node domain.
 * The input "build" is assumed to have a parametric domain and
 * is replaced by the same zero-dimensional schedule space.
 *
 * We also add some of the parameter constraints in the build domain
 * to the executed relation.  Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary.  We therefore approximate
 * the constraints on the parameters by a single disjunct set.
 */
static __isl_give isl_ast_node *build_ast_from_domain(
	__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_union_set *domain, *schedule_domain;
	isl_union_map *executed;
	isl_space *space;
	isl_set *set;
	isl_ast_graft_list *list;
	isl_ast_node *ast;
	int is_params;

	if (!build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);
	space = isl_ast_build_get_space(build, 1);
	is_params = isl_space_is_params(space);
	isl_space_free(space);
	if (is_params < 0)
		goto error;
	if (!is_params)
		isl_die(ctx, isl_error_unsupported,
			"expecting parametric initial context", goto error);

	domain = isl_schedule_node_domain_get_domain(node);
	domain = isl_union_set_coalesce(domain);

	space = isl_union_set_get_space(domain);
	space = isl_space_set_from_params(space);
	build = isl_ast_build_product(build, space);

	set = isl_ast_build_get_domain(build);
	set = isl_set_from_basic_set(isl_set_simple_hull(set));
	schedule_domain = isl_union_set_from_set(set);

	executed = isl_union_map_from_domain_and_range(schedule_domain,
								domain);
	list = build_ast_from_child(isl_ast_build_copy(build), node, executed);
	ast = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return ast;
error:
	isl_schedule_node_free(node);
	isl_ast_build_free(build);
	return NULL;
}
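
/* A hypothetical illustration of the initial inverse schedule constructed
 * above (not part of the original code): for a domain node with domain
 *
 *	{ S[i] : 0 <= i < n }
 *
 * and a build domain with parameter constraint n <= 100, the
 * zero-dimensional schedule domain is { [] : n <= 100 } (a simple hull,
 * so a single disjunct) and the initial "executed" relation is
 *
 *	{ [] -> S[i] : 0 <= i < n and n <= 100 }
 *
 * which is then passed on to the descendants of the domain node.
 */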
/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the schedule tree.
 *
 * "build" is an isl_ast_build that has been created using
 * isl_ast_build_alloc or isl_ast_build_from_context based
 * on a parametric set.
 *
 * The construction starts at the root node of the schedule,
 * which is assumed to be a domain node.
 */
__isl_give isl_ast_node *isl_ast_build_node_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_schedule *schedule)
{
	isl_ctx *ctx;
	isl_schedule_node *node;

	if (!build || !schedule)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	if (isl_schedule_node_get_type(node) != isl_schedule_node_domain)
		isl_die(ctx, isl_error_unsupported,
			"expecting root domain node",
			build = isl_ast_build_free(build));
	return build_ast_from_domain(build, node);
error:
	isl_schedule_free(schedule);
	return NULL;
}
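
/* Sketch (not part of the original code) of a typical call to the entry
 * point above, starting from a parametric context set and a schedule
 * obtained elsewhere (e.g., from the scheduler); error handling omitted:
 *
 *	isl_set *context = isl_set_read_from_str(ctx, "[n] -> { : n > 0 }");
 *	isl_ast_build *build = isl_ast_build_from_context(context);
 *	isl_ast_node *tree = isl_ast_build_node_from_schedule(build, schedule);
 *	isl_ast_build_free(build);
 *	... use or print "tree" ...
 *	isl_ast_node_free(tree);
 */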