/*
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2014      INRIA Rocquencourt
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
 * B.P. 105 - 78153 Le Chesnay, France
 */
#include <isl/union_map.h>
#include <isl_tarjan.h>
#include <isl_ast_private.h>
#include <isl_ast_build_expr.h>
#include <isl_ast_build_private.h>
#include <isl_ast_graft_private.h>
/* Data used in generate_domain.
 *
 * "build" is the input build.
 * "list" collects the results.
 */
struct isl_generate_domain_data {
        isl_ast_build *build;

        isl_ast_graft_list *list;
};
static __isl_give isl_ast_graft_list *generate_next_level(
        __isl_take isl_union_map *executed,
        __isl_take isl_ast_build *build);
static __isl_give isl_ast_graft_list *generate_code(
        __isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
        int internal);
/* Generate an AST for a single domain based on
 * the (non single valued) inverse schedule "executed".
 *
 * We extend the schedule with the iteration domain
 * and continue generating through a call to generate_code.
 *
 * In particular, if executed has the form
 *
 *        S -> D
 *
 * then we continue generating code on
 *
 *        [S -> D] -> D
 *
 * The extended inverse schedule is clearly single valued
 * ensuring that the nested generate_code will not reach this function,
 * but will instead create calls to all elements of D that need
 * to be executed from the current schedule domain.
 */
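/* For example (an illustrative instance, not part of the original
 * comments), a non-single-valued inverse schedule of the form
 *
 *        { S[i] -> D[j] : 0 <= j <= i }
 *
 * is extended by the domain product with the identity relation
 * on its range into
 *
 *        { [S[i] -> D[j]] -> D[j] : 0 <= j <= i }
 *
 * which maps each extended schedule point to a single element of D.
 */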
static int generate_non_single_valued(__isl_take isl_map *executed,
        struct isl_generate_domain_data *data)
{
        isl_map *identity;
        isl_ast_build *build;
        isl_ast_graft_list *list;

        build = isl_ast_build_copy(data->build);

        identity = isl_set_identity(isl_map_range(isl_map_copy(executed)));
        executed = isl_map_domain_product(executed, identity);
        build = isl_ast_build_set_single_valued(build, 1);

        list = generate_code(isl_union_map_from_map(executed), build, 1);

        data->list = isl_ast_graft_list_concat(data->list, list);

        return 0;
}
/* Call the at_each_domain callback, if requested by the user,
 * after recording the current inverse schedule in the build.
 */
static __isl_give isl_ast_graft *at_each_domain(__isl_take isl_ast_graft *graft,
        __isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
{
        if (!graft || !build)
                return isl_ast_graft_free(graft);
        if (!build->at_each_domain)
                return graft;

        build = isl_ast_build_copy(build);
        build = isl_ast_build_set_executed(build,
                        isl_union_map_from_map(isl_map_copy(executed)));
        if (!build)
                return isl_ast_graft_free(graft);

        graft->node = build->at_each_domain(graft->node,
                        build, build->at_each_domain_user);
        isl_ast_build_free(build);

        if (!graft->node)
                graft = isl_ast_graft_free(graft);

        return graft;
}
/* Generate an AST for a single domain based on
 * the inverse schedule "executed" and add it to data->list.
 *
 * If there is more than one domain element associated to the current
 * schedule "time", then we need to continue the generation process
 * in generate_non_single_valued.
 * Note that the inverse schedule being single-valued may depend
 * on constraints that are only available in the original context
 * domain specified by the user.  We therefore first introduce
 * some of the constraints of data->build->domain.  In particular,
 * we intersect with a single-disjunct approximation of this set.
 * We perform this approximation to avoid further splitting up
 * the executed relation, possibly introducing a disjunctive guard
 * on the statement.
 *
 * On the other hand, we only perform the test after having taken the gist
 * of the domain as the resulting map is the one from which the call
 * expression is constructed.  Using this map to construct the call
 * expression usually yields simpler results.
 * Because we perform the single-valuedness test on the gisted map,
 * we may in rare cases fail to recognize that the inverse schedule
 * is single-valued.  This becomes problematic if this happens
 * from the recursive call through generate_non_single_valued
 * as we would then end up in an infinite recursion.
 * We therefore check if we are inside a call to generate_non_single_valued
 * and revert to the ungisted map if the gisted map turns out not to be
 * single-valued.
 *
 * Otherwise, we generate a call expression for the single executed
 * domain element and put a guard around it based on the (simplified)
 * domain of "executed".
 *
 * At this stage, any pending constraints in the build can no longer
 * be simplified with respect to any enforced constraints since
 * the call node does not have any enforced constraints.
 * We therefore turn all pending constraints into guards
 * (after simplifying them with respect to the already generated
 * constraints) and add them to both the generated constraints
 * and the guard of the constructed graft.  This guard will ensure
 * that the constraints are effectively generated.
 *
 * If the user has set an at_each_domain callback, it is called
 * on the constructed call expression node.
 */
static int generate_domain(__isl_take isl_map *executed, void *user)
{
        struct isl_generate_domain_data *data = user;
        isl_ast_build *build;
        isl_ast_graft *graft;
        isl_ast_graft_list *list;
        isl_set *guard, *domain;
        isl_map *map = NULL;
        int empty, sv;

        domain = isl_ast_build_get_domain(data->build);
        domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
        executed = isl_map_intersect_domain(executed, domain);
        empty = isl_map_is_empty(executed);
        if (empty < 0)
                goto error;
        if (empty) {
                isl_map_free(executed);
                return 0;
        }

        executed = isl_map_coalesce(executed);
        map = isl_map_copy(executed);
        map = isl_ast_build_compute_gist_map_domain(data->build, map);
        sv = isl_map_is_single_valued(map);
        if (sv < 0)
                goto error;
        if (!sv) {
                isl_map_free(map);
                if (data->build->single_valued)
                        map = isl_map_copy(executed);
                else
                        return generate_non_single_valued(executed, data);
        }
        guard = isl_map_domain(isl_map_copy(map));
        guard = isl_set_compute_divs(guard);
        guard = isl_set_intersect(guard,
                                    isl_ast_build_get_pending(data->build));
        guard = isl_set_coalesce(guard);
        guard = isl_ast_build_specialize(data->build, guard);
        guard = isl_set_gist(guard, isl_ast_build_get_generated(data->build));

        build = isl_ast_build_copy(data->build);
        build = isl_ast_build_replace_pending_by_guard(build,
                                                        isl_set_copy(guard));
        graft = isl_ast_graft_alloc_domain(map, build);
        graft = at_each_domain(graft, executed, build);
        isl_ast_build_free(build);
        isl_map_free(executed);
        graft = isl_ast_graft_add_guard(graft, guard, data->build);

        list = isl_ast_graft_list_from_ast_graft(graft);
        data->list = isl_ast_graft_list_concat(data->list, list);

        return 0;
error:
        isl_map_free(map);
        isl_map_free(executed);
        return -1;
}
/* Call build->create_leaf to create a "leaf" node in the AST,
 * encapsulate the result in an isl_ast_graft and return the result
 * as a 1-element list.
 *
 * Note that the node returned by the user may be an entire tree.
 *
 * Since the node itself cannot enforce any constraints, we turn
 * all pending constraints into guards and add them to the resulting
 * graft to ensure that they will be generated.
 *
 * Before we pass control to the user, we first clear some information
 * from the build that is (presumably) only meaningful
 * for the current code generation.
 * This includes the create_leaf callback itself, so we make a copy
 * of the build first.
 */
static __isl_give isl_ast_graft_list *call_create_leaf(
        __isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
        isl_set *guard;
        isl_ast_node *node;
        isl_ast_graft *graft;
        isl_ast_build *user_build;

        guard = isl_ast_build_get_pending(build);
        user_build = isl_ast_build_copy(build);
        user_build = isl_ast_build_replace_pending_by_guard(user_build,
                        isl_set_copy(guard));
        user_build = isl_ast_build_set_executed(user_build, executed);
        user_build = isl_ast_build_clear_local_info(user_build);
        if (!user_build)
                node = NULL;
        else
                node = build->create_leaf(user_build, build->create_leaf_user);
        graft = isl_ast_graft_alloc(node, build);
        graft = isl_ast_graft_add_guard(graft, guard, build);
        isl_ast_build_free(build);
        return isl_ast_graft_list_from_ast_graft(graft);
}
/* Generate an AST after having handled the complete schedule
 * of this call to the code generator.
 *
 * If the user has specified a create_leaf callback, control
 * is passed to the user in call_create_leaf.
 *
 * Otherwise, we generate one or more calls for each individual
 * domain in generate_domain.
 */
static __isl_give isl_ast_graft_list *generate_inner_level(
        __isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
        isl_ctx *ctx;
        struct isl_generate_domain_data data = { build };

        if (!build || !executed)
                goto error;

        if (build->create_leaf)
                return call_create_leaf(executed, build);

        ctx = isl_union_map_get_ctx(executed);
        data.list = isl_ast_graft_list_alloc(ctx, 0);
        if (isl_union_map_foreach_map(executed, &generate_domain, &data) < 0)
                data.list = isl_ast_graft_list_free(data.list);

        if (0)
error:          data.list = NULL;
        isl_ast_build_free(build);
        isl_union_map_free(executed);

        return data.list;
}
/* Call the before_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_node *before_each_for(__isl_take isl_ast_node *node,
        __isl_keep isl_ast_build *build)
{
        isl_id *id;

        if (!node || !build)
                return isl_ast_node_free(node);
        if (!build->before_each_for)
                return node;

        id = build->before_each_for(build, build->before_each_for_user);
        node = isl_ast_node_set_annotation(node, id);

        return node;
}
/* Call the after_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_graft *after_each_for(__isl_take isl_ast_graft *graft,
        __isl_keep isl_ast_build *build)
{
        if (!graft || !build)
                return isl_ast_graft_free(graft);
        if (!build->after_each_for)
                return graft;

        graft->node = build->after_each_for(graft->node, build,
                        build->after_each_for_user);

        if (!graft->node)
                return isl_ast_graft_free(graft);

        return graft;
}
/* Plug in all the known values of the current and outer dimensions
 * in the domain of "executed".  In principle, we only need to plug
 * in the known value of the current dimension since the values of
 * outer dimensions have been plugged in already.
 * However, it turns out to be easier to just plug in all known values.
 */
static __isl_give isl_union_map *plug_in_values(
        __isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
{
        return isl_ast_build_substitute_values_union_map_domain(build,
                                                                executed);
}
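/* For example (an illustrative case), if the build has determined that
 * the outer dimension can only attain the value n, then a domain element
 *
 *        [i0, i1] -> A[i0 + i1]
 *
 * of "executed" becomes
 *
 *        [n, i1] -> A[n + i1]
 */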
/* Check if the constraint "c" is a lower bound on dimension "pos",
 * an upper bound, or independent of dimension "pos".
 */
static int constraint_type(isl_constraint *c, int pos)
{
        if (isl_constraint_is_lower_bound(c, isl_dim_set, pos))
                return 1;
        if (isl_constraint_is_upper_bound(c, isl_dim_set, pos))
                return 2;
        return 0;
}
/* Compare the types of the constraints "a" and "b",
 * resulting in constraints that are independent of "depth"
 * to be sorted before the lower bounds on "depth", which in
 * turn are sorted before the upper bounds on "depth".
 */
static int cmp_constraint(__isl_keep isl_constraint *a,
        __isl_keep isl_constraint *b, void *user)
{
        int *depth = user;
        int t1 = constraint_type(a, *depth);
        int t2 = constraint_type(b, *depth);

        return t1 - t2;
}
/* Extract a lower bound on dimension "pos" from constraint "c".
 *
 * If the constraint is of the form
 *
 *        a x + f(...) >= 0
 *
 * then we essentially return
 *
 *        l = ceil(-f(...)/a)
 *
 * However, if the current dimension is strided, then we need to make
 * sure that the lower bound we construct is of the form
 *
 *        f + s a
 *
 * with f the offset and s the stride.
 * We therefore compute
 *
 *        f + s * ceil((l - f)/s)
 */
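/* As a worked example (numbers chosen here for illustration): with
 * stride s = 4, offset f = 1 and a plain lower bound l = 10, we obtain
 *
 *        1 + 4 * ceil((10 - 1)/4) = 1 + 4 * 3 = 13
 *
 * the smallest value of the form 1 + 4 a that is greater than or equal
 * to 10.
 */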
static __isl_give isl_aff *lower_bound(__isl_keep isl_constraint *c,
        int pos, __isl_keep isl_ast_build *build)
{
        isl_aff *aff;

        aff = isl_constraint_get_bound(c, isl_dim_set, pos);
        aff = isl_aff_ceil(aff);

        if (isl_ast_build_has_stride(build, pos)) {
                isl_aff *offset;
                isl_val *stride;

                offset = isl_ast_build_get_offset(build, pos);
                stride = isl_ast_build_get_stride(build, pos);

                aff = isl_aff_sub(aff, isl_aff_copy(offset));
                aff = isl_aff_scale_down_val(aff, isl_val_copy(stride));
                aff = isl_aff_ceil(aff);
                aff = isl_aff_scale_val(aff, stride);
                aff = isl_aff_add(aff, offset);
        }

        aff = isl_ast_build_compute_gist_aff(build, aff);

        return aff;
}
/* Return the exact lower bound (or upper bound if "upper" is set)
 * of "domain" as a piecewise affine expression.
 *
 * If we are computing a lower bound (of a strided dimension), then
 * we need to make sure it is of the form
 *
 *        f + s a
 *
 * where f is the offset and s is the stride.
 * We therefore need to include the stride constraint before computing
 * the minimum.
 */
static __isl_give isl_pw_aff *exact_bound(__isl_keep isl_set *domain,
        __isl_keep isl_ast_build *build, int upper)
{
        isl_set *stride;
        isl_map *it_map;
        isl_pw_aff *pa;
        isl_pw_multi_aff *pma;

        domain = isl_set_copy(domain);
        if (!upper) {
                stride = isl_ast_build_get_stride_constraint(build);
                domain = isl_set_intersect(domain, stride);
        }

        it_map = isl_ast_build_map_to_iterator(build, domain);
        if (upper)
                pma = isl_map_lexmax_pw_multi_aff(it_map);
        else
                pma = isl_map_lexmin_pw_multi_aff(it_map);
        pa = isl_pw_multi_aff_get_pw_aff(pma, 0);
        isl_pw_multi_aff_free(pma);
        pa = isl_ast_build_compute_gist_pw_aff(build, pa);
        pa = isl_pw_aff_coalesce(pa);

        return pa;
}
/* Extract a lower bound on dimension "pos" from each constraint
 * in "constraints" and return the list of lower bounds.
 * If "constraints" has zero elements, then we extract a lower bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *lower_bounds(
        __isl_keep isl_constraint_list *constraints, int pos,
        __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
        isl_ctx *ctx;
        isl_pw_aff_list *list;
        int i, n;

        if (!build)
                return NULL;

        n = isl_constraint_list_n_constraint(constraints);
        if (n == 0) {
                isl_pw_aff *pa;
                pa = exact_bound(domain, build, 0);
                return isl_pw_aff_list_from_pw_aff(pa);
        }

        ctx = isl_ast_build_get_ctx(build);
        list = isl_pw_aff_list_alloc(ctx, n);

        for (i = 0; i < n; ++i) {
                isl_aff *aff;
                isl_constraint *c;

                c = isl_constraint_list_get_constraint(constraints, i);
                aff = lower_bound(c, pos, build);
                isl_constraint_free(c);
                list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
        }

        return list;
}
/* Extract an upper bound on dimension "pos" from each constraint
 * in "constraints" and return the list of upper bounds.
 * If "constraints" has zero elements, then we extract an upper bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *upper_bounds(
        __isl_keep isl_constraint_list *constraints, int pos,
        __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
        isl_ctx *ctx;
        isl_pw_aff_list *list;
        int i, n;

        n = isl_constraint_list_n_constraint(constraints);
        if (n == 0) {
                isl_pw_aff *pa;
                pa = exact_bound(domain, build, 1);
                return isl_pw_aff_list_from_pw_aff(pa);
        }

        ctx = isl_ast_build_get_ctx(build);
        list = isl_pw_aff_list_alloc(ctx, n);

        for (i = 0; i < n; ++i) {
                isl_aff *aff;
                isl_constraint *c;

                c = isl_constraint_list_get_constraint(constraints, i);
                aff = isl_constraint_get_bound(c, isl_dim_set, pos);
                isl_constraint_free(c);
                aff = isl_aff_floor(aff);
                list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
        }

        return list;
}
/* Callback for sorting the isl_pw_aff_list passed to reduce_list.
 */
static int reduce_list_cmp(__isl_keep isl_pw_aff *a, __isl_keep isl_pw_aff *b,
        void *user)
{
        return isl_pw_aff_plain_cmp(a, b);
}
/* Return an isl_ast_expr that performs the reduction of type "type"
 * on AST expressions corresponding to the elements in "list".
 *
 * The list is assumed to contain at least one element.
 * If the list contains exactly one element, then the returned isl_ast_expr
 * simply computes that affine expression.
 * If the list contains more than one element, then we sort it
 * using a fairly arbitrary but hopefully reasonably stable order.
 */
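/* For example (an illustrative case), applying a reduction of type
 * isl_ast_op_min to the two-element list { n - 1, m - 1 } results in
 * an AST expression equivalent to min(n - 1, m - 1).
 */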
static __isl_give isl_ast_expr *reduce_list(enum isl_ast_op_type type,
        __isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
        int i, n;
        isl_ctx *ctx;
        isl_ast_expr *expr;

        if (!list)
                return NULL;

        n = isl_pw_aff_list_n_pw_aff(list);

        if (n == 1)
                return isl_ast_build_expr_from_pw_aff_internal(build,
                                isl_pw_aff_list_get_pw_aff(list, 0));

        ctx = isl_pw_aff_list_get_ctx(list);
        expr = isl_ast_expr_alloc_op(ctx, type, n);
        if (!expr)
                return NULL;

        list = isl_pw_aff_list_copy(list);
        list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
        if (!list)
                return isl_ast_expr_free(expr);

        for (i = 0; i < n; ++i) {
                isl_ast_expr *expr_i;

                expr_i = isl_ast_build_expr_from_pw_aff_internal(build,
                                isl_pw_aff_list_get_pw_aff(list, i));
                if (!expr_i)
                        goto error;
                expr->u.op.args[i] = expr_i;
        }

        isl_pw_aff_list_free(list);
        return expr;
error:
        isl_pw_aff_list_free(list);
        isl_ast_expr_free(expr);
        return NULL;
}
/* Add guards implied by the "generated constraints",
 * but not (necessarily) enforced by the generated AST to "guard".
 * In particular, if there are any stride constraints,
 * then add the guard implied by those constraints.
 * If we have generated a degenerate loop, then add the guard
 * implied by "bounds" on the outer dimensions, i.e., the guard
 * that ensures that the single value actually exists.
 */
static __isl_give isl_set *add_implied_guards(__isl_take isl_set *guard,
        int degenerate, __isl_keep isl_basic_set *bounds,
        __isl_keep isl_ast_build *build)
{
        int depth, has_stride;
        isl_set *dom;

        depth = isl_ast_build_get_depth(build);
        has_stride = isl_ast_build_has_stride(build, depth);
        if (!has_stride && !degenerate)
                return guard;

        if (degenerate) {
                bounds = isl_basic_set_copy(bounds);
                bounds = isl_basic_set_drop_constraints_not_involving_dims(
                                        bounds, isl_dim_set, depth, 1);
                dom = isl_set_from_basic_set(bounds);
                dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
                dom = isl_ast_build_compute_gist(build, dom);
                guard = isl_set_intersect(guard, dom);
        }

        if (has_stride) {
                dom = isl_ast_build_get_stride_constraint(build);
                dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
                dom = isl_ast_build_compute_gist(build, dom);
                guard = isl_set_intersect(guard, dom);
        }

        return guard;
}
/* Update "graft" based on "sub_build" for the degenerate case.
 *
 * "build" is the build in which graft->node was created
 * "sub_build" contains information about the current level itself,
 * including the single value attained.
 *
 * We set the initialization part of the for loop to the single
 * value attained by the current dimension.
 * The increment and condition are not strictly needed as they are known
 * to be "1" and "iterator <= value" respectively.
 */
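/* For example (an illustrative case), if the current dimension can only
 * attain the single value n, then the initialization is set to n and
 * the degenerate for node can be printed as an assignment of n to the
 * loop iterator rather than as an actual loop.
 */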
static __isl_give isl_ast_graft *refine_degenerate(
        __isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build,
        __isl_keep isl_ast_build *sub_build)
{
        isl_pw_aff *value;

        if (!graft || !sub_build)
                return isl_ast_graft_free(graft);

        value = isl_pw_aff_copy(sub_build->value);

        graft->node->u.f.init = isl_ast_build_expr_from_pw_aff_internal(build,
                                                value);
        if (!graft->node->u.f.init)
                return isl_ast_graft_free(graft);

        return graft;
}
/* Return the intersection of constraints in "list" as a set.
 */
static __isl_give isl_set *intersect_constraints(
        __isl_keep isl_constraint_list *list)
{
        int i, n;
        isl_basic_set *bset;

        n = isl_constraint_list_n_constraint(list);
        if (n < 1)
                isl_die(isl_constraint_list_get_ctx(list), isl_error_internal,
                        "expecting at least one constraint", return NULL);

        bset = isl_basic_set_from_constraint(
                        isl_constraint_list_get_constraint(list, 0));
        for (i = 1; i < n; ++i) {
                isl_basic_set *bset_i;

                bset_i = isl_basic_set_from_constraint(
                        isl_constraint_list_get_constraint(list, i));
                bset = isl_basic_set_intersect(bset, bset_i);
        }

        return isl_set_from_basic_set(bset);
}
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as a set "upper".
 *
 * In particular, if l(...) is a lower bound in "lower", and
 *
 *        -a i + f(...) >= 0        or        a i <= f(...)
 *
 * is an upper bound constraint on the current dimension i,
 * then the for loop enforces the constraint
 *
 *        -a l(...) + f(...) >= 0        or        a l(...) <= f(...)
 *
 * We therefore simply take each lower bound in turn, plug it into
 * the upper bounds and compute the intersection over all lower bounds.
 *
 * If a lower bound is a rational expression, then
 * isl_basic_set_preimage_multi_aff will force this rational
 * expression to have only integer values.  However, the loop
 * itself does not enforce this integrality constraint.  We therefore
 * use the ceil of the lower bounds instead of the lower bounds themselves.
 * Other constraints will make sure that the for loop is only executed
 * when each of the lower bounds attains an integral value.
 * In particular, potentially rational values only occur in
 * lower_bound if the offset is a (seemingly) rational expression,
 * but then outer conditions will make sure that this rational expression
 * only attains integer values.
 */
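/* As a small worked instance (chosen here for illustration): with a
 * single lower bound l(n) = 0 and upper bound constraint 2 i <= n,
 * plugging the (ceiled) lower bound into the upper bound yields the
 * enforced constraint 2 * 0 <= n, i.e., n >= 0.
 */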
static __isl_give isl_ast_graft *set_enforced_from_set(
        __isl_take isl_ast_graft *graft,
        __isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
{
        isl_space *space;
        isl_basic_set *enforced;
        isl_pw_multi_aff *pma;
        int i, n;

        if (!graft || !lower)
                return isl_ast_graft_free(graft);

        space = isl_set_get_space(upper);
        enforced = isl_basic_set_universe(isl_space_copy(space));

        space = isl_space_map_from_set(space);
        pma = isl_pw_multi_aff_identity(space);

        n = isl_pw_aff_list_n_pw_aff(lower);
        for (i = 0; i < n; ++i) {
                isl_pw_aff *pa;
                isl_set *enforced_i;
                isl_basic_set *hull;
                isl_pw_multi_aff *pma_i;

                pa = isl_pw_aff_list_get_pw_aff(lower, i);
                pa = isl_pw_aff_ceil(pa);
                pma_i = isl_pw_multi_aff_copy(pma);
                pma_i = isl_pw_multi_aff_set_pw_aff(pma_i, pos, pa);
                enforced_i = isl_set_copy(upper);
                enforced_i = isl_set_preimage_pw_multi_aff(enforced_i, pma_i);
                hull = isl_set_simple_hull(enforced_i);
                enforced = isl_basic_set_intersect(enforced, hull);
        }

        isl_pw_multi_aff_free(pma);

        graft = isl_ast_graft_enforce(graft, enforced);

        return graft;
}
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as
 * a list of affine expressions "upper".
 *
 * The enforced condition is that each lower bound expression is less
 * than or equal to each upper bound expression.
 */
static __isl_give isl_ast_graft *set_enforced_from_list(
        __isl_take isl_ast_graft *graft,
        __isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
{
        isl_set *cond;
        isl_basic_set *enforced;

        lower = isl_pw_aff_list_copy(lower);
        upper = isl_pw_aff_list_copy(upper);
        cond = isl_pw_aff_list_le_set(lower, upper);
        enforced = isl_set_simple_hull(cond);
        graft = isl_ast_graft_enforce(graft, enforced);

        return graft;
}
/* Does "aff" have a negative constant term?
 */
static int aff_constant_is_negative(__isl_take isl_set *set,
        __isl_take isl_aff *aff, void *user)
{
        int *neg = user;
        isl_val *v;

        v = isl_aff_get_constant_val(aff);
        *neg = isl_val_is_neg(v);
        isl_val_free(v);
        isl_set_free(set);
        isl_aff_free(aff);

        return *neg ? 0 : -1;
}
/* Does "pa" have a negative constant term over its entire domain?
 */
static int pw_aff_constant_is_negative(__isl_take isl_pw_aff *pa, void *user)
{
        int *neg = user;
        int r;

        r = isl_pw_aff_foreach_piece(pa, &aff_constant_is_negative, user);
        isl_pw_aff_free(pa);

        return *neg ? 0 : -1;
}
/* Does each element in "list" have a negative constant term?
 *
 * The callback terminates the iteration as soon as an element has been
 * found that does not have a negative constant term.
 */
static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
{
        int neg = 1;

        if (isl_pw_aff_list_foreach(list,
                        &pw_aff_constant_is_negative, &neg) < 0 && neg)
                return -1;

        return neg;
}
/* Add 1 to each of the elements in "list", where each of these elements
 * is defined over the internal schedule space of "build".
 */
static __isl_give isl_pw_aff_list *list_add_one(
        __isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
        int i, n;
        isl_space *space;
        isl_aff *aff;
        isl_pw_aff *one;

        space = isl_ast_build_get_space(build, 1);
        aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
        aff = isl_aff_add_constant_si(aff, 1);
        one = isl_pw_aff_from_aff(aff);

        n = isl_pw_aff_list_n_pw_aff(list);
        for (i = 0; i < n; ++i) {
                isl_pw_aff *pa;
                pa = isl_pw_aff_list_get_pw_aff(list, i);
                pa = isl_pw_aff_add(pa, isl_pw_aff_copy(one));
                list = isl_pw_aff_list_set_pw_aff(list, i, pa);
        }

        isl_pw_aff_free(one);

        return list;
}
/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a list of piecewise affine expressions.
 *
 * In particular, set the condition to
 *
 *        iterator <= min(list of upper bounds)
 *
 * If each of the upper bounds has a negative constant term, then
 * set the condition to
 *
 *        iterator < min(list of (upper bound + 1)s)
 *
 */
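/* For example (an illustrative case), if the upper bounds are n - 1
 * and m - 1, both with negative constant term -1, then the condition
 *
 *        iterator <= min(n - 1, m - 1)
 *
 * is instead expressed as
 *
 *        iterator < min(n, m)
 */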
static __isl_give isl_ast_graft *set_for_cond_from_list(
        __isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list,
        __isl_keep isl_ast_build *build)
{
        int neg;
        isl_ast_expr *bound, *iterator, *cond;
        enum isl_ast_op_type type = isl_ast_op_le;

        if (!graft || !list)
                return isl_ast_graft_free(graft);

        neg = list_constant_is_negative(list);
        if (neg < 0)
                return isl_ast_graft_free(graft);
        list = isl_pw_aff_list_copy(list);
        if (neg) {
                list = list_add_one(list, build);
                type = isl_ast_op_lt;
        }

        bound = reduce_list(isl_ast_op_min, list, build);
        iterator = isl_ast_expr_copy(graft->node->u.f.iterator);
        cond = isl_ast_expr_alloc_binary(type, iterator, bound);
        graft->node->u.f.cond = cond;

        isl_pw_aff_list_free(list);
        if (!graft->node->u.f.cond)
                return isl_ast_graft_free(graft);

        return graft;
}
/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a set.
 */
static __isl_give isl_ast_graft *set_for_cond_from_set(
        __isl_take isl_ast_graft *graft, __isl_keep isl_set *set,
        __isl_keep isl_ast_build *build)
{
        isl_ast_expr *cond;

        if (!graft)
                return NULL;

        cond = isl_ast_build_expr_from_set(build, isl_set_copy(set));
        graft->node->u.f.cond = cond;
        if (!graft->node->u.f.cond)
                return isl_ast_graft_free(graft);

        return graft;
}
/* Construct an isl_ast_expr for the increment (i.e., stride) of
 * the current dimension.
 */
static __isl_give isl_ast_expr *for_inc(__isl_keep isl_ast_build *build)
{
        int depth;
        isl_ctx *ctx;
        isl_val *v;

        if (!build)
                return NULL;

        ctx = isl_ast_build_get_ctx(build);
        depth = isl_ast_build_get_depth(build);

        if (!isl_ast_build_has_stride(build, depth))
                return isl_ast_expr_alloc_int_si(ctx, 1);

        v = isl_ast_build_get_stride(build, depth);
        return isl_ast_expr_from_val(v);
}
/* Should we express the loop condition as
 *
 *        iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints?
 *
 * The first is constructed from a list of upper bounds.
 * The second is constructed from a set.
 *
 * If there are no upper bounds in "constraints", then this could mean
 * that "domain" simply doesn't have an upper bound or that we didn't
 * pick any upper bound.  In the first case, we want to generate the
 * loop condition as a(n empty) conjunction of constraints.
 * In the second case, we will compute
 * a single upper bound from "domain" and so we use the list form.
 *
 * If there are upper bounds in "constraints",
 * then we use the list form iff the atomic_upper_bound option is set.
 */
static int use_upper_bound_list(isl_ctx *ctx, int n_upper,
        __isl_keep isl_set *domain, int depth)
{
        if (n_upper > 0)
                return isl_options_get_ast_build_atomic_upper_bound(ctx);
        else
                return isl_set_dim_has_upper_bound(domain, isl_dim_set, depth);
}
/* Fill in the expressions of the for node in graft->node.
 *
 * In particular,
 * - set the initialization part of the loop to the maximum of the lower bounds
 * - extract the increment from the stride of the current dimension
 * - construct the for condition either based on a list of upper bounds
 *        or on a set of upper bound constraints.
 */
static __isl_give isl_ast_graft *set_for_node_expressions(
        __isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower,
        int use_list, __isl_keep isl_pw_aff_list *upper_list,
        __isl_keep isl_set *upper_set, __isl_keep isl_ast_build *build)
{
        isl_ast_node *node;

        if (!graft)
                return NULL;

        build = isl_ast_build_copy(build);

        node = graft->node;
        node->u.f.init = reduce_list(isl_ast_op_max, lower, build);
        node->u.f.inc = for_inc(build);

        if (use_list)
                graft = set_for_cond_from_list(graft, upper_list, build);
        else
                graft = set_for_cond_from_set(graft, upper_set, build);

        isl_ast_build_free(build);

        if (!node->u.f.iterator || !node->u.f.init ||
            !node->u.f.cond || !node->u.f.inc)
                return isl_ast_graft_free(graft);

        return graft;
}
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "c_lower" and "c_upper" contain the lower and upper bounds
 * that the loop node should express.
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 *
 * There may be zero lower bounds or zero upper bounds in "constraints"
 * in case the list of constraints was created
 * based on the atomic option or based on separation with explicit bounds.
 * In that case, we use "domain" to derive lower and/or upper bounds.
 *
 * We first compute a list of one or more lower bounds.
 *
 * Then we decide if we want to express the condition as
 *
 *        iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints.
 *
 * The set of enforced constraints is then computed either based on
 * a list of upper bounds or on a set of upper bound constraints.
 * We do not compute any enforced constraints if we were forced
 * to compute a lower or upper bound using exact_bound.  The domains
 * of the resulting expressions may imply some bounds on outer dimensions
 * that we do not want to appear in the enforced constraints since
 * they are not actually enforced by the corresponding code.
 *
 * Finally, we fill in the expressions of the for node.
 */
static __isl_give isl_ast_graft *refine_generic_bounds(
        __isl_take isl_ast_graft *graft,
        __isl_take isl_constraint_list *c_lower,
        __isl_take isl_constraint_list *c_upper,
        __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
        int depth;
        isl_ctx *ctx;
        isl_pw_aff_list *lower;
        int use_list;
        isl_set *upper_set = NULL;
        isl_pw_aff_list *upper_list = NULL;
        int n_lower, n_upper;

        if (!graft || !c_lower || !c_upper || !build)
                goto error;

        depth = isl_ast_build_get_depth(build);
        ctx = isl_ast_graft_get_ctx(graft);

        n_lower = isl_constraint_list_n_constraint(c_lower);
        n_upper = isl_constraint_list_n_constraint(c_upper);

        use_list = use_upper_bound_list(ctx, n_upper, domain, depth);

        lower = lower_bounds(c_lower, depth, domain, build);

        if (use_list)
                upper_list = upper_bounds(c_upper, depth, domain, build);
        else if (n_upper > 0)
                upper_set = intersect_constraints(c_upper);
        else
                upper_set = isl_set_universe(isl_set_get_space(domain));

        if (n_lower == 0 || n_upper == 0)
                ;
        else if (use_list)
                graft = set_enforced_from_list(graft, lower, upper_list);
        else
                graft = set_enforced_from_set(graft, lower, depth, upper_set);

        graft = set_for_node_expressions(graft, lower, use_list, upper_list,
                                        upper_set, build);

        isl_pw_aff_list_free(lower);
        isl_pw_aff_list_free(upper_list);
        isl_set_free(upper_set);
        isl_constraint_list_free(c_lower);
        isl_constraint_list_free(c_upper);

        return graft;
error:
        isl_constraint_list_free(c_lower);
        isl_constraint_list_free(c_upper);
        return isl_ast_graft_free(graft);
}
/* Internal data structure used inside count_constraints to keep
 * track of the number of constraints that are independent of dimension "pos",
 * the lower bounds in "pos" and the upper bounds in "pos".
 */
struct isl_ast_count_constraints_data {
        int pos;

        int n_indep;
        int n_lower;
        int n_upper;
};
/* Increment data->n_indep, data->n_lower or data->n_upper depending
 * on whether "c" is independent of dimension data->pos,
 * a lower bound or an upper bound.
 */
static int count_constraints(__isl_take isl_constraint *c, void *user)
{
        struct isl_ast_count_constraints_data *data = user;

        if (isl_constraint_is_lower_bound(c, isl_dim_set, data->pos))
                data->n_lower++;
        else if (isl_constraint_is_upper_bound(c, isl_dim_set, data->pos))
                data->n_upper++;
        else
                data->n_indep++;

        isl_constraint_free(c);

        return 0;
}
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "list" represents the list of bounds that need to be encoded by
 * the for loop.  Only the constraints that involve the iterator
 * are relevant here.  The other constraints are taken care of by
 * the caller and are included in the generated constraints of "build".
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We separate lower bounds, upper bounds and constraints that
 * are independent of the loop iterator.
 *
 * The actual for loop bounds are generated in refine_generic_bounds.
 */
static __isl_give isl_ast_graft *refine_generic_split(
        __isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list,
        __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
        struct isl_ast_count_constraints_data data;
        isl_constraint_list *lower;
        isl_constraint_list *upper;

        if (!list)
                return isl_ast_graft_free(graft);

        data.pos = isl_ast_build_get_depth(build);

        list = isl_constraint_list_sort(list, &cmp_constraint, &data.pos);
        if (!list)
                return isl_ast_graft_free(graft);

        data.n_indep = data.n_lower = data.n_upper = 0;
        if (isl_constraint_list_foreach(list, &count_constraints, &data) < 0) {
                isl_constraint_list_free(list);
                return isl_ast_graft_free(graft);
        }

        lower = isl_constraint_list_drop(list, 0, data.n_indep);
        upper = isl_constraint_list_copy(lower);
        lower = isl_constraint_list_drop(lower, data.n_lower, data.n_upper);
        upper = isl_constraint_list_drop(upper, 0, data.n_lower);

        return refine_generic_bounds(graft, lower, upper, domain, build);
}
/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "bounds" represent the bounds that need to be encoded by
 * the for loop (or a guard around the for loop).
 * "domain" is the subset of "bounds" for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We break up "bounds" into a list of constraints and continue with
 * refine_generic_split.
 */
static __isl_give isl_ast_graft *refine_generic(
        __isl_take isl_ast_graft *graft,
        __isl_keep isl_basic_set *bounds, __isl_keep isl_set *domain,
        __isl_keep isl_ast_build *build)
{
        isl_constraint_list *list;

        if (!build || !graft)
                return isl_ast_graft_free(graft);

        list = isl_basic_set_get_constraint_list(bounds);

        graft = refine_generic_split(graft, list, domain, build);

        return graft;
}
/* Create a for node for the current level.
 *
 * Mark the for node degenerate if "degenerate" is set.
 */
static __isl_give isl_ast_node *create_for(__isl_keep isl_ast_build *build,
        int degenerate)
{
        int depth;
        isl_id *id;
        isl_ast_node *node;

        if (!build)
                return NULL;

        depth = isl_ast_build_get_depth(build);
        id = isl_ast_build_get_iterator_id(build, depth);
        node = isl_ast_node_alloc_for(id);
        if (degenerate)
                node = isl_ast_node_for_mark_degenerate(node);

        return node;
}
/* If the ast_build_exploit_nested_bounds option is set, then return
 * the constraints enforced by all elements in "list".
 * Otherwise, return the universe.
 */
static __isl_give isl_basic_set *extract_shared_enforced(
        __isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
{
        isl_ctx *ctx;
        isl_space *space;

        if (!list)
                return NULL;

        ctx = isl_ast_graft_list_get_ctx(list);
        if (isl_options_get_ast_build_exploit_nested_bounds(ctx))
                return isl_ast_graft_list_extract_shared_enforced(list, build);

        space = isl_ast_build_get_space(build, 1);
        return isl_basic_set_universe(space);
}
/* Return the pending constraints of "build" that are not already taken
 * care of (by a combination of "enforced" and the generated constraints
 * of "build").
 */
static __isl_give isl_set *extract_pending(__isl_keep isl_ast_build *build,
        __isl_keep isl_basic_set *enforced)
{
        isl_set *guard, *context;

        guard = isl_ast_build_get_pending(build);
        context = isl_set_from_basic_set(isl_basic_set_copy(enforced));
        context = isl_set_intersect(context,
                                        isl_ast_build_get_generated(build));
        return isl_set_gist(guard, context);
}
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 * It may be a strict subset of "bounds" in case "bounds" was created
 * based on the atomic option or based on separation with explicit bounds.
 *
 * "domain" may satisfy additional equalities that result
 * from intersecting "executed" with "bounds" in add_node.
 * It may also satisfy some global constraints that were dropped out because
 * we performed separation with explicit bounds.
 * The very first step is then to copy these constraints to "bounds".
 *
 * Since we may be calling before_each_for and after_each_for
 * callbacks, we record the current inverse schedule in the build.
 *
 * We consider three builds,
 * "build" is the one in which the current level is created,
 * "body_build" is the build in which the next level is created,
 * "sub_build" is essentially the same as "body_build", except that
 * the depth has not been increased yet.
 *
 * "build" already contains information (in strides and offsets)
 * about the strides at the current level, but this information is not
 * reflected in the build->domain.
 * We first add this information and the "bounds" to the sub_build->domain.
 * isl_ast_build_set_loop_bounds adds the stride information and
 * checks whether the current dimension attains
 * only a single value and whether this single value can be represented using
 * a single affine expression.
 * In the first case, the current level is considered "degenerate".
 * In the second, sub-case, the current level is considered "eliminated".
 * Eliminated levels don't need to be reflected in the AST since we can
 * simply plug in the affine expression.  For degenerate, but non-eliminated,
 * levels, we do introduce a for node, but mark it as degenerate so that
 * it can be printed as an assignment of the single value to the loop
 * iterator.
 *
 * If the current level is eliminated, we explicitly plug in the value
 * for the current level found by isl_ast_build_set_loop_bounds in the
 * inverse schedule.  This ensures that if we are working on a slice
 * of the domain based on information available in the inverse schedule
 * and the build domain, that then this information is also reflected
 * in the inverse schedule.  This operation also eliminates the current
 * dimension from the inverse schedule making sure no inner dimensions depend
 * on the current dimension.  Otherwise, we create a for node, marking
 * it degenerate if appropriate.  The initial for node is still incomplete
 * and will be completed in either refine_degenerate or refine_generic.
 *
 * We then generate a sequence of grafts for the next level,
 * create a surrounding graft for the current level and insert
 * the for node we created (if the current level is not eliminated).
 * Before creating a graft for the current level, we first extract
 * hoistable constraints from the child guards and combine them
 * with the pending constraints in the build.  These constraints
 * are used to simplify the child guards and then added to the guard
 * of the current graft to ensure that they will be generated.
 * If the hoisted guard is a disjunction, then we use it directly
 * to gist the guards on the children before intersecting it with the
 * pending constraints.  We do so because this disjunction is typically
 * identical to the guards on the children such that these guards
 * can be effectively removed completely.  After the intersection,
 * the gist operation would have a harder time figuring this out.
 *
 * Finally, we set the bounds of the for loop in either
 * refine_degenerate or refine_generic.
 * We do so in a context where the pending constraints of the build
 * have been replaced by the guard of the current graft.
 */
static __isl_give isl_ast_graft *create_node_scaled(
        __isl_take isl_union_map *executed,
        __isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
        __isl_take isl_ast_build *build)
{
        int depth;
        int degenerate, eliminated;
        isl_basic_set *hull;
        isl_basic_set *enforced;
        isl_set *guard, *hoisted;
        isl_ast_node *node = NULL;
        isl_ast_graft *graft;
        isl_ast_graft_list *children;
        isl_ast_build *sub_build;
        isl_ast_build *body_build;

        domain = isl_ast_build_eliminate_divs(build, domain);
        domain = isl_set_detect_equalities(domain);
        hull = isl_set_unshifted_simple_hull(isl_set_copy(domain));
        bounds = isl_basic_set_intersect(bounds, hull);
        build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

        depth = isl_ast_build_get_depth(build);
        sub_build = isl_ast_build_copy(build);
        sub_build = isl_ast_build_set_loop_bounds(sub_build,
                                                isl_basic_set_copy(bounds));
        degenerate = isl_ast_build_has_value(sub_build);
        eliminated = isl_ast_build_has_affine_value(sub_build, depth);
        if (degenerate < 0 || eliminated < 0)
                executed = isl_union_map_free(executed);
        if (eliminated)
                executed = plug_in_values(executed, sub_build);
        else
                node = create_for(build, degenerate);

        body_build = isl_ast_build_copy(sub_build);
        body_build = isl_ast_build_increase_depth(body_build);
        if (!eliminated)
                node = before_each_for(node, body_build);
        children = generate_next_level(executed,
                                        isl_ast_build_copy(body_build));

        enforced = extract_shared_enforced(children, build);
        guard = extract_pending(sub_build, enforced);
        hoisted = isl_ast_graft_list_extract_hoistable_guard(children, build);
        if (isl_set_n_basic_set(hoisted) > 1)
                children = isl_ast_graft_list_gist_guards(children,
                                                        isl_set_copy(hoisted));
        guard = isl_set_intersect(guard, hoisted);
        if (!eliminated)
                guard = add_implied_guards(guard, degenerate, bounds, build);

        graft = isl_ast_graft_alloc_from_children(children,
                        isl_set_copy(guard), enforced, build, sub_build);

        if (!eliminated) {
                isl_ast_build *for_build;

                bounds = isl_ast_build_compute_gist_basic_set(build, bounds);
                graft = isl_ast_graft_insert_for(graft, node);
                for_build = isl_ast_build_copy(build);
                for_build = isl_ast_build_replace_pending_by_guard(for_build,
                                                        isl_set_copy(guard));
                if (degenerate)
                        graft = refine_degenerate(graft, for_build, sub_build);
                else
                        graft = refine_generic(graft, bounds,
                                        domain, for_build);
                isl_ast_build_free(for_build);
        }
        isl_set_free(guard);
        if (!eliminated)
                graft = after_each_for(graft, body_build);

        isl_ast_build_free(body_build);
        isl_ast_build_free(sub_build);
        isl_ast_build_free(build);
        isl_basic_set_free(bounds);
        isl_set_free(domain);

        return graft;
}
/* Internal data structure for checking if all constraints involving
 * the input dimension "depth" are such that the other coefficients
 * are multiples of "m", reducing "m" if they are not.
 * If "m" is reduced all the way down to "1", then the check has failed
 * and we break out of the iteration.
 */
struct isl_check_scaled_data {
        int depth;
        isl_val *m;
};
/* If constraint "c" involves the input dimension data->depth,
 * then make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static int constraint_check_scaled(__isl_take isl_constraint *c, void *user)
{
        struct isl_check_scaled_data *data = user;
        int i, j, n;
        enum isl_dim_type t[] = { isl_dim_param, isl_dim_in, isl_dim_out,
                                    isl_dim_div };

        if (!isl_constraint_involves_dims(c, isl_dim_in, data->depth, 1)) {
                isl_constraint_free(c);
                return 0;
        }

        for (i = 0; i < 4; ++i) {
                n = isl_constraint_dim(c, t[i]);
                for (j = 0; j < n; ++j) {
                        isl_val *d;

                        if (t[i] == isl_dim_in && j == data->depth)
                                continue;
                        if (!isl_constraint_involves_dims(c, t[i], j, 1))
                                continue;
                        d = isl_constraint_get_coefficient_val(c, t[i], j);
                        data->m = isl_val_gcd(data->m, d);
                        if (isl_val_is_one(data->m))
                                break;
                }
                if (j < n)
                        break;
        }

        isl_constraint_free(c);

        return i < 4 ? -1 : 0;
}
/* For each constraint of "bmap" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static int basic_map_check_scaled(__isl_take isl_basic_map *bmap, void *user)
{
        int r;

        r = isl_basic_map_foreach_constraint(bmap,
                                                &constraint_check_scaled, user);
        isl_basic_map_free(bmap);

        return r;
}
/* For each constraint of "map" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static int map_check_scaled(__isl_take isl_map *map, void *user)
{
        int r;

        r = isl_map_foreach_basic_map(map, &basic_map_check_scaled, user);
        isl_map_free(map);

        return r;
}
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 *
 * Before moving on to the actual AST node construction in create_node_scaled,
 * we first check if the current dimension is strided and if we can scale
 * down this stride.  Note that we only do this if the ast_build_scale_strides
 * option is set.
 *
 * In particular, let the current dimension take on values
 *
 *        f + s a
 *
 * with a an integer.  We check if we can find an integer m that (obviously)
 * divides both f and s.
 *
 * If so, we check if the current dimension only appears in constraints
 * where the coefficients of the other variables are multiples of m.
 * We perform this extra check to avoid the risk of introducing
 * divisions by scaling down the current dimension.
 *
 * If so, we scale the current dimension down by a factor of m.
 * That is, we plug in
 *
 *        i = m i'                                                (1)
 *
 * Note that in principle we could always scale down strided loops
 * by plugging in
 *
 *        i = f + s i'                                                (2)
 *
 * but this may result in i' taking on larger values than the original i,
 * due to the shift by "f".
 * By contrast, the scaling in (1) can only reduce the (absolute) value "i".
 */
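/* As an illustrative instance (numbers not from the original comments),
 * if the current dimension takes on the values 6 + 4 a, then m = 2
 * divides both the offset 6 and the stride 4, and substituting
 * i = 2 i' as in (1) lets i' take on the values 3 + 2 a.
 */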
static __isl_give isl_ast_graft *create_node(__isl_take isl_union_map *executed,
        __isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
        __isl_take isl_ast_build *build)
{
        struct isl_check_scaled_data data;
        isl_ctx *ctx;
        isl_aff *offset;
        isl_val *d;

        ctx = isl_ast_build_get_ctx(build);
        if (!isl_options_get_ast_build_scale_strides(ctx))
                return create_node_scaled(executed, bounds, domain, build);

        data.depth = isl_ast_build_get_depth(build);
        if (!isl_ast_build_has_stride(build, data.depth))
                return create_node_scaled(executed, bounds, domain, build);

        offset = isl_ast_build_get_offset(build, data.depth);
        data.m = isl_ast_build_get_stride(build, data.depth);
        if (!data.m)
                offset = isl_aff_free(offset);
        offset = isl_aff_scale_down_val(offset, isl_val_copy(data.m));
        d = isl_aff_get_denominator_val(offset);
        if (!d)
                executed = isl_union_map_free(executed);

        if (executed && isl_val_is_divisible_by(data.m, d))
                data.m = isl_val_div(data.m, d);
        else {
                data.m = isl_val_set_si(data.m, 1);
                isl_val_free(d);
        }

        if (!isl_val_is_one(data.m)) {
                if (isl_union_map_foreach_map(executed, &map_check_scaled,
                                                &data) < 0 &&
                    !isl_val_is_one(data.m))
                        executed = isl_union_map_free(executed);
        }

        if (!isl_val_is_one(data.m)) {
                isl_space *space;
                isl_multi_aff *ma;
                isl_aff *aff;
                isl_map *map;
                isl_union_map *umap;

                space = isl_ast_build_get_space(build, 1);
                space = isl_space_map_from_set(space);
                ma = isl_multi_aff_identity(space);
                aff = isl_multi_aff_get_aff(ma, data.depth);
                aff = isl_aff_scale_val(aff, isl_val_copy(data.m));
                ma = isl_multi_aff_set_aff(ma, data.depth, aff);

                bounds = isl_basic_set_preimage_multi_aff(bounds,
                                                isl_multi_aff_copy(ma));
                domain = isl_set_preimage_multi_aff(domain,
                                                isl_multi_aff_copy(ma));
                map = isl_map_reverse(isl_map_from_multi_aff(ma));
                umap = isl_union_map_from_map(map);
                executed = isl_union_map_apply_domain(executed,
                                                isl_union_map_copy(umap));
                build = isl_ast_build_scale_down(build, isl_val_copy(data.m),
                                                umap);
        }

        isl_aff_free(offset);
        isl_val_free(data.m);

        return create_node_scaled(executed, bounds, domain, build);
}
/* Add the basic set to the list that "user" points to.
 */
static int collect_basic_set(__isl_take isl_basic_set *bset, void *user)
{
        isl_basic_set_list **list = user;

        *list = isl_basic_set_list_add(*list, bset);

        return 0;
}
/* Extract the basic sets of "set" and collect them in an isl_basic_set_list.
 */
static __isl_give isl_basic_set_list *isl_basic_set_list_from_set(
        __isl_take isl_set *set)
{
        int n;
        isl_ctx *ctx;
        isl_basic_set_list *list;

        if (!set)
                return NULL;

        ctx = isl_set_get_ctx(set);

        n = isl_set_n_basic_set(set);
        list = isl_basic_set_list_alloc(ctx, n);
        if (isl_set_foreach_basic_set(set, &collect_basic_set, &list) < 0)
                list = isl_basic_set_list_free(list);

        isl_set_free(set);
        return list;
}
/* Generate code for the schedule domain "bounds"
 * and add the result to "list".
 *
 * We mainly detect strides here and check if the bounds do not
 * conflict with the current build domain
 * and then pass over control to create_node.
 *
 * "bounds" reflects the bounds on the current dimension and possibly
 * some extra conditions on outer dimensions.
 * It does not, however, include any divs involving the current dimension,
 * so it does not capture any stride constraints.
 * We therefore need to compute that part of the schedule domain that
 * intersects with "bounds" and derive the strides from the result.
 */
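/* For example (an illustrative case), "bounds" may be { [i] : 0 <= i <= 10 }
 * while the domain of the intersected inverse schedule only contains
 * points with even i, from which a stride of 2 is then derived.
 */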
static __isl_give isl_ast_graft_list *add_node(
        __isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed,
        __isl_take isl_basic_set *bounds, __isl_take isl_ast_build *build)
{
        isl_ast_graft *graft;
        isl_set *domain = NULL;
        isl_union_set *uset;
        int empty, disjoint;

        uset = isl_union_set_from_basic_set(isl_basic_set_copy(bounds));
        executed = isl_union_map_intersect_domain(executed, uset);
        empty = isl_union_map_is_empty(executed);
        if (empty < 0)
                goto error;
        if (empty)
                goto done;

        uset = isl_union_map_domain(isl_union_map_copy(executed));
        domain = isl_set_from_union_set(uset);
        domain = isl_ast_build_specialize(build, domain);

        domain = isl_set_compute_divs(domain);
        domain = isl_ast_build_eliminate_inner(build, domain);
        disjoint = isl_set_is_disjoint(domain, build->domain);
        if (disjoint < 0)
                goto error;
        if (disjoint)
                goto done;

        build = isl_ast_build_detect_strides(build, isl_set_copy(domain));

        graft = create_node(executed, bounds, domain,
                                isl_ast_build_copy(build));
        list = isl_ast_graft_list_add(list, graft);
        isl_ast_build_free(build);
        return list;
error:
        list = isl_ast_graft_list_free(list);
done:
        isl_set_free(domain);
        isl_basic_set_free(bounds);
        isl_union_map_free(executed);
        isl_ast_build_free(build);
        return list;
}
/* Does any element of i follow or coincide with any element of j
 * at the current depth for equal values of the outer dimensions?
 */
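/* For depth 1, for instance (an illustrative sketch), the test relation
 * constructed below is
 *
 *        { [i0, i1] -> [j0, j1] : i0 = j0 and i1 >= j1 }
 *
 * restricted to elements of i in the domain and elements of j in the
 * range; "i" follows "j" iff this relation is non-empty.
 */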
static int domain_follows_at_depth(__isl_keep isl_basic_set *i,
        __isl_keep isl_basic_set *j, void *user)
{
        int depth = *(int *) user;
        isl_basic_map *test;
        int empty;
        int l;

        test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
                                                    isl_basic_set_copy(j));
        for (l = 0; l < depth; ++l)
                test = isl_basic_map_equate(test, isl_dim_in, l,
                                                isl_dim_out, l);
        test = isl_basic_map_order_ge(test, isl_dim_in, depth,
                                        isl_dim_out, depth);
        empty = isl_basic_map_is_empty(test);
        isl_basic_map_free(test);

        return empty < 0 ? -1 : !empty;
}
/* Split up each element of "list" into a part that is related to "bset"
 * according to "gt" and a part that is not.
 * Return a list that consists of "bset" and all the pieces.
 */
static __isl_give isl_basic_set_list *add_split_on(
        __isl_take isl_basic_set_list *list, __isl_take isl_basic_set *bset,
        __isl_keep isl_basic_map *gt)
{
        int i, n;
        isl_basic_set_list *res;

        if (!list)
                bset = isl_basic_set_free(bset);

        gt = isl_basic_map_copy(gt);
        gt = isl_basic_map_intersect_domain(gt, isl_basic_set_copy(bset));
        n = isl_basic_set_list_n_basic_set(list);
        res = isl_basic_set_list_from_basic_set(bset);
        for (i = 0; res && i < n; ++i) {
                isl_basic_set *bset;
                isl_set *set1, *set2;
                isl_basic_map *bmap;
                int empty;

                bset = isl_basic_set_list_get_basic_set(list, i);
                bmap = isl_basic_map_copy(gt);
                bmap = isl_basic_map_intersect_range(bmap, bset);
                bset = isl_basic_map_range(bmap);
                empty = isl_basic_set_is_empty(bset);
                if (empty < 0)
                        res = isl_basic_set_list_free(res);
                if (empty) {
                        isl_basic_set_free(bset);
                        bset = isl_basic_set_list_get_basic_set(list, i);
                        res = isl_basic_set_list_add(res, bset);
                        continue;
                }

                res = isl_basic_set_list_add(res, isl_basic_set_copy(bset));
                set1 = isl_set_from_basic_set(bset);
                bset = isl_basic_set_list_get_basic_set(list, i);
                set2 = isl_set_from_basic_set(bset);
                set1 = isl_set_subtract(set2, set1);
                set1 = isl_set_make_disjoint(set1);

                res = isl_basic_set_list_concat(res,
                                            isl_basic_set_list_from_set(set1));
        }

        isl_basic_map_free(gt);
        isl_basic_set_list_free(list);
        return res;
}
static __isl_give isl_ast_graft_list *generate_sorted_domains(
        __isl_keep isl_basic_set_list *domain_list,
        __isl_keep isl_union_map *executed,
        __isl_keep isl_ast_build *build);
/* Internal data structure for add_nodes.
 *
 * "executed" and "build" are extra arguments to be passed to add_node.
 * "list" collects the results.
 */
struct isl_add_nodes_data {
        isl_union_map *executed;
        isl_ast_build *build;

        isl_ast_graft_list *list;
};
/* Generate code for the schedule domains in "scc"
 * and add the results to "list".
 *
 * The domains in "scc" form a strongly connected component in the ordering.
 * If the number of domains in "scc" is larger than 1, then this means
 * that we cannot determine a valid ordering for the domains in the component.
 * This should be fairly rare because the individual domains
 * have been made disjoint first.
 * The problem is that the domains may be integrally disjoint but not
 * rationally disjoint.  For example, we may have domains
 *
 *        { [i,i] : 0 <= i <= 1 } and { [i,1-i] : 0 <= i <= 1 }
 *
 * These two domains have an empty intersection, but their rational
 * relaxations do intersect.  It is impossible to order these domains
 * in the second dimension because the first should be ordered before
 * the second for outer dimension equal to 0, while it should be ordered
 * after for outer dimension equal to 1.
 *
 * This may happen in particular in case of unrolling since the domain
 * of each slice is replaced by its simple hull.
 *
 * For each basic set i in "scc" and for each of the following basic sets j,
 * we split off that part of the basic set i that shares the outer dimensions
 * with j and lies before j in the current dimension.
 * We collect all the pieces in a new list that replaces "scc".
 *
 * While the elements in "scc" should be disjoint, we double-check
 * this property to avoid running into an infinite recursion in case
 * they intersect due to some internal error.
 */
static int add_nodes(__isl_take isl_basic_set_list *scc, void *user)
{
        struct isl_add_nodes_data *data = user;
        int i, n, depth;
        isl_basic_set *bset, *first;
        isl_basic_set_list *list;
        isl_space *space;
        isl_basic_map *gt;

        n = isl_basic_set_list_n_basic_set(scc);
        bset = isl_basic_set_list_get_basic_set(scc, 0);
        if (n == 1) {
                isl_basic_set_list_free(scc);
                data->list = add_node(data->list,
                                isl_union_map_copy(data->executed), bset,
                                isl_ast_build_copy(data->build));
                return data->list ? 0 : -1;
        }

        depth = isl_ast_build_get_depth(data->build);
        space = isl_basic_set_get_space(bset);
        space = isl_space_map_from_set(space);
        gt = isl_basic_map_universe(space);
        for (i = 0; i < depth; ++i)
                gt = isl_basic_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
        gt = isl_basic_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);

        first = isl_basic_set_copy(bset);
        list = isl_basic_set_list_from_basic_set(bset);
        for (i = 1; i < n; ++i) {
                int disjoint;

                bset = isl_basic_set_list_get_basic_set(scc, i);

                disjoint = isl_basic_set_is_disjoint(bset, first);
                if (disjoint < 0)
                        list = isl_basic_set_list_free(list);
                else if (!disjoint)
                        isl_die(isl_basic_set_list_get_ctx(scc),
                                isl_error_internal,
                                "basic sets in scc are assumed to be disjoint",
                                list = isl_basic_set_list_free(list));

                list = add_split_on(list, bset, gt);
        }
        isl_basic_set_free(first);
        isl_basic_map_free(gt);
        isl_basic_set_list_free(scc);
        scc = list;
        data->list = isl_ast_graft_list_concat(data->list,
                    generate_sorted_domains(scc, data->executed, data->build));
        isl_basic_set_list_free(scc);

        return data->list ? 0 : -1;
}
/* Sort the domains in "domain_list" according to the execution order
 * at the current depth (for equal values of the outer dimensions),
 * generate code for each of them, collecting the results in a list.
 * If no code is generated (because the intersection of the inverse schedule
 * with the domains turns out to be empty), then an empty list is returned.
 *
 * The caller is responsible for ensuring that the basic sets in "domain_list"
 * are pair-wise disjoint.  It can, however, in principle happen that
 * two basic sets should be ordered one way for one value of the outer
 * dimensions and the other way for some other value of the outer dimensions.
 * We therefore play safe and look for strongly connected components.
 * The function add_nodes takes care of handling non-trivial components.
 */
static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_add_nodes_data data;
	int depth;
	int n;

	if (!domain_list)
		return NULL;

	ctx = isl_basic_set_list_get_ctx(domain_list);
	n = isl_basic_set_list_n_basic_set(domain_list);
	data.list = isl_ast_graft_list_alloc(ctx, n);
	if (n == 0)
		return data.list;
	if (n == 1)
		return add_node(data.list, isl_union_map_copy(executed),
			isl_basic_set_list_get_basic_set(domain_list, 0),
			isl_ast_build_copy(build));

	depth = isl_ast_build_get_depth(build);
	data.executed = executed;
	data.build = build;
	if (isl_basic_set_list_foreach_scc(domain_list,
					&domain_follows_at_depth, &depth,
					&add_nodes, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	return data.list;
}
/* Do i and j share any values for the outer dimensions?
 */
static int shared_outer(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	int empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						   isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
						isl_dim_out, l);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return empty < 0 ? -1 : !empty;
}
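
/* A small illustration of the test above (the sets are hypothetical,
 * not taken from the original source): at depth 1, the basic sets
 * { [0, i] } and { [1, i] } share no value of the outer dimension,
 * so the corresponding domains end up in different components and
 * can be generated independently.
 */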
/* Internal data structure for generate_sorted_domains_wrap.
 *
 * "n" is the total number of basic sets
 * "executed" and "build" are extra arguments to be passed
 * to generate_sorted_domains.
 *
 * "single" is set to 1 by generate_sorted_domains_wrap if there
 * is only a single component.
 * "list" collects the results.
 */
struct isl_ast_generate_parallel_domains_data {
	int n;
	isl_union_map *executed;
	isl_ast_build *build;

	int single;
	isl_ast_graft_list *list;
};
/* Call generate_sorted_domains on "scc", fuse the result into a list
 * with either zero or one graft and collect these single element
 * lists into data->list.
 *
 * If there is only one component, i.e., if the number of basic sets
 * in the current component is equal to the total number of basic sets,
 * then data->single is set to 1 and the result of generate_sorted_domains
 * is not fused.
 */
static int generate_sorted_domains_wrap(__isl_take isl_basic_set_list *scc,
	void *user)
{
	struct isl_ast_generate_parallel_domains_data *data = user;
	isl_ast_graft_list *list;

	list = generate_sorted_domains(scc, data->executed, data->build);
	data->single = isl_basic_set_list_n_basic_set(scc) == data->n;
	if (!data->single)
		list = isl_ast_graft_list_fuse(list, data->build);
	if (!data->list)
		data->list = list;
	else
		data->list = isl_ast_graft_list_concat(data->list, list);

	isl_basic_set_list_free(scc);
	if (!data->list)
		return -1;

	return 0;
}
/* Look for any (weakly connected) components in the "domain_list"
 * of domains that share some values of the outer dimensions.
 * That is, domains in different components do not share any values
 * of the outer dimensions.  This means that these components
 * can be freely reordered.
 * Within each of the components, we sort the domains according
 * to the execution order at the current depth.
 *
 * If there is more than one component, then generate_sorted_domains_wrap
 * fuses the result of each call to generate_sorted_domains
 * into a list with either zero or one graft and collects these (at most)
 * single element lists into a bigger list.  This means that the elements
 * of the final list can be freely reordered.  In particular, we sort them
 * according to an arbitrary but fixed ordering to ease merging of
 * graft lists from different components.
 */
static __isl_give isl_ast_graft_list *generate_parallel_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	int depth;
	struct isl_ast_generate_parallel_domains_data data;

	if (!domain_list)
		return NULL;

	data.n = isl_basic_set_list_n_basic_set(domain_list);
	if (data.n <= 1)
		return generate_sorted_domains(domain_list, executed, build);

	depth = isl_ast_build_get_depth(build);
	data.list = NULL;
	data.executed = executed;
	data.build = build;
	data.single = 0;
	if (isl_basic_set_list_foreach_scc(domain_list, &shared_outer, &depth,
					    &generate_sorted_domains_wrap,
					    &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (!data.single)
		data.list = isl_ast_graft_list_sort_guard(data.list);

	return data.list;
}
/* Internal data for separate_domain.
 *
 * "explicit" is set if we only want to use explicit bounds.
 *
 * "domain" collects the separated domains.
 */
struct isl_separate_domain_data {
	isl_ast_build *build;
	int explicit;
	isl_set *domain;
};
/* Extract implicit bounds on the current dimension for the executed "map".
 *
 * The domain of "map" may involve inner dimensions, so we
 * need to eliminate them.
 */
static __isl_give isl_set *implicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;

	domain = isl_map_domain(map);
	domain = isl_ast_build_eliminate(build, domain);

	return domain;
}
/* Extract explicit bounds on the current dimension for the executed "map".
 *
 * Rather than eliminating the inner dimensions as in implicit_bounds,
 * we simply drop any constraints involving those inner dimensions.
 * The idea is that most bounds that are implied by constraints on the
 * inner dimensions will be enforced by for loops and not by explicit guards.
 * There is then no need to separate along those bounds.
 */
static __isl_give isl_set *explicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;
	int depth;
	unsigned dim;

	dim = isl_map_dim(map, isl_dim_out);
	map = isl_map_drop_constraints_involving_dims(map, isl_dim_out, 0, dim);

	domain = isl_map_domain(map);
	depth = isl_ast_build_get_depth(build);
	dim = isl_set_dim(domain, isl_dim_set);
	domain = isl_set_detect_equalities(domain);
	domain = isl_set_drop_constraints_involving_dims(domain,
				isl_dim_set, depth + 1, dim - (depth + 1));
	domain = isl_set_remove_divs_involving_dims(domain,
				isl_dim_set, depth, 1);
	domain = isl_set_remove_unknown_divs(domain);

	return domain;
}
/* Split data->domain into pieces that intersect with the range of "map"
 * and pieces that do not intersect with the range of "map"
 * and then add that part of the range of "map" that does not intersect
 * with data->domain.
 */
static int separate_domain(__isl_take isl_map *map, void *user)
{
	struct isl_separate_domain_data *data = user;
	isl_set *domain;
	isl_set *d1, *d2;

	if (data->explicit)
		domain = explicit_bounds(map, data->build);
	else
		domain = implicit_bounds(map, data->build);

	domain = isl_set_coalesce(domain);
	domain = isl_set_make_disjoint(domain);
	d1 = isl_set_subtract(isl_set_copy(domain), isl_set_copy(data->domain));
	d2 = isl_set_subtract(isl_set_copy(data->domain), isl_set_copy(domain));
	data->domain = isl_set_intersect(data->domain, domain);
	data->domain = isl_set_union(data->domain, d1);
	data->domain = isl_set_union(data->domain, d2);

	return 0;
}
/* Separate the schedule domains of "executed".
 *
 * That is, break up the domain of "executed" into basic sets,
 * such that for each basic set S, every element in S is associated with
 * the same domain spaces.
 *
 * "space" is the (single) domain space of "executed".
 */
static __isl_give isl_set *separate_schedule_domains(
	__isl_take isl_space *space, __isl_take isl_union_map *executed,
	__isl_keep isl_ast_build *build)
{
	struct isl_separate_domain_data data = { build };
	isl_ctx *ctx;

	ctx = isl_ast_build_get_ctx(build);
	data.explicit = isl_options_get_ast_build_separation_bounds(ctx) ==
			    ISL_AST_BUILD_SEPARATION_BOUNDS_EXPLICIT;
	data.domain = isl_set_empty(space);
	if (isl_union_map_foreach_map(executed, &separate_domain, &data) < 0)
		data.domain = isl_set_free(data.domain);

	isl_union_map_free(executed);
	return data.domain;
}
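
/* As an illustration of separation (the inverse schedule below is
 * hypothetical, not from the original source): if "executed" is
 *
 *	{ [i] -> A[i] : 0 <= i < 10; [i] -> B[i] : 5 <= i < 15 }
 *
 * then the schedule domain gets broken up into { [i] : 0 <= i < 5 }
 * (only A), { [i] : 5 <= i < 10 } (both A and B) and
 * { [i] : 10 <= i < 15 } (only B).
 */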
/* Temporary data used during the search for a lower bound for unrolling.
 *
 * "domain" is the original set for which to find a lower bound
 * "depth" is the dimension for which to find a lower bound
 *
 * "lower" is the best lower bound found so far.  It is NULL if we have not
 * found any yet.
 * "n" is the corresponding size.  If lower is NULL, then the value of n
 * is undefined.
 */
struct isl_find_unroll_data {
	isl_set *domain;
	int depth;

	isl_aff *lower;
	int *n;
};
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 *
 * If "c" does not involve the dimension at the current depth,
 * then we cannot use it.
 * Otherwise, let "c" be of the form
 *
 *	i >= f(j)/a
 *
 * We compute the maximal value of
 *
 *	-ceil(f(j)/a) + i + 1
 *
 * over the domain.  If there is such a value "n", then we know
 *
 *	-ceil(f(j)/a) + i + 1 <= n
 *
 * or
 *
 *	i < ceil(f(j)/a) + n
 *
 * meaning that we can use ceil(f(j)/a) as a lower bound for unrolling.
 * We just need to check if we have found any lower bound before and
 * if the new lower bound is better (smaller n) than the previously found
 * lower bounds.
 */
static int update_unrolling_lower_bound(struct isl_find_unroll_data *data,
	__isl_keep isl_constraint *c)
{
	isl_aff *aff, *lower;
	isl_val *max;

	if (!isl_constraint_is_lower_bound(c, isl_dim_set, data->depth))
		return 0;

	lower = isl_constraint_get_bound(c, isl_dim_set, data->depth);
	lower = isl_aff_ceil(lower);
	aff = isl_aff_copy(lower);
	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, data->depth, 1);
	aff = isl_aff_add_constant_si(aff, 1);
	max = isl_set_max_val(data->domain, aff);
	isl_aff_free(aff);

	if (!max)
		goto error;
	if (isl_val_is_infty(max)) {
		isl_val_free(max);
		isl_aff_free(lower);
		return 0;
	}

	if (isl_val_cmp_si(max, INT_MAX) <= 0 &&
	    (!data->lower || isl_val_cmp_si(max, *data->n) < 0)) {
		isl_aff_free(data->lower);
		data->lower = lower;
		*data->n = isl_val_get_num_si(max);
	} else
		isl_aff_free(lower);
	isl_val_free(max);

	return 1;
error:
	isl_aff_free(lower);
	return -1;
}
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 */
static int constraint_find_unroll(__isl_take isl_constraint *c, void *user)
{
	struct isl_find_unroll_data *data;
	int r;

	data = (struct isl_find_unroll_data *) user;
	r = update_unrolling_lower_bound(data, c);
	isl_constraint_free(c);

	return r;
}
/* Look for a lower bound l(i) on the dimension at "depth"
 * and a size n such that "domain" is a subset of
 *
 *	{ [i] : l(i) <= i_d < l(i) + n }
 *
 * where d is "depth" and l(i) depends only on earlier dimensions.
 * Furthermore, try and find a lower bound such that n is as small as possible.
 * In particular, "n" needs to be finite.
 *
 * Inner dimensions have been eliminated from "domain" by the caller.
 *
 * We first construct a collection of lower bounds on the input set
 * by computing its simple hull.  We then iterate through them,
 * discarding those that we cannot use (either because they do not
 * involve the dimension at "depth" or because they have no corresponding
 * upper bound, meaning that "n" would be unbounded) and pick out the
 * best from the remaining ones.
 *
 * If we cannot find a suitable lower bound, then we consider that
 * to be an error.
 */
static __isl_give isl_aff *find_unroll_lower_bound(__isl_keep isl_set *domain,
	int depth, int *n)
{
	struct isl_find_unroll_data data = { domain, depth, NULL, n };
	isl_basic_set *hull;

	hull = isl_set_simple_hull(isl_set_copy(domain));

	if (isl_basic_set_foreach_constraint(hull,
					    &constraint_find_unroll, &data) < 0)
		goto error;

	isl_basic_set_free(hull);

	if (!data.lower)
		isl_die(isl_set_get_ctx(domain), isl_error_invalid,
			"cannot find lower bound for unrolling", return NULL);

	return data.lower;
error:
	isl_basic_set_free(hull);
	return isl_aff_free(data.lower);
}
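
/* A worked instance of the computation above (the domain is hypothetical,
 * not from the original source): for "domain" { [j, i] : j <= i < j + 4 }
 * and depth 1, the simple hull contains the lower bound i >= j, so
 * lower = j, and the maximal value of -j + i + 1 over the domain is 4.
 * The dimension can therefore be unrolled with l(j) = j and n = 4.
 */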
/* Return the constraint
 *
 *	i_"depth" = aff + offset
 */
static __isl_give isl_constraint *at_offset(int depth, __isl_keep isl_aff *aff,
	int offset)
{
	aff = isl_aff_copy(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, depth, -1);
	aff = isl_aff_add_constant_si(aff, offset);
	return isl_equality_from_aff(aff);
}
/* Data structure for storing the results and the intermediate objects
 * of compute_domains.
 *
 * "list" is the main result of the function and contains a list
 * of disjoint basic sets for which code should be generated.
 *
 * "executed" and "build" are inputs to compute_domains.
 * "schedule_domain" is the domain of "executed".
 *
 * "option" contains the domains at the current depth that should be
 * atomic, separated or unrolled.  These domains are as specified by
 * the user, except that inner dimensions have been eliminated and
 * that they have been made pair-wise disjoint.
 *
 * "sep_class" contains the user-specified split into separation classes
 * specialized to the current depth.
 * "done" contains the union of the separation domains that have already
 * been handled.
 */
struct isl_codegen_domains {
	isl_basic_set_list *list;

	isl_union_map *executed;
	isl_ast_build *build;
	isl_set *schedule_domain;

	isl_set *option[3];

	isl_map *sep_class;
	isl_set *done;
};
/* Extend domains->list with a list of basic sets, one for each value
 * of the current dimension in "domain" and remove the corresponding
 * sets from the class domain.  Return the updated class domain.
 * The divs that involve the current dimension have not been projected out
 * from this domain.
 *
 * Since we are going to be iterating over the individual values,
 * we first check if there are any strides on the current dimension.
 * If there is, we rewrite the current dimension i as
 *
 *	i = stride i' + offset
 *
 * and then iterate over individual values of i' instead.
 *
 * We then look for a lower bound on i' and a size such that the domain
 * is a subset of
 *
 *	{ [j,i'] : l(j) <= i' < l(j) + n }
 *
 * and then take slices of the domain at values of i'
 * between l(j) and l(j) + n - 1.
 *
 * We compute the unshifted simple hull of each slice to ensure that
 * we have a single basic set per offset.  The slicing constraint
 * may get simplified away before the unshifted simple hull is taken
 * and may therefore in some rare cases disappear from the result.
 * We therefore explicitly add the constraint back after computing
 * the unshifted simple hull to ensure that the basic sets
 * remain disjoint.  The constraints that are dropped by taking the hull
 * will be taken into account at the next level, as in the case of the
 * atomic option.
 *
 * Finally, we map i' back to i and add each basic set to the list.
 * Since we may have dropped some constraints, we intersect with
 * the class domain again to ensure that each element in the list
 * is disjoint from the other class domains.
 */
static __isl_give isl_set *do_unroll(struct isl_codegen_domains *domains,
	__isl_take isl_set *domain, __isl_take isl_set *class_domain)
{
	int i, n;
	int depth;
	isl_ctx *ctx;
	isl_aff *lower;
	isl_multi_aff *expansion;
	isl_basic_map *bmap;
	isl_set *unroll_domain;
	isl_ast_build *build;

	if (!domain)
		return isl_set_free(class_domain);

	ctx = isl_set_get_ctx(domain);
	depth = isl_ast_build_get_depth(domains->build);
	build = isl_ast_build_copy(domains->build);
	domain = isl_ast_build_eliminate_inner(build, domain);
	domain = isl_set_intersect(domain, isl_ast_build_get_domain(build));
	build = isl_ast_build_detect_strides(build, isl_set_copy(domain));
	expansion = isl_ast_build_get_stride_expansion(build);

	domain = isl_set_preimage_multi_aff(domain,
					    isl_multi_aff_copy(expansion));
	domain = isl_ast_build_eliminate_divs(build, domain);

	isl_ast_build_free(build);

	lower = find_unroll_lower_bound(domain, depth, &n);
	if (!lower)
		class_domain = isl_set_free(class_domain);

	bmap = isl_basic_map_from_multi_aff(expansion);

	unroll_domain = isl_set_empty(isl_set_get_space(domain));

	for (i = 0; class_domain && i < n; ++i) {
		isl_set *set;
		isl_basic_set *bset;
		isl_constraint *slice;
		isl_basic_set_list *list;

		slice = at_offset(depth, lower, i);
		set = isl_set_copy(domain);
		set = isl_set_add_constraint(set, isl_constraint_copy(slice));
		bset = isl_set_unshifted_simple_hull(set);
		bset = isl_basic_set_add_constraint(bset, slice);
		bset = isl_basic_set_apply(bset, isl_basic_map_copy(bmap));
		set = isl_set_from_basic_set(bset);
		unroll_domain = isl_set_union(unroll_domain, isl_set_copy(set));
		set = isl_set_intersect(set, isl_set_copy(class_domain));
		set = isl_set_make_disjoint(set);
		list = isl_basic_set_list_from_set(set);
		domains->list = isl_basic_set_list_concat(domains->list, list);
	}

	class_domain = isl_set_subtract(class_domain, unroll_domain);

	isl_aff_free(lower);
	isl_set_free(domain);
	isl_basic_map_free(bmap);

	return class_domain;
}
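
/* For instance (a hypothetical example, not from the original source):
 * if "domain" is { [i] : 0 <= i <= 8 and i mod 2 = 0 }, then stride
 * detection yields i = 2 i', the lower bound search returns l() = 0
 * and n = 5, and the five slices i' = 0, ..., 4 are mapped back to
 * i = 0, 2, 4, 6, 8.
 */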
/* Add domains to domains->list for each individual value of the current
 * dimension, for that part of the schedule domain that lies in the
 * intersection of the option domain and the class domain.
 * Remove the corresponding sets from the class domain and
 * return the updated class domain.
 *
 * We first break up the unroll option domain into individual pieces
 * and then handle each of them separately.  The unroll option domain
 * has been made disjoint in compute_domains_init_options.
 *
 * Note that we actively want to combine different pieces of the
 * schedule domain that have the same value at the current dimension.
 * We therefore need to break up the unroll option domain before
 * intersecting with class and schedule domain, hoping that the
 * unroll option domain specified by the user is relatively simple.
 */
static __isl_give isl_set *compute_unroll_domains(
	struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
	isl_set *unroll_domain;
	isl_basic_set_list *unroll_list;
	int i, n;
	int empty;

	empty = isl_set_is_empty(domains->option[unroll]);
	if (empty < 0)
		return isl_set_free(class_domain);
	if (empty)
		return class_domain;

	unroll_domain = isl_set_copy(domains->option[unroll]);
	unroll_list = isl_basic_set_list_from_set(unroll_domain);

	n = isl_basic_set_list_n_basic_set(unroll_list);
	for (i = 0; i < n; ++i) {
		isl_basic_set *bset;

		bset = isl_basic_set_list_get_basic_set(unroll_list, i);
		unroll_domain = isl_set_from_basic_set(bset);
		unroll_domain = isl_set_intersect(unroll_domain,
						    isl_set_copy(class_domain));
		unroll_domain = isl_set_intersect(unroll_domain,
					isl_set_copy(domains->schedule_domain));

		empty = isl_set_is_empty(unroll_domain);
		if (empty >= 0 && empty) {
			isl_set_free(unroll_domain);
			continue;
		}

		class_domain = do_unroll(domains, unroll_domain, class_domain);
	}

	isl_basic_set_list_free(unroll_list);

	return class_domain;
}
/* Try and construct a single basic set that includes the intersection of
 * the schedule domain, the atomic option domain and the class domain.
 * Add the resulting basic set(s) to domains->list and remove them
 * from class_domain.  Return the updated class domain.
 *
 * We construct a single domain rather than trying to combine
 * the schedule domains of individual domains because we are working
 * within a single component so that non-overlapping schedule domains
 * should already have been separated.
 * We do however need to make sure that this single domain is a subset
 * of the class domain so that it would not intersect with any other
 * class domains.  This means that we may end up splitting up the atomic
 * domain in case separation classes are being used.
 *
 * "domain" is the intersection of the schedule domain and the class domain,
 * with inner dimensions projected out.
 */
static __isl_give isl_set *compute_atomic_domain(
	struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
{
	isl_basic_set *bset;
	isl_basic_set_list *list;
	isl_set *domain, *atomic_domain;
	int empty;

	domain = isl_set_copy(domains->option[atomic]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));
	empty = isl_set_is_empty(domain);
	if (empty < 0)
		class_domain = isl_set_free(class_domain);
	if (empty) {
		isl_set_free(domain);
		return class_domain;
	}

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_coalesce(domain);
	bset = isl_set_unshifted_simple_hull(domain);
	domain = isl_set_from_basic_set(bset);
	atomic_domain = isl_set_copy(domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	class_domain = isl_set_subtract(class_domain, atomic_domain);
	domain = isl_set_make_disjoint(domain);
	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return class_domain;
}
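
/* To illustrate why the atomic domain may grow (a hypothetical example):
 * if the user marks { [i] : 0 <= i < 5 or 7 <= i < 10 } as atomic, then
 * the unshifted simple hull turns it into the single basic set
 * { [i] : 0 <= i < 10 }, which also covers i = 5 and i = 6.
 * Subtracting this hull from "class_domain" keeps the generated
 * domains disjoint.
 */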
/* Split up the schedule domain into uniform basic sets,
 * in the sense that each element in a basic set is associated to
 * elements of the same domains, and add the result to domains->list.
 * Do this for that part of the schedule domain that lies in the
 * intersection of "class_domain" and the separate option domain.
 *
 * "class_domain" may or may not include the constraints
 * of the schedule domain, but this does not make a difference
 * since we are going to intersect it with the domain of the inverse schedule.
 * If it includes schedule domain constraints, then they may involve
 * inner dimensions, but we will eliminate them in separation_domain.
 */
static int compute_separate_domain(struct isl_codegen_domains *domains,
	__isl_keep isl_set *class_domain)
{
	isl_space *space;
	isl_set *domain;
	isl_union_map *executed;
	isl_basic_set_list *list;
	int empty;

	domain = isl_set_copy(domains->option[separate]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	executed = isl_union_map_copy(domains->executed);
	executed = isl_union_map_intersect_domain(executed,
				    isl_union_set_from_set(domain));
	empty = isl_union_map_is_empty(executed);
	if (empty < 0 || empty) {
		isl_union_map_free(executed);
		return empty < 0 ? -1 : 0;
	}

	space = isl_set_get_space(class_domain);
	domain = separate_schedule_domains(space, executed, domains->build);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return 0;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the given separation class domain.
 *
 * If any separation classes have been defined, then "class_domain"
 * is the domain of the current class and does not refer to inner dimensions.
 * Otherwise, "class_domain" is the universe domain.
 *
 * We first make sure that the class domain is disjoint from
 * previously considered class domains.
 *
 * The separate domains can be computed directly from the "class_domain".
 *
 * The unroll, atomic and remainder domains need the constraints
 * from the schedule domain.
 *
 * For unrolling, the actual schedule domain is needed (with divs that
 * may refer to the current dimension) so that stride detection can be
 * performed.
 *
 * For atomic and remainder domains, inner dimensions and divs involving
 * the current dimensions should be eliminated.
 * In case we are working within a separation class, we need to intersect
 * the result with the current "class_domain" to ensure that the domains
 * are disjoint from those generated from other class domains.
 *
 * The domain that has been made atomic may be larger than specified
 * by the user since it needs to be representable as a single basic set.
 * This possibly larger domain is removed from class_domain by
 * compute_atomic_domain.  It is computed first so that the extended domain
 * would not overlap with any domains computed before.
 * Similarly, the unrolled domains may have some constraints removed and
 * may therefore also be larger than specified by the user.
 *
 * If anything is left after handling separate, unroll and atomic,
 * we split it up into basic sets and append the basic sets to domains->list.
 */
static int compute_partial_domains(struct isl_codegen_domains *domains,
	__isl_take isl_set *class_domain)
{
	isl_basic_set_list *list;
	isl_set *domain;

	class_domain = isl_set_subtract(class_domain,
					isl_set_copy(domains->done));
	domains->done = isl_set_union(domains->done,
					isl_set_copy(class_domain));

	class_domain = compute_atomic_domain(domains, class_domain);
	class_domain = compute_unroll_domains(domains, class_domain);

	domain = isl_set_copy(class_domain);

	if (compute_separate_domain(domains, domain) < 0)
		goto error;
	domain = isl_set_subtract(domain,
				isl_set_copy(domains->option[separate]));

	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));

	domain = isl_set_coalesce(domain);
	domain = isl_set_make_disjoint(domain);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	isl_set_free(class_domain);

	return 0;
error:
	isl_set_free(domain);
	isl_set_free(class_domain);
	return -1;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the separation class identified by "pnt".
 *
 * We extract the corresponding class domain from domains->sep_class,
 * eliminate inner dimensions and pass control to compute_partial_domains.
 */
static int compute_class_domains(__isl_take isl_point *pnt, void *user)
{
	struct isl_codegen_domains *domains = user;
	isl_set *class_set;
	isl_set *domain;
	int disjoint;

	class_set = isl_set_from_point(pnt);
	domain = isl_map_domain(isl_map_intersect_range(
				isl_map_copy(domains->sep_class), class_set));
	domain = isl_ast_build_compute_gist(domains->build, domain);
	domain = isl_ast_build_eliminate(domains->build, domain);

	disjoint = isl_set_plain_is_disjoint(domain, domains->schedule_domain);
	if (disjoint < 0)
		return -1;
	if (disjoint) {
		isl_set_free(domain);
		return 0;
	}

	return compute_partial_domains(domains, domain);
}
/* Extract the domains at the current depth that should be atomic,
 * separated or unrolled and store them in option.
 *
 * The domains specified by the user might overlap, so we make
 * them disjoint by subtracting earlier domains from later domains.
 */
static void compute_domains_init_options(isl_set *option[3],
	__isl_keep isl_ast_build *build)
{
	enum isl_ast_build_domain_type type, type2;

	for (type = atomic; type <= separate; ++type) {
		option[type] = isl_ast_build_get_option_domain(build, type);
		for (type2 = atomic; type2 < type; ++type2)
			option[type] = isl_set_subtract(option[type],
						isl_set_copy(option[type2]));
	}

	option[unroll] = isl_set_coalesce(option[unroll]);
	option[unroll] = isl_set_make_disjoint(option[unroll]);
}
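
/* For example (hypothetical option domains, not from the original source):
 * if the user requests atomic = { [i] : 0 <= i < 10 } and
 * separate = { [i] : 5 <= i < 15 }, then the separate option domain is
 * reduced to { [i] : 10 <= i < 15 }, so that each schedule element is
 * subject to at most one option.
 */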
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately,
 * based on the user-specified options.
 * Return the list of disjoint basic sets.
 *
 * There are three kinds of domains that we need to keep track of.
 * - the "schedule domain" is the domain of "executed"
 * - the "class domain" is the domain corresponding to the current
 *	separation class
 * - the "option domain" is the domain corresponding to one of the options
 *	atomic, unroll or separate
 *
 * We first consider the individual values of the separation classes
 * and split up the domain for each of them separately.
 * Finally, we consider the remainder.  If no separation classes were
 * specified, then we call compute_partial_domains with the universe
 * "class_domain".  Otherwise, we take the "schedule_domain" as "class_domain",
 * with inner dimensions removed.  We do this because we want to
 * avoid computing the complement of the class domains (i.e., the difference
 * between the universe and domains->done).
 */
static __isl_give isl_basic_set_list *compute_domains(
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	struct isl_codegen_domains domains;
	isl_ctx *ctx;
	isl_set *domain;
	isl_union_set *schedule_domain;
	isl_set *classes;
	isl_space *space;
	int n_param;
	enum isl_ast_build_domain_type type;
	int empty;

	if (!executed)
		return NULL;

	ctx = isl_union_map_get_ctx(executed);
	domains.list = isl_basic_set_list_alloc(ctx, 0);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	compute_domains_init_options(domains.option, build);

	domains.sep_class = isl_ast_build_get_separation_class(build);
	classes = isl_map_range(isl_map_copy(domains.sep_class));
	n_param = isl_set_dim(classes, isl_dim_param);
	classes = isl_set_project_out(classes, isl_dim_param, 0, n_param);

	space = isl_set_get_space(domain);
	domains.build = build;
	domains.schedule_domain = isl_set_copy(domain);
	domains.executed = executed;
	domains.done = isl_set_empty(space);

	if (isl_set_foreach_point(classes, &compute_class_domains, &domains) < 0)
		domains.list = isl_basic_set_list_free(domains.list);
	isl_set_free(classes);

	empty = isl_set_is_empty(domains.done);
	if (empty < 0) {
		domains.list = isl_basic_set_list_free(domains.list);
		domain = isl_set_free(domain);
	} else if (empty) {
		isl_set_free(domain);
		domain = isl_set_universe(isl_set_get_space(domains.done));
	} else
		domain = isl_ast_build_eliminate(build, domain);

	if (compute_partial_domains(&domains, domain) < 0)
		domains.list = isl_basic_set_list_free(domains.list);

	isl_set_free(domains.schedule_domain);
	isl_set_free(domains.done);
	isl_map_free(domains.sep_class);
	for (type = atomic; type <= separate; ++type)
		isl_set_free(domains.option[type]);

	return domains.list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * We first split up the domain at the current depth into disjoint
 * basic sets based on the user-specified options.
 * Then we generate code for each of them and concatenate the results.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list = NULL;

	domain_list = compute_domains(executed, build);
	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
struct isl_set_map_pair {
	isl_set *set;
	isl_map *map;
};
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * return the union of the "map" fields of the elements
 * indexed by the first "n" elements of "order".
 */
static __isl_give isl_union_map *construct_component_executed(
	struct isl_set_map_pair *domain, int *order, int n)
{
	int i;
	isl_map *map;
	isl_union_map *executed;

	map = isl_map_copy(domain[order[0]].map);
	executed = isl_union_map_from_map(map);
	for (i = 1; i < n; ++i) {
		map = isl_map_copy(domain[order[i]].map);
		executed = isl_union_map_add_map(executed, map);
	}

	return executed;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_from_list(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	isl_union_map *executed;

	executed = construct_component_executed(domain, order, n);
	return generate_shifted_component(executed, build);
}
/* Does set dimension "pos" of "set" have an obviously fixed value?
 */
static int dim_is_fixed(__isl_keep isl_set *set, int pos)
{
	int fixed;
	isl_val *v;

	v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, pos);
	if (!v)
		return -1;
	fixed = !isl_val_is_nan(v);
	isl_val_free(v);

	return fixed;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * do all (except for at most one) of the "set" field of the elements
 * indexed by the first "n" elements of "order" have a fixed value
 * at position "depth"?
 */
static int at_most_one_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth)
{
	int i;
	int non_fixed = -1;

	for (i = 0; i < n; ++i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		if (non_fixed >= 0)
			return 0;
		non_fixed = i;
	}

	return 1;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * eliminate the inner dimensions from the "set" field of the elements
 * indexed by the first "n" elements of "order", provided the current
 * dimension does not have a fixed value.
 *
 * Return the index of the first element in "order" with a corresponding
 * "set" field that does not have an (obviously) fixed value.
 */
static int eliminate_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth, __isl_keep isl_ast_build *build)
{
	int i;
	int base = -1;

	for (i = n - 1; i >= 0; --i) {
		int f;
		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		domain[order[i]].set = isl_ast_build_eliminate_inner(build,
							domain[order[i]].set);
		base = i;
	}

	return base;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * find the element of "domain" (amongst those indexed by the first "n"
 * elements of "order") with the "set" field that has the smallest
 * value for the current iterator.
 *
 * Note that the domain with the smallest value may depend on the parameters
 * and/or outer loop dimension.  Since the result of this function is only
 * used as heuristic, we only make a reasonable attempt at finding the best
 * domain, one that should work in case a single domain provides the smallest
 * value for the current dimension over all values of the parameters
 * and outer dimensions.
 *
 * In particular, we compute the smallest value of the first domain
 * and replace it by that of any later domain if that later domain
 * has a smallest value that is smaller for at least some value
 * of the parameters and outer dimensions.
 */
static int first_offset(struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_ast_build *build)
{
	int i;
	isl_map *min_first;
	int first = 0;

	min_first = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[0]].set));
	min_first = isl_map_lexmin(min_first);

	for (i = 1; i < n; ++i) {
		isl_map *min, *test;
		int empty;

		min = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[i]].set));
		min = isl_map_lexmin(min);
		test = isl_map_copy(min);
		test = isl_map_apply_domain(isl_map_copy(min_first), test);
		test = isl_map_order_lt(test, isl_dim_in, 0, isl_dim_out, 0);
		empty = isl_map_is_empty(test);
		isl_map_free(test);
		if (empty >= 0 && !empty) {
			isl_map_free(min_first);
			first = i;
			min_first = min;
		} else
			isl_map_free(min);

		if (empty < 0)
			break;
	}

	isl_map_free(min_first);

	return i < n ? -1 : first;
}
/* Construct a shifted inverse schedule based on the original inverse schedule,
 * the stride and the offset.
 *
 * The original inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * "stride" and "offset" are such that the difference
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * Moreover, 0 <= offset[i] < stride.
 *
 * For each domain, we create a map
 *
 *	{ [..., j, ...] -> [..., j - offset[i], offset[i], ....] }
 *
 * where j refers to the current dimension and the other dimensions are
 * unchanged, and apply this map to the original schedule domain.
 *
 * For example, for the original schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * and assuming the offset is 0 for the A domain and 1 for the B domain,
 * we apply the mapping
 *
 *	{ [j] -> [j, 0] }
 *
 * to the schedule of the "A" domain and the mapping
 *
 *	{ [j - 1] -> [j, 1] }
 *
 * to the schedule of the "B" domain.
 *
 * Note that after the transformation, the differences between pairs
 * of values of the current dimension over all domains are multiples
 * of stride and that we have therefore exposed the stride.
 *
 * To see that the mapping preserves the lexicographic order,
 * first note that each of the individual maps above preserves the order.
 * If the value of the current iterator is j1 in one domain and j2 in another,
 * then if j1 = j2, we know that the same map is applied to both domains
 * and the order is preserved.
 * Otherwise, let us assume, without loss of generality, that j1 < j2.
 * If c1 >= c2 (with c1 and c2 the corresponding offsets), then
 *
 *	j1 - c1 < j2 - c2
 *
 * and the order is preserved.
 * If c1 < c2, then we know
 *
 *	0 <= c2 - c1 < s
 *
 * with s the stride.  Moreover,
 *
 *	j2 - j1 = n * s + r
 *
 * with n >= 0 and 0 <= r < s.
 * In other words, r = c2 - c1.
 * If n > 0, then
 *
 *	j1 - c1 < j2 - c2
 *
 * If n = 0, then
 *
 *	j1 - c1 = j2 - c2
 *
 * and so
 *
 *	(j1 - c1, c1) << (j2 - c2, c2)
 *
 * with "<<" the lexicographic order, proving that the order is preserved
 * in all cases.
 */
static __isl_give isl_union_map *contruct_shifted_executed(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	int i;
	isl_union_map *executed;
	isl_space *space;
	isl_map *map;
	int depth;
	isl_constraint *c;

	depth = isl_ast_build_get_depth(build);
	space = isl_ast_build_get_space(build, 1);
	executed = isl_union_map_empty(isl_space_copy(space));
	space = isl_space_map_from_set(space);
	map = isl_map_identity(isl_space_copy(space));
	map = isl_map_eliminate(map, isl_dim_out, depth, 1);
	map = isl_map_insert_dims(map, isl_dim_out, depth + 1, 1);
	space = isl_space_insert_dims(space, isl_dim_out, depth + 1, 1);

	c = isl_equality_alloc(isl_local_space_from_space(space));
	c = isl_constraint_set_coefficient_si(c, isl_dim_in, depth, 1);
	c = isl_constraint_set_coefficient_si(c, isl_dim_out, depth, -1);

	for (i = 0; i < n; ++i) {
		isl_map *map_i;
		isl_val *v;

		v = isl_multi_val_get_val(offset, i);
		if (!v)
			break;
		map_i = isl_map_copy(map);
		map_i = isl_map_fix_val(map_i, isl_dim_out, depth + 1,
					isl_val_copy(v));
		v = isl_val_neg(v);
		c = isl_constraint_set_constant_val(c, v);
		map_i = isl_map_add_constraint(map_i, isl_constraint_copy(c));

		map_i = isl_map_apply_domain(isl_map_copy(domain[order[i]].map),
						map_i);
		executed = isl_union_map_add_map(executed, map_i);
	}

	isl_constraint_free(c);
	isl_map_free(map);
	isl_ast_build_free(build);
	if (i < n)
		executed = isl_union_map_free(executed);

	return executed;
}
/* Generate code for a single component, after exposing the stride,
 * given that the schedule domain is "shifted strided".
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * The schedule domain being "shifted strided" means that the differences
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * We first look for the domain with the "smallest" value for the current
 * dimension and adjust the offsets such that the offset of the "smallest"
 * domain is equal to zero.  The other offsets are reduced modulo stride.
 *
 * Based on this information, we construct a new inverse schedule in
 * contruct_shifted_executed that exposes the stride.
 * Since this involves the introduction of a new schedule dimension,
 * the build needs to be changed accordingly.
 * After computing the AST, the newly introduced dimension needs
 * to be removed again from the list of grafts.  We do this by plugging
 * in a mapping that represents the new schedule domain in terms of the
 * old schedule domain.
 */
static __isl_give isl_ast_graft_list *generate_shift_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list;
	int first;
	int depth;
	isl_ctx *ctx;
	isl_val *val;
	isl_multi_val *mv;
	isl_space *space;
	isl_multi_aff *ma, *zero;
	isl_union_map *executed;

	ctx = isl_ast_build_get_ctx(build);
	depth = isl_ast_build_get_depth(build);

	first = first_offset(domain, order, n, build);
	if (first < 0)
		goto error;

	mv = isl_multi_val_copy(offset);
	val = isl_multi_val_get_val(offset, first);
	val = isl_val_neg(val);
	mv = isl_multi_val_add_val(mv, val);
	mv = isl_multi_val_mod_val(mv, isl_val_copy(stride));

	executed = contruct_shifted_executed(domain, order, n, stride, mv,
						isl_ast_build_copy(build));
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_map_from_set(space);
	ma = isl_multi_aff_identity(isl_space_copy(space));
	space = isl_space_from_domain(isl_space_domain(space));
	space = isl_space_add_dims(space, isl_dim_out, 1);
	zero = isl_multi_aff_zero(space);
	ma = isl_multi_aff_range_splice(ma, depth + 1, zero);
	build = isl_ast_build_insert_dim(build, depth + 1);
	list = generate_shifted_component(executed, build);

	list = isl_ast_graft_list_preimage_multi_aff(list, ma);

	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
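
/* As an illustration of the offset normalization above (hypothetical
 * values): with stride 2, offsets (0, 1) and the domain with offset 1
 * starting first, the offsets are adjusted to
 * ((0 - 1) mod 2, (1 - 1) mod 2) = (1, 0), i.e., the reference domain
 * gets offset 0 and the other domain offset 1.
 */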
/* Generate code for a single component.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * This function may modify the "set" fields of "domain".
 *
 * Before proceeding with the actual code generation for the component,
 * we first check if there are any "shifted" strides, meaning that
 * the schedule domains of the individual domains are all strided,
 * but that they have different offsets, resulting in the union
 * of schedule domains not being strided anymore.
 *
 * The simplest example is the schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * Both schedule domains are strided, but their union is not.
 * This function detects such cases and then rewrites the schedule to
 *
 *	{ A[i] -> [2i, 0]: 0 <= i < 10; B[i] -> [2i, 1] : 0 <= i < 10 }
 *
 * In the new schedule, the schedule domains have the same offset (modulo
 * the stride), ensuring that the union of schedule domains is also strided.
 *
 * If there is only a single domain in the component, then there is
 * nothing to do.  Similarly, if the current schedule dimension has
 * a fixed value for almost all domains then there is nothing to be done.
 * In particular, we need at least two domains where the current schedule
 * dimension does not have a fixed value.
 * Finally, if any of the options refer to the current schedule dimension,
 * then we bail out as well.  It would be possible to reformulate the options
 * in terms of the new schedule domain, but that would introduce constraints
 * that separate the domains in the options and that is something we would
 * like to avoid.
 *
 * To see if there is any shifted stride, we look at the differences
 * between the values of the current dimension in pairs of domains
 * for equal values of outer dimensions.  These differences should be
 * of the form
 *
 *	m x + r
 *
 * with "m" the stride and "r" a constant.  Note that we cannot perform
 * this analysis on individual domains as the lower bound in each domain
 * may depend on parameters or outer dimensions and so the current dimension
 * itself may not have a fixed remainder on division by the stride.
 *
 * In particular, we compare the first domain that does not have an
 * obviously fixed value for the current dimension to itself and all
 * other domains and collect the offsets and the gcd of the strides.
 * If the gcd becomes one, then we failed to find shifted strides.
 * If the gcd is zero, then the differences were all fixed, meaning
 * that some domains had non-obviously fixed values for the current dimension.
 * If all the offsets are the same (for those domains that do not have
 * an obviously fixed value for the current dimension), then we do not
 * apply the transformation.
 * If none of the domains were skipped, then there is nothing to do.
 * If some of them were skipped, then if we apply separation, the schedule
 * domain should get split in pieces with a (non-shifted) stride.
 *
 * Otherwise, we apply a shift to expose the stride in
 * generate_shift_component.
 */
static __isl_give isl_ast_graft_list *generate_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	int i, d;
	int depth;
	isl_ctx *ctx;
	isl_map *map;
	isl_set *deltas;
	isl_val *gcd = NULL;
	isl_multi_val *mv;
	int fixed, skip;
	int base;
	isl_ast_graft_list *list;
	int res = 0;

	depth = isl_ast_build_get_depth(build);

	skip = n == 1;
	if (skip >= 0 && !skip)
		skip = at_most_one_non_fixed(domain, order, n, depth);
	if (skip >= 0 && !skip)
		skip = isl_ast_build_options_involve_depth(build);
	if (skip < 0)
		goto error;
	if (skip)
		return generate_shifted_component_from_list(domain,
							    order, n, build);

	base = eliminate_non_fixed(domain, order, n, depth, build);
	if (base < 0)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	mv = isl_multi_val_zero(isl_space_set_alloc(ctx, 0, n));

	fixed = 1;
	for (i = 0; i < n; ++i) {
		isl_val *m, *r;

		map = isl_map_from_domain_and_range(
					isl_set_copy(domain[order[base]].set),
					isl_set_copy(domain[order[i]].set));
		for (d = 0; d < depth; ++d)
			map = isl_map_equate(map, isl_dim_in, d,
						isl_dim_out, d);
		deltas = isl_map_deltas(map);
		res = isl_set_dim_residue_class_val(deltas, depth, &m, &r);
		isl_set_free(deltas);
		if (res < 0)
			break;

		if (i == 0)
			gcd = m;
		else
			gcd = isl_val_gcd(gcd, m);
		if (isl_val_is_one(gcd)) {
			isl_val_free(r);
			break;
		}
		mv = isl_multi_val_set_val(mv, i, r);

		res = dim_is_fixed(domain[order[i]].set, depth);
		if (res < 0)
			break;
		if (res)
			continue;

		if (fixed && i > base) {
			isl_val *a, *b;
			a = isl_multi_val_get_val(mv, i);
			b = isl_multi_val_get_val(mv, base);
			if (isl_val_ne(a, b))
				fixed = 0;
			isl_val_free(a);
			isl_val_free(b);
		}
	}

	if (res < 0 || !gcd) {
		isl_ast_build_free(build);
		list = NULL;
	} else if (i < n || fixed || isl_val_is_zero(gcd)) {
		list = generate_shifted_component_from_list(domain,
							    order, n, build);
	} else
		list = generate_shift_component(domain, order, n, gcd, mv,
						build);

	isl_val_free(gcd);
	isl_multi_val_free(mv);

	return list;
error:
	isl_ast_build_free(build);
	return NULL;
}
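
/* A worked instance of the stride detection above: for the schedule
 *
 *	{ A[i] -> [2i] : 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * comparing the A schedule domain to itself yields m = 2 and r = 0,
 * while comparing it to the B schedule domain yields m = 2 and r = 1.
 * The gcd of the strides is 2 and the offsets (0, 1) are not all equal,
 * so generate_shift_component is called to expose the stride,
 * resulting in the rewritten schedule shown above.
 */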
3377 /* Store both "map" itself and its domain in the
3378 * structure pointed to by *next and advance to the next array element.
3380 static int extract_domain(__isl_take isl_map
*map
, void *user
)
3382 struct isl_set_map_pair
**next
= user
;
3384 (*next
)->map
= isl_map_copy(map
);
3385 (*next
)->set
= isl_map_domain(map
);
/* Internal data for any_scheduled_after.
 *
 * "depth" is the number of loops that have already been generated
 * "group_coscheduled" is a local copy of options->ast_build_group_coscheduled
 * "domain" is an array of set-map pairs corresponding to the different
 * iteration domains.  The set is the schedule domain, i.e., the domain
 * of the inverse schedule, while the map is the inverse schedule itself.
 */
struct isl_any_scheduled_after_data {
	int depth;
	int group_coscheduled;
	struct isl_set_map_pair *domain;
};
/* Is any element of domain "i" scheduled after any element of domain "j"
 * (for a common iteration of the first data->depth loops)?
 *
 * data->domain[i].set contains the domain of the inverse schedule
 * for domain "i", i.e., elements in the schedule domain.
 *
 * If data->group_coscheduled is set, then we also return 1 if there
 * is any pair of elements in the two domains that are scheduled together.
 */
static int any_scheduled_after(int i, int j, void *user)
{
	struct isl_any_scheduled_after_data *data = user;
	int dim = isl_set_dim(data->domain[i].set, isl_dim_set);
	int pos;

	for (pos = data->depth; pos < dim; ++pos) {
		int follows;

		follows = isl_set_follows_at(data->domain[i].set,
						data->domain[j].set, pos);

		if (follows < -1)
			return -1;
		if (follows > 0)
			return 1;
		if (follows < 0)
			return 0;
	}

	return data->group_coscheduled;
}
/* Look for independent components at the current depth and generate code
 * for each component separately.  The resulting lists of grafts are
 * merged in an attempt to combine grafts with identical guards.
 *
 * Code for two domains can be generated separately if all the elements
 * of one domain are scheduled before (or together with) all the elements
 * of the other domain.  We therefore consider the graph with as nodes
 * the domains and an edge between two nodes if any element of the first
 * node is scheduled after any element of the second node.
 * If the ast_build_group_coscheduled is set, then we also add an edge if
 * there is any pair of elements in the two domains that are scheduled
 * together.
 * Code is then generated (by generate_component)
 * for each of the strongly connected components in this graph
 * in their topological order.
 *
 * Since the test is performed on the domain of the inverse schedules of
 * the different domains, we precompute these domains and store
 * them in data.domain.
 */
static __isl_give isl_ast_graft_list *generate_components(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i;
	isl_ctx *ctx = isl_ast_build_get_ctx(build);
	int n = isl_union_map_n_map(executed);
	struct isl_any_scheduled_after_data data;
	struct isl_set_map_pair *next;
	struct isl_tarjan_graph *g = NULL;
	isl_ast_graft_list *list = NULL;
	int n_domain = 0;

	data.domain = isl_calloc_array(ctx, struct isl_set_map_pair, n);
	if (!data.domain)
		goto error;
	n_domain = n;

	next = data.domain;
	if (isl_union_map_foreach_map(executed, &extract_domain, &next) < 0)
		goto error;

	if (!build)
		goto error;
	data.depth = isl_ast_build_get_depth(build);
	data.group_coscheduled = isl_options_get_ast_build_group_coscheduled(ctx);
	g = isl_tarjan_graph_init(ctx, n, &any_scheduled_after, &data);
	if (!g)
		goto error;

	list = isl_ast_graft_list_alloc(ctx, 0);

	i = 0;
	while (list && n) {
		isl_ast_graft_list *list_c;
		int first = i;

		if (g->order[i] == -1)
			isl_die(ctx, isl_error_internal, "cannot happen",
				goto error);

		while (g->order[i] != -1) {
			++i;
			--n;
		}
		list_c = generate_component(data.domain,
					    g->order + first, i - first,
					    isl_ast_build_copy(build));
		list = isl_ast_graft_list_merge(list, list_c, build);

		++i;
	}

	if (0)
error:		list = isl_ast_graft_list_free(list);
	isl_tarjan_graph_free(g);
	for (i = 0; i < n_domain; ++i) {
		isl_map_free(data.domain[i].map);
		isl_set_free(data.domain[i].set);
	}
	free(data.domain);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
/* Generate code for the next level (and all inner levels).
 *
 * If "executed" is empty, i.e., no code needs to be generated,
 * then we return an empty list.
 *
 * If we have already generated code for all loop levels, then we pass
 * control to generate_inner_level.
 *
 * If "executed" lives in a single space, i.e., if code needs to be
 * generated for a single domain, then there can only be a single
 * component and we go directly to generate_shifted_component.
 * Otherwise, we call generate_components to detect the components
 * and to call generate_component on each of them separately.
 */
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int depth;

	if (!build || !executed)
		goto error;

	if (isl_union_map_is_empty(executed)) {
		isl_ctx *ctx = isl_ast_build_get_ctx(build);
		isl_union_map_free(executed);
		isl_ast_build_free(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	depth = isl_ast_build_get_depth(build);
	if (depth >= isl_ast_build_dim(build, isl_dim_set))
		return generate_inner_level(executed, build);

	if (isl_union_map_n_map(executed) == 1)
		return generate_shifted_component(executed, build);

	return generate_components(executed, build);
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Internal data structure used by isl_ast_build_ast_from_schedule.
 * internal, executed and build are the inputs to generate_code.
 * list collects the output.
 */
struct isl_generate_code_data {
	int internal;
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
/* Given an inverse schedule in terms of the external build schedule, i.e.,
 *
 *	[E -> S] -> D
 *
 * with E the external build schedule and S the additional schedule "space",
 * reformulate the inverse schedule in terms of the internal schedule domain,
 * i.e., return
 *
 *	[I -> S] -> D
 *
 * We first obtain a mapping
 *
 *	I -> E
 *
 * take the inverse and the product with S -> S, resulting in
 *
 *	[I -> S] -> [E -> S]
 *
 * Applying the map to the input produces the desired result.
 */
static __isl_give isl_union_map *internal_executed(
	__isl_take isl_union_map *executed, __isl_keep isl_space *space,
	__isl_keep isl_ast_build *build)
{
	isl_map *id, *proj;

	proj = isl_ast_build_get_schedule_map(build);
	proj = isl_map_reverse(proj);
	space = isl_space_map_from_set(isl_space_copy(space));
	id = isl_map_identity(space);
	proj = isl_map_product(proj, id);
	executed = isl_union_map_apply_domain(executed,
						isl_union_map_from_map(proj));

	return executed;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 * "space" is the space of the additional part of the schedule.
 * It is equal to the space of "set" if build->domain is parametric.
 * Otherwise, it is equal to the range of the wrapped space of "set".
 *
 * If the build space is not parametric and if isl_ast_build_ast_from_schedule
 * was called from an outside user (data->internal not set), then
 * the (inverse) schedule refers to the external build domain and needs to
 * be transformed to refer to the internal build domain.
 *
 * If the build space is parametric, then we add some of the parameter
 * constraints to the executed relation. Adding these constraints
 * allows for an earlier detection of conflicts in some cases.
 * However, we do not want to divide the executed relation into
 * more disjuncts than necessary. We therefore approximate
 * the constraints on the parameters by a single disjunct set.
 *
 * The build is extended to include the additional part of the schedule.
 * If the original build space was not parametric, then the options
 * in data->build refer only to the additional part of the schedule
 * and they need to be adjusted to refer to the complete AST build
 * domain.
 *
 * After having adjusted inverse schedule and build, we start generating
 * code with the outer loop of the current code generation
 * in generate_next_level.
 *
 * If the original build space was not parametric, we undo the embedding
 * on the resulting isl_ast_graft_list so that it can be used within
 * the outer AST build.
 */
static int generate_code_in_space(struct isl_generate_code_data *data,
	__isl_take isl_set *set, __isl_take isl_space *space)
{
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
	int embed;

	executed = isl_union_map_copy(data->executed);
	executed = isl_union_map_intersect_domain(executed,
					isl_union_set_from_set(set));

	embed = !isl_set_is_params(data->build->domain);
	if (embed && !data->internal)
		executed = internal_executed(executed, space, data->build);
	if (!embed) {
		isl_set *domain;
		domain = isl_ast_build_get_domain(data->build);
		domain = isl_set_from_basic_set(isl_set_simple_hull(domain));
		executed = isl_union_map_intersect_params(executed, domain);
	}

	build = isl_ast_build_copy(data->build);
	build = isl_ast_build_product(build, space);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, embed);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return 0;
}
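/* As an informal illustration of the single-disjunct approximation above
 * (a made-up parameter set, not part of the original code): for a
 * parametric build domain such as
 *
 *	[n] -> { : n >= 10 or (n >= 0 and n <= 5) }
 *
 * isl_set_simple_hull keeps only the constraints valid for the whole set,
 * returning the single basic set
 *
 *	[n] -> { : n >= 0 }
 *
 * so that intersecting the parameters of "executed" with it cannot split
 * the executed relation into additional disjuncts.
 */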
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 *
 * If the build space S is not parametric, then the space of "set"
 * needs to be a wrapped relation with S as domain.  That is, it needs
 * to be of the form
 *
 *	[S -> T]
 *
 * Check this property and pass control to generate_code_in_space,
 * passing along T.
 * If the build space is parametric, then T is the space of "set".
 */
static int generate_code_set(__isl_take isl_set *set, void *user)
{
	struct isl_generate_code_data *data = user;
	isl_space *space, *build_space;
	int is_domain;

	space = isl_set_get_space(set);

	if (isl_set_is_params(data->build->domain))
		return generate_code_in_space(data, set, space);

	build_space = isl_ast_build_get_space(data->build, data->internal);
	space = isl_space_unwrap(space);
	is_domain = isl_space_is_domain(build_space, space);
	isl_space_free(build_space);
	space = isl_space_range(space);

	if (is_domain < 0)
		goto error;
	if (!is_domain)
		isl_die(isl_set_get_ctx(set), isl_error_invalid,
			"invalid nested schedule space", goto error);

	return generate_code_in_space(data, set, space);
error:
	isl_set_free(set);
	isl_space_free(space);
	return -1;
}
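/* For example (an informal sketch with made-up spaces): if the build
 * space is S = { [i] } and "set" is the universe of the wrapped space
 *
 *	{ [[i] -> [j]] }
 *
 * then unwrapping yields { [i] -> [j] }, the isl_space_is_domain check
 * against S succeeds, and generate_code_in_space is invoked with
 * T = { [j] }.
 */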
/* Generate an AST that visits the elements in the range of "executed"
 * in the relative order specified by the corresponding domain element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the domain space(s) of "executed"
 * need to be wrapped relations with S as domain.
 *
 * If the domain of "executed" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * If "internal" is set, then the domain "S" above refers to the internal
 * schedule domain representation. Otherwise, it refers to the external
 * representation, as returned by isl_ast_build_get_schedule_space.
 *
 * We essentially run over all the spaces in the domain of "executed"
 * and call generate_code_set on each of them.
 */
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal)
{
	isl_ctx *ctx;
	struct isl_generate_code_data data = { 0 };
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_union_map *universe;

	if (!build)
		goto error;
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_align_params(space,
				isl_union_map_get_space(executed));
	space = isl_space_align_params(space,
				isl_union_map_get_space(build->options));
	build = isl_ast_build_align_params(build, isl_space_copy(space));
	executed = isl_union_map_align_params(executed, space);
	if (!executed || !build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	data.internal = internal;
	data.executed = executed;
	data.build = build;
	data.list = isl_ast_graft_list_alloc(ctx, 0);

	universe = isl_union_map_universe(isl_union_map_copy(executed));
	schedule_domain = isl_union_map_domain(universe);
	if (isl_union_set_foreach_set(schedule_domain, &generate_code_set,
					&data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_set_free(schedule_domain);
	isl_union_map_free(executed);

	isl_ast_build_free(build);
	return data.list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
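/* As an informal illustration of the iteration above (made-up spaces,
 * not part of the original code): for an inverse schedule with two
 * distinct domain spaces, such as
 *
 *	{ [i] -> A[i]; [i, j] -> B[i, j] }
 *
 * the universe of the schedule domain consists of { [i] } and
 * { [i, j] }, generate_code_set is called once for each of them, and
 * the two resulting graft lists end up concatenated in data->list.
 */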
/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the corresponding image element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the range space(s) of "schedule"
 * need to be wrapped relations with S as domain.
 *
 * If the range of "schedule" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * We first initialize the local copies of the relevant options.
 * We do this here rather than when the isl_ast_build is created
 * because the options may have changed between the construction
 * of the isl_ast_build and the call to isl_ast_build_ast_from_schedule.
 *
 * The main computation is performed on an inverse schedule (with
 * the schedule domain in the domain and the elements to be executed
 * in the range) called "executed".
 */
__isl_give isl_ast_node *isl_ast_build_ast_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	isl_ast_graft_list *list;
	isl_ast_node *node;
	isl_union_map *executed;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	schedule = isl_union_map_coalesce(schedule);
	executed = isl_union_map_reverse(schedule);
	list = generate_code(executed, isl_ast_build_copy(build), 0);
	node = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return node;
}
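/* A minimal usage sketch (the context and schedule strings are made up
 * for illustration; error handling is omitted):
 *
 *	isl_set *context = isl_set_read_from_str(ctx, "[n] -> { : n >= 0 }");
 *	isl_ast_build *build = isl_ast_build_from_context(context);
 *	isl_union_map *schedule = isl_union_map_read_from_str(ctx,
 *		"[n] -> { A[i] -> [i] : 0 <= i < n }");
 *	isl_ast_node *tree = isl_ast_build_ast_from_schedule(build, schedule);
 *	isl_ast_build_free(build);
 *
 * "schedule" is consumed by the call, while "build" is only kept, so the
 * caller remains responsible for freeing both "build" and the returned
 * tree (with isl_ast_node_free).
 */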