/*
 * Copyright 2012      Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <isl/union_map.h>
#include <isl_tarjan.h>
#include <isl_ast_private.h>
#include <isl_ast_build_expr.h>
#include <isl_ast_build_private.h>
#include <isl_ast_graft_private.h>

/* Add the constraint to the list that "user" points to, if it is not
 * a div constraint.
 */
static int collect_constraint(__isl_take isl_constraint *constraint,
	void *user)
{
	isl_constraint_list **list = user;

	if (isl_constraint_is_div_constraint(constraint))
		isl_constraint_free(constraint);
	else
		*list = isl_constraint_list_add(*list, constraint);

	return 0;
}

/* Extract the constraints of "bset" (except the div constraints)
 * and collect them in an isl_constraint_list.
 */
static __isl_give isl_constraint_list *isl_constraint_list_from_basic_set(
	__isl_take isl_basic_set *bset)
{
	int n;
	isl_ctx *ctx;
	isl_constraint_list *list;

	if (!bset)
		return NULL;

	ctx = isl_basic_set_get_ctx(bset);

	n = isl_basic_set_n_constraint(bset);
	list = isl_constraint_list_alloc(ctx, n);
	if (isl_basic_set_foreach_constraint(bset,
					&collect_constraint, &list) < 0)
		list = isl_constraint_list_free(list);

	isl_basic_set_free(bset);
	return list;
}

/* Data used in generate_domain.
 *
 * "build" is the input build.
 * "list" collects the results.
 */
struct isl_generate_domain_data {
	isl_ast_build *build;

	isl_ast_graft_list *list;
};

static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed,
	__isl_take isl_ast_build *build);
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal);

/* Generate an AST for a single domain based on
 * the (non single valued) inverse schedule "executed".
 *
 * We extend the schedule with the iteration domain
 * and continue generating through a call to generate_code.
 *
 * In particular, if executed has the form
 *
 *	S -> D
 *
 * then we continue generating code on
 *
 *	[S -> D] -> D
 *
 * The extended inverse schedule is clearly single valued,
 * ensuring that the nested generate_code will not reach this function,
 * but will instead create calls to all elements of D that need
 * to be executed from the current schedule domain.
 */
static int generate_non_single_valued(__isl_take isl_map *executed,
	struct isl_generate_domain_data *data)
{
	isl_map *identity;
	isl_ast_build *build;
	isl_ast_graft_list *list;

	build = isl_ast_build_copy(data->build);

	identity = isl_set_identity(isl_map_range(isl_map_copy(executed)));
	executed = isl_map_domain_product(executed, identity);
	build = isl_ast_build_set_single_valued(build, 1);

	list = generate_code(isl_union_map_from_map(executed), build, 1);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return 0;
}

/* Call the at_each_domain callback, if requested by the user,
 * after recording the current inverse schedule in the build.
 */
static __isl_give isl_ast_graft *at_each_domain(__isl_take isl_ast_graft *graft,
	__isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->at_each_domain)
		return graft;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_executed(build,
			isl_union_map_from_map(isl_map_copy(executed)));
	if (!build)
		return isl_ast_graft_free(graft);

	graft->node = build->at_each_domain(graft->node,
			build, build->at_each_domain_user);
	isl_ast_build_free(build);

	if (!graft->node)
		graft = isl_ast_graft_free(graft);

	return graft;
}

/* Generate an AST for a single domain based on
 * the inverse schedule "executed".
 *
 * If there is more than one domain element associated to the current
 * schedule "time", then we need to continue the generation process
 * in generate_non_single_valued.
 * Note that the inverse schedule being single-valued may depend
 * on constraints that are only available in the original context
 * domain specified by the user. We therefore first introduce
 * the constraints from data->build->domain.
 * On the other hand, we only perform the test after having taken the gist
 * of the domain as the resulting map is the one from which the call
 * expression is constructed. Using this map to construct the call
 * expression usually yields simpler results.
 * Because we perform the single-valuedness test on the gisted map,
 * we may in rare cases fail to recognize that the inverse schedule
 * is single-valued. This becomes problematic if this happens
 * from the recursive call through generate_non_single_valued
 * as we would then end up in an infinite recursion.
 * We therefore check if we are inside a call to generate_non_single_valued
 * and revert to the ungisted map if the gisted map turns out not to be
 * single-valued.
 *
 * Otherwise, we generate a call expression for the single executed
 * domain element and put a guard around it based on the (simplified)
 * domain of "executed".
 *
 * If the user has set an at_each_domain callback, it is called
 * on the constructed call expression node.
 */
static int generate_domain(__isl_take isl_map *executed, void *user)
{
	struct isl_generate_domain_data *data = user;
	isl_ast_graft *graft;
	isl_ast_graft_list *list;
	isl_set *guard;
	isl_map *map = NULL;
	int sv;

	executed = isl_map_intersect_domain(executed,
					isl_set_copy(data->build->domain));

	executed = isl_map_coalesce(executed);
	map = isl_map_copy(executed);
	map = isl_ast_build_compute_gist_map_domain(data->build, map);
	sv = isl_map_is_single_valued(map);
	if (sv < 0)
		goto error;
	if (!sv) {
		isl_map_free(map);
		if (data->build->single_valued)
			map = isl_map_copy(executed);
		else
			return generate_non_single_valued(executed, data);
	}
	guard = isl_map_domain(isl_map_copy(map));
	guard = isl_set_coalesce(guard);
	guard = isl_ast_build_compute_gist(data->build, guard);
	graft = isl_ast_graft_alloc_domain(map, data->build);
	graft = at_each_domain(graft, executed, data->build);

	isl_map_free(executed);
	graft = isl_ast_graft_add_guard(graft, guard, data->build);

	list = isl_ast_graft_list_from_ast_graft(graft);
	data->list = isl_ast_graft_list_concat(data->list, list);

	return 0;
error:
	isl_map_free(map);
	isl_map_free(executed);
	return -1;
}

/* Call build->create_leaf to create a "leaf" node in the AST,
 * encapsulate the result in an isl_ast_graft and return the result
 * as a 1-element list.
 *
 * Note that the node returned by the user may be an entire tree.
 *
 * Before we pass control to the user, we first clear some information
 * from the build that is (presumably) only meaningful
 * for the current code generation.
 * This includes the create_leaf callback itself, so we make a copy
 * of the build first.
 */
static __isl_give isl_ast_graft_list *call_create_leaf(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_ast_node *node;
	isl_ast_graft *graft;
	isl_ast_build *user_build;

	user_build = isl_ast_build_copy(build);
	user_build = isl_ast_build_set_executed(user_build, executed);
	user_build = isl_ast_build_clear_local_info(user_build);
	if (!user_build)
		node = NULL;
	else
		node = build->create_leaf(user_build, build->create_leaf_user);
	graft = isl_ast_graft_alloc(node, build);
	isl_ast_build_free(build);
	return isl_ast_graft_list_from_ast_graft(graft);
}

/* Generate an AST after having handled the complete schedule
 * of this call to the code generator.
 *
 * If the user has specified a create_leaf callback, control
 * is passed to the user in call_create_leaf.
 *
 * Otherwise, we generate one or more calls for each individual
 * domain in generate_domain.
 */
static __isl_give isl_ast_graft_list *generate_inner_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_generate_domain_data data = { build };

	if (!build || !executed)
		goto error;

	if (build->create_leaf)
		return call_create_leaf(executed, build);

	ctx = isl_union_map_get_ctx(executed);
	data.list = isl_ast_graft_list_alloc(ctx, 0);
	if (isl_union_map_foreach_map(executed, &generate_domain, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (0)
error:		data.list = NULL;

	isl_ast_build_free(build);
	isl_union_map_free(executed);
	return data.list;
}

/* Call the before_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_node *before_each_for(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build)
{
	isl_id *id;

	if (!node || !build)
		return isl_ast_node_free(node);
	if (!build->before_each_for)
		return node;

	id = build->before_each_for(build, build->before_each_for_user);
	node = isl_ast_node_set_annotation(node, id);

	return node;
}

/* Call the after_each_for callback, if requested by the user.
 */
static __isl_give isl_ast_graft *after_each_for(__isl_keep isl_ast_graft *graft,
	__isl_keep isl_ast_build *build)
{
	if (!graft || !build)
		return isl_ast_graft_free(graft);
	if (!build->after_each_for)
		return graft;

	graft->node = build->after_each_for(graft->node, build,
			build->after_each_for_user);
	if (!graft->node)
		return isl_ast_graft_free(graft);

	return graft;
}

/* Plug in all the known values of the current and outer dimensions
 * in the domain of "executed". In principle, we only need to plug
 * in the known value of the current dimension since the values of
 * outer dimensions have been plugged in already.
 * However, it turns out to be easier to just plug in all known values.
 */
static __isl_give isl_union_map *plug_in_values(
	__isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	return isl_ast_build_substitute_values_union_map_domain(build,
								executed);
}

/* Check if the constraint "c" is a lower bound on dimension "pos",
 * an upper bound, or independent of dimension "pos".
 */
static int constraint_type(isl_constraint *c, int pos)
{
	if (isl_constraint_is_lower_bound(c, isl_dim_set, pos))
		return 1;
	if (isl_constraint_is_upper_bound(c, isl_dim_set, pos))
		return 2;
	return 0;
}

/* Compare the types of the constraints "a" and "b",
 * resulting in constraints that are independent of "depth"
 * to be sorted before the lower bounds on "depth", which in
 * turn are sorted before the upper bounds on "depth".
 */
static int cmp_constraint(__isl_keep isl_constraint *a,
	__isl_keep isl_constraint *b, void *user)
{
	int *depth = user;
	int t1 = constraint_type(a, *depth);
	int t2 = constraint_type(b, *depth);

	return t1 - t2;
}

/* Extract a lower bound on dimension "pos" from constraint "c".
 *
 * If the constraint is of the form
 *
 *	a x + f(...) >= 0
 *
 * then we essentially return
 *
 *	l = ceil(-f(...)/a)
 *
 * However, if the current dimension is strided, then we need to make
 * sure that the lower bound we construct is of the form
 *
 *	f + s * a
 *
 * with f the offset and s the stride.
 * We therefore compute
 *
 *	f + s * ceil((l - f)/s)
 */
static __isl_give isl_aff *lower_bound(__isl_keep isl_constraint *c,
	int pos, __isl_keep isl_ast_build *build)
{
	isl_aff *aff;

	aff = isl_constraint_get_bound(c, isl_dim_set, pos);
	aff = isl_aff_ceil(aff);

	if (isl_ast_build_has_stride(build, pos)) {
		isl_aff *offset;
		isl_val *stride;

		offset = isl_ast_build_get_offset(build, pos);
		stride = isl_ast_build_get_stride(build, pos);

		aff = isl_aff_sub(aff, isl_aff_copy(offset));
		aff = isl_aff_scale_down_val(aff, isl_val_copy(stride));
		aff = isl_aff_ceil(aff);
		aff = isl_aff_scale_val(aff, stride);
		aff = isl_aff_add(aff, offset);
	}

	aff = isl_ast_build_compute_gist_aff(build, aff);

	return aff;
}

/* Return the exact lower bound (or upper bound if "upper" is set)
 * of "domain" as a piecewise affine expression.
 *
 * If we are computing a lower bound (of a strided dimension), then
 * we need to make sure it is of the form
 *
 *	f + s * a
 *
 * where f is the offset and s is the stride.
 * We therefore need to include the stride constraint before computing
 * the minimum.
 */
static __isl_give isl_pw_aff *exact_bound(__isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build, int upper)
{
	isl_set *stride;
	isl_map *it_map;
	isl_pw_aff *pa;
	isl_pw_multi_aff *pma;

	domain = isl_set_copy(domain);
	if (!upper) {
		stride = isl_ast_build_get_stride_constraint(build);
		domain = isl_set_intersect(domain, stride);
	}
	it_map = isl_ast_build_map_to_iterator(build, domain);
	if (upper)
		pma = isl_map_lexmax_pw_multi_aff(it_map);
	else
		pma = isl_map_lexmin_pw_multi_aff(it_map);
	pa = isl_pw_multi_aff_get_pw_aff(pma, 0);
	isl_pw_multi_aff_free(pma);
	pa = isl_ast_build_compute_gist_pw_aff(build, pa);
	pa = isl_pw_aff_coalesce(pa);

	return pa;
}

/* Extract a lower bound on dimension "pos" from each constraint
 * in "constraints" and return the list of lower bounds.
 * If "constraints" has zero elements, then we extract a lower bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *lower_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_pw_aff_list *list;
	int i, n;

	if (!build)
		return NULL;

	n = isl_constraint_list_n_constraint(constraints);
	if (n == 0) {
		isl_pw_aff *pa;
		pa = exact_bound(domain, build, 0);
		return isl_pw_aff_list_from_pw_aff(pa);
	}

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_constraint *c;

		c = isl_constraint_list_get_constraint(constraints, i);
		aff = lower_bound(c, pos, build);
		isl_constraint_free(c);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
	}

	return list;
}

/* Extract an upper bound on dimension "pos" from each constraint
 * in "constraints" and return the list of upper bounds.
 * If "constraints" has zero elements, then we extract an upper bound
 * from "domain" instead.
 */
static __isl_give isl_pw_aff_list *upper_bounds(
	__isl_keep isl_constraint_list *constraints, int pos,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	isl_pw_aff_list *list;
	int i, n;

	n = isl_constraint_list_n_constraint(constraints);
	if (n == 0) {
		isl_pw_aff *pa;
		pa = exact_bound(domain, build, 1);
		return isl_pw_aff_list_from_pw_aff(pa);
	}

	ctx = isl_ast_build_get_ctx(build);
	list = isl_pw_aff_list_alloc(ctx, n);

	for (i = 0; i < n; ++i) {
		isl_aff *aff;
		isl_constraint *c;

		c = isl_constraint_list_get_constraint(constraints, i);
		aff = isl_constraint_get_bound(c, isl_dim_set, pos);
		isl_constraint_free(c);
		aff = isl_aff_floor(aff);
		list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
	}

	return list;
}

/* Return an isl_ast_expr that performs the reduction of type "type"
 * on AST expressions corresponding to the elements in "list".
 *
 * The list is assumed to contain at least one element.
 * If the list contains exactly one element, then the returned isl_ast_expr
 * simply computes that affine expression.
 */
static __isl_give isl_ast_expr *reduce_list(enum isl_ast_op_type type,
	__isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i, n;
	isl_ctx *ctx;
	isl_ast_expr *expr;

	n = isl_pw_aff_list_n_pw_aff(list);

	if (n == 1)
		return isl_ast_build_expr_from_pw_aff_internal(build,
				isl_pw_aff_list_get_pw_aff(list, 0));

	ctx = isl_pw_aff_list_get_ctx(list);
	expr = isl_ast_expr_alloc_op(ctx, type, n);
	if (!expr)
		return NULL;

	for (i = 0; i < n; ++i) {
		isl_ast_expr *expr_i;

		expr_i = isl_ast_build_expr_from_pw_aff_internal(build,
				isl_pw_aff_list_get_pw_aff(list, i));
		if (!expr_i)
			return isl_ast_expr_free(expr);
		expr->u.op.args[i] = expr_i;
	}

	return expr;
}

/* Add a guard to "graft" based on "bounds" in the case of a degenerate
 * level (including the special case of an eliminated level).
 *
 * We eliminate the current dimension, simplify the result in the current
 * build and add the result as guards to the graft.
 *
 * Note that we cannot simply drop the constraints on the current dimension
 * even in the eliminated case, because the single affine expression may
 * not be explicitly available in "bounds". Moreover, the single affine
 * expression may only be defined on a subset of the build domain,
 * so we do in some cases need to insert a guard even in the eliminated case.
 */
static __isl_give isl_ast_graft *add_degenerate_guard(
	__isl_take isl_ast_graft *graft, __isl_keep isl_basic_set *bounds,
	__isl_keep isl_ast_build *build)
{
	int depth;
	isl_set *dom;

	depth = isl_ast_build_get_depth(build);

	dom = isl_set_from_basic_set(isl_basic_set_copy(bounds));
	if (isl_ast_build_has_stride(build, depth)) {
		isl_set *stride;

		stride = isl_ast_build_get_stride_constraint(build);
		dom = isl_set_intersect(dom, stride);
	}
	dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
	dom = isl_ast_build_compute_gist(build, dom);

	graft = isl_ast_graft_add_guard(graft, dom, build);

	return graft;
}

/* Update "graft" based on "bounds" for the eliminated case.
 *
 * In the eliminated case, no for node is created, so we only need
 * to check if "bounds" imply any guards that need to be inserted.
 */
static __isl_give isl_ast_graft *refine_eliminated(
	__isl_take isl_ast_graft *graft, __isl_keep isl_basic_set *bounds,
	__isl_keep isl_ast_build *build)
{
	return add_degenerate_guard(graft, bounds, build);
}

/* Update "graft" based on "bounds" and "sub_build" for the degenerate case.
 *
 * "build" is the build in which graft->node was created
 * "sub_build" contains information about the current level itself,
 * including the single value attained.
 *
 * We first set the initialization part of the for loop to the single
 * value attained by the current dimension.
 * The increment and condition are not strictly needed as they are known
 * to be "1" and "iterator <= value" respectively.
 * Then we set the size of the iterator and
 * check if "bounds" imply any guards that need to be inserted.
 */
static __isl_give isl_ast_graft *refine_degenerate(
	__isl_take isl_ast_graft *graft, __isl_keep isl_basic_set *bounds,
	__isl_keep isl_ast_build *build,
	__isl_keep isl_ast_build *sub_build)
{
	isl_pw_aff *value;

	if (!graft || !sub_build)
		return isl_ast_graft_free(graft);

	value = isl_pw_aff_copy(sub_build->value);

	graft->node->u.f.init = isl_ast_build_expr_from_pw_aff_internal(build,
						value);
	if (!graft->node->u.f.init)
		return isl_ast_graft_free(graft);

	graft = add_degenerate_guard(graft, bounds, build);

	return graft;
}

/* Return the intersection of constraints in "list" as a set.
 */
static __isl_give isl_set *intersect_constraints(
	__isl_keep isl_constraint_list *list)
{
	int i, n;
	isl_basic_set *bset;

	n = isl_constraint_list_n_constraint(list);
	if (n < 1)
		isl_die(isl_constraint_list_get_ctx(list), isl_error_internal,
			"expecting at least one constraint", return NULL);

	bset = isl_basic_set_from_constraint(
			isl_constraint_list_get_constraint(list, 0));
	for (i = 1; i < n; ++i) {
		isl_basic_set *bset_i;

		bset_i = isl_basic_set_from_constraint(
				isl_constraint_list_get_constraint(list, i));
		bset = isl_basic_set_intersect(bset, bset_i);
	}

	return isl_set_from_basic_set(bset);
}

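/* For instance (a made-up list, for illustration only), a list holding
 * the two constraints
 *
 *	i >= 0 and -i + n >= 0
 *
 * is turned by intersect_constraints into the set { [i] : 0 <= i <= n }.
 */
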
/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as a set "upper".
 *
 * In particular, if l(...) is a lower bound in "lower", and
 *
 *	-a i + f(...) >= 0		or	a i <= f(...)
 *
 * is an upper bound constraint on the current dimension i,
 * then the for loop enforces the constraint
 *
 *	-a l(...) + f(...) >= 0		or	a l(...) <= f(...)
 *
 * We therefore simply take each lower bound in turn, plug it into
 * the upper bounds and compute the intersection over all lower bounds.
 *
 * If a lower bound is a rational expression, then
 * isl_basic_set_preimage_multi_aff will force this rational
 * expression to have only integer values. However, the loop
 * itself does not enforce this integrality constraint. We therefore
 * use the ceil of the lower bounds instead of the lower bounds themselves.
 * Other constraints will make sure that the for loop is only executed
 * when each of the lower bounds attains an integral value.
 * In particular, potentially rational values only occur in
 * lower_bound if the offset is a (seemingly) rational expression,
 * but then outer conditions will make sure that this rational expression
 * only attains integer values.
 */
static __isl_give isl_ast_graft *set_enforced_from_set(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
{
	isl_space *space;
	isl_basic_set *enforced;
	isl_pw_multi_aff *pma;
	int i, n;

	if (!graft || !lower)
		return isl_ast_graft_free(graft);

	space = isl_set_get_space(upper);
	enforced = isl_basic_set_universe(isl_space_copy(space));

	space = isl_space_map_from_set(space);
	pma = isl_pw_multi_aff_identity(space);

	n = isl_pw_aff_list_n_pw_aff(lower);
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		isl_set *enforced_i;
		isl_basic_set *hull;
		isl_pw_multi_aff *pma_i;

		pa = isl_pw_aff_list_get_pw_aff(lower, i);
		pa = isl_pw_aff_ceil(pa);
		pma_i = isl_pw_multi_aff_copy(pma);
		pma_i = isl_pw_multi_aff_set_pw_aff(pma_i, pos, pa);
		enforced_i = isl_set_copy(upper);
		enforced_i = isl_set_preimage_pw_multi_aff(enforced_i, pma_i);
		hull = isl_set_simple_hull(enforced_i);
		enforced = isl_basic_set_intersect(enforced, hull);
	}

	isl_pw_multi_aff_free(pma);

	graft = isl_ast_graft_enforce(graft, enforced);

	return graft;
}

/* Compute the constraints on the outer dimensions enforced by
 * graft->node and add those constraints to graft->enforced,
 * in case the upper bound is expressed as
 * a list of affine expressions "upper".
 *
 * The enforced condition is that each lower bound expression is less
 * than or equal to each upper bound expression.
 */
static __isl_give isl_ast_graft *set_enforced_from_list(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
{
	isl_set *cond;
	isl_basic_set *enforced;

	lower = isl_pw_aff_list_copy(lower);
	upper = isl_pw_aff_list_copy(upper);
	cond = isl_pw_aff_list_le_set(lower, upper);
	enforced = isl_set_simple_hull(cond);
	graft = isl_ast_graft_enforce(graft, enforced);

	return graft;
}

/* Does "aff" have a negative constant term?
 */
static int aff_constant_is_negative(__isl_take isl_set *set,
	__isl_take isl_aff *aff, void *user)
{
	int *neg = user;
	isl_val *v;

	v = isl_aff_get_constant_val(aff);
	*neg = isl_val_is_neg(v);
	isl_val_free(v);
	isl_set_free(set);
	isl_aff_free(aff);

	return *neg ? 0 : -1;
}

/* Does "pa" have a negative constant term over its entire domain?
 */
static int pw_aff_constant_is_negative(__isl_take isl_pw_aff *pa, void *user)
{
	int *neg = user;
	int r;

	r = isl_pw_aff_foreach_piece(pa, &aff_constant_is_negative, user);
	isl_pw_aff_free(pa);

	return *neg ? 0 : -1;
}

/* Does each element in "list" have a negative constant term?
 *
 * The callback terminates the iteration as soon as an element has been
 * found that does not have a negative constant term.
 */
static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
{
	int neg = 1;

	if (isl_pw_aff_list_foreach(list,
			&pw_aff_constant_is_negative, &neg) < 0 && neg)
		return -1;

	return neg;
}

/* Add 1 to each of the elements in "list", where each of these elements
 * is defined over the internal schedule space of "build".
 */
static __isl_give isl_pw_aff_list *list_add_one(
	__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
{
	int i, n;
	isl_space *space;
	isl_aff *aff;
	isl_pw_aff *one;

	space = isl_ast_build_get_space(build, 1);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_constant_si(aff, 1);
	one = isl_pw_aff_from_aff(aff);

	n = isl_pw_aff_list_n_pw_aff(list);
	for (i = 0; i < n; ++i) {
		isl_pw_aff *pa;
		pa = isl_pw_aff_list_get_pw_aff(list, i);
		pa = isl_pw_aff_add(pa, isl_pw_aff_copy(one));
		list = isl_pw_aff_list_set_pw_aff(list, i, pa);
	}

	isl_pw_aff_free(one);

	return list;
}

/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a list of piecewise affine expressions.
 *
 * In particular, set the condition to
 *
 *	iterator <= min(list of upper bounds)
 *
 * If each of the upper bounds has a negative constant term, then
 * set the condition to
 *
 *	iterator < min(list of (upper bound + 1)s)
 *
 */
static __isl_give isl_ast_graft *set_for_cond_from_list(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list,
	__isl_keep isl_ast_build *build)
{
	int neg;
	isl_ast_expr *bound, *iterator, *cond;
	enum isl_ast_op_type type = isl_ast_op_le;

	if (!graft || !list)
		return isl_ast_graft_free(graft);

	neg = list_constant_is_negative(list);
	if (neg < 0)
		return isl_ast_graft_free(graft);
	list = isl_pw_aff_list_copy(list);
	if (neg) {
		list = list_add_one(list, build);
		type = isl_ast_op_lt;
	}

	bound = reduce_list(isl_ast_op_min, list, build);
	iterator = isl_ast_expr_copy(graft->node->u.f.iterator);
	cond = isl_ast_expr_alloc_binary(type, iterator, bound);
	graft->node->u.f.cond = cond;

	isl_pw_aff_list_free(list);
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);

	return graft;
}

/* Set the condition part of the for node graft->node in case
 * the upper bound is represented as a set.
 */
static __isl_give isl_ast_graft *set_for_cond_from_set(
	__isl_take isl_ast_graft *graft, __isl_keep isl_set *set,
	__isl_keep isl_ast_build *build)
{
	isl_ast_expr *cond;

	if (!graft)
		return NULL;

	cond = isl_ast_build_expr_from_set(build, isl_set_copy(set));
	graft->node->u.f.cond = cond;
	if (!graft->node->u.f.cond)
		return isl_ast_graft_free(graft);

	return graft;
}

/* Construct an isl_ast_expr for the increment (i.e., stride) of
 * the current dimension.
 */
static __isl_give isl_ast_expr *for_inc(__isl_keep isl_ast_build *build)
{
	int depth;
	isl_val *v;
	isl_ctx *ctx;

	if (!build)
		return NULL;
	ctx = isl_ast_build_get_ctx(build);
	depth = isl_ast_build_get_depth(build);

	if (!isl_ast_build_has_stride(build, depth))
		return isl_ast_expr_alloc_int_si(ctx, 1);

	v = isl_ast_build_get_stride(build, depth);
	return isl_ast_expr_from_val(v);
}

/* Should we express the loop condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints?
 *
 * The first is constructed from a list of upper bounds.
 * The second is constructed from a set.
 *
 * If there are no upper bounds in "constraints", then this could mean
 * that "domain" simply doesn't have an upper bound or that we didn't
 * pick any upper bound. In the first case, we want to generate the
 * loop condition as a(n empty) conjunction of constraints.
 * In the second case, we will compute
 * a single upper bound from "domain" and so we use the list form.
 *
 * If there are upper bounds in "constraints",
 * then we use the list form iff the atomic_upper_bound option is set.
 */
static int use_upper_bound_list(isl_ctx *ctx, int n_upper,
	__isl_keep isl_set *domain, int depth)
{
	if (n_upper > 0)
		return isl_options_get_ast_build_atomic_upper_bound(ctx);
	else
		return isl_set_dim_has_upper_bound(domain, isl_dim_set, depth);
}

/* Fill in the expressions of the for node in graft->node.
 *
 * In particular,
 * - set the initialization part of the loop to the maximum of the lower bounds
 * - set the size of the iterator based on the values attained by the iterator
 * - extract the increment from the stride of the current dimension
 * - construct the for condition either based on a list of upper bounds
 *	or on a set of upper bound constraints.
 */
static __isl_give isl_ast_graft *set_for_node_expressions(
	__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower,
	int use_list, __isl_keep isl_pw_aff_list *upper_list,
	__isl_keep isl_set *upper_set, __isl_keep isl_ast_build *build)
{
	isl_ast_node *node;

	if (!graft)
		return NULL;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_enforced(build,
					isl_ast_graft_get_enforced(graft));

	node = graft->node;
	node->u.f.init = reduce_list(isl_ast_op_max, lower, build);
	node->u.f.inc = for_inc(build);

	if (use_list)
		graft = set_for_cond_from_list(graft, upper_list, build);
	else
		graft = set_for_cond_from_set(graft, upper_set, build);

	isl_ast_build_free(build);

	if (!node->u.f.iterator || !node->u.f.init ||
	    !node->u.f.cond || !node->u.f.inc)
		return isl_ast_graft_free(graft);

	return graft;
}

/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "c_lower" and "c_upper" contain the lower and upper bounds
 * that the loop node should express.
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 *
 * There may be zero lower bounds or zero upper bounds in "constraints"
 * in case the list of constraints was created
 * based on the atomic option or based on separation with explicit bounds.
 * In that case, we use "domain" to derive lower and/or upper bounds.
 *
 * We first compute a list of one or more lower bounds.
 *
 * Then we decide if we want to express the condition as
 *
 *	iterator <= min(list of upper bounds)
 *
 * or as a conjunction of constraints.
 *
 * The set of enforced constraints is then computed either based on
 * a list of upper bounds or on a set of upper bound constraints.
 * We do not compute any enforced constraints if we were forced
 * to compute a lower or upper bound using exact_bound. The domains
 * of the resulting expressions may imply some bounds on outer dimensions
 * that we do not want to appear in the enforced constraints since
 * they are not actually enforced by the corresponding code.
 *
 * Finally, we fill in the expressions of the for node.
 */
static __isl_give isl_ast_graft *refine_generic_bounds(
	__isl_take isl_ast_graft *graft,
	__isl_take isl_constraint_list *c_lower,
	__isl_take isl_constraint_list *c_upper,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	int depth;
	isl_ctx *ctx;
	isl_pw_aff_list *lower;
	int use_list;
	isl_set *upper_set = NULL;
	isl_pw_aff_list *upper_list = NULL;
	int n_lower, n_upper;

	if (!graft || !c_lower || !c_upper || !build)
		goto error;

	depth = isl_ast_build_get_depth(build);
	ctx = isl_ast_graft_get_ctx(graft);

	n_lower = isl_constraint_list_n_constraint(c_lower);
	n_upper = isl_constraint_list_n_constraint(c_upper);

	use_list = use_upper_bound_list(ctx, n_upper, domain, depth);

	lower = lower_bounds(c_lower, depth, domain, build);

	if (use_list)
		upper_list = upper_bounds(c_upper, depth, domain, build);
	else if (n_upper > 0)
		upper_set = intersect_constraints(c_upper);
	else
		upper_set = isl_set_universe(isl_set_get_space(domain));

	if (n_lower == 0 || n_upper == 0)
		;
	else if (use_list)
		graft = set_enforced_from_list(graft, lower, upper_list);
	else
		graft = set_enforced_from_set(graft, lower, depth, upper_set);

	graft = set_for_node_expressions(graft, lower, use_list, upper_list,
					upper_set, build);

	isl_pw_aff_list_free(lower);
	isl_pw_aff_list_free(upper_list);
	isl_set_free(upper_set);
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);

	return graft;
error:
	isl_constraint_list_free(c_lower);
	isl_constraint_list_free(c_upper);
	return isl_ast_graft_free(graft);
}

/* Internal data structure used inside count_constraints to keep
 * track of the number of constraints that are independent of dimension "pos",
 * the lower bounds in "pos" and the upper bounds in "pos".
 */
struct isl_ast_count_constraints_data {
	int pos;

	int n_indep;
	int n_lower;
	int n_upper;
};

/* Increment data->n_indep, data->n_lower or data->n_upper depending
 * on whether "c" is independent of dimension data->pos,
 * a lower bound or an upper bound.
 */
static int count_constraints(__isl_take isl_constraint *c, void *user)
{
	struct isl_ast_count_constraints_data *data = user;

	if (isl_constraint_is_lower_bound(c, isl_dim_set, data->pos))
		data->n_lower++;
	else if (isl_constraint_is_upper_bound(c, isl_dim_set, data->pos))
		data->n_upper++;
	else
		data->n_indep++;

	isl_constraint_free(c);

	return 0;
}

/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "list" represents the list of bounds that need to be encoded by
 * the for loop (or a guard around the for loop).
 * "domain" is the subset of the intersection of the constraints
 * for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We separate lower bounds, upper bounds and constraints that
 * are independent of the loop iterator.
 *
 * The actual for loop bounds are generated in refine_generic_bounds.
 * If there are any constraints that are independent of the loop iterator,
 * we need to put a guard around the for loop (which may get hoisted up
 * to higher levels) and we call refine_generic_bounds in a build
 * where this guard is enforced.
 */
static __isl_give isl_ast_graft *refine_generic_split(
	__isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list,
	__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
{
	isl_ast_build *for_build;
	isl_set *guard;
	struct isl_ast_count_constraints_data data;
	isl_constraint_list *lower;
	isl_constraint_list *upper;

	if (!list)
		return isl_ast_graft_free(graft);

	data.pos = isl_ast_build_get_depth(build);

	list = isl_constraint_list_sort(list, &cmp_constraint, &data.pos);
	if (!list)
		return isl_ast_graft_free(graft);

	data.n_indep = data.n_lower = data.n_upper = 0;
	if (isl_constraint_list_foreach(list, &count_constraints, &data) < 0) {
		isl_constraint_list_free(list);
		return isl_ast_graft_free(graft);
	}

	lower = isl_constraint_list_copy(list);
	lower = isl_constraint_list_drop(lower, 0, data.n_indep);
	upper = isl_constraint_list_copy(lower);
	lower = isl_constraint_list_drop(lower, data.n_lower, data.n_upper);
	upper = isl_constraint_list_drop(upper, 0, data.n_lower);

	if (data.n_indep == 0) {
		isl_constraint_list_free(list);
		return refine_generic_bounds(graft, lower, upper,
						domain, build);
	}

	list = isl_constraint_list_drop(list, data.n_indep,
					data.n_lower + data.n_upper);
	guard = intersect_constraints(list);
	isl_constraint_list_free(list);

	for_build = isl_ast_build_copy(build);
	for_build = isl_ast_build_restrict_pending(for_build,
						isl_set_copy(guard));
	graft = refine_generic_bounds(graft, lower, upper, domain, for_build);
	isl_ast_build_free(for_build);

	graft = isl_ast_graft_add_guard(graft, guard, build);

	return graft;
}

/* Add the guard implied by the current stride constraint (if any),
 * but not (necessarily) enforced by the generated AST, to "graft".
 */
static __isl_give isl_ast_graft *add_stride_guard(
	__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build)
{
	int depth;
	isl_set *dom;

	depth = isl_ast_build_get_depth(build);
	if (!isl_ast_build_has_stride(build, depth))
		return graft;

	dom = isl_ast_build_get_stride_constraint(build);
	dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
	dom = isl_ast_build_compute_gist(build, dom);

	graft = isl_ast_graft_add_guard(graft, dom, build);

	return graft;
}

/* Update "graft" based on "bounds" and "domain" for the generic,
 * non-degenerate, case.
 *
 * "bounds" represent the bounds that need to be encoded by
 * the for loop (or a guard around the for loop).
 * "domain" is the subset of "bounds" for which some code is executed.
 * "build" is the build in which graft->node was created.
 *
 * We break up "bounds" into a list of constraints and continue with
 * refine_generic_split.
 */
static __isl_give isl_ast_graft *refine_generic(
	__isl_take isl_ast_graft *graft,
	__isl_keep isl_basic_set *bounds, __isl_keep isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	isl_constraint_list *list;

	if (!build || !graft)
		return isl_ast_graft_free(graft);

	bounds = isl_basic_set_copy(bounds);
	bounds = isl_ast_build_compute_gist_basic_set(build, bounds);
	list = isl_constraint_list_from_basic_set(bounds);

	graft = refine_generic_split(graft, list, domain, build);
	graft = add_stride_guard(graft, build);

	return graft;
}

1236 * Mark the for node degenerate if "degenerate" is set.
1238 static __isl_give isl_ast_node
*create_for(__isl_keep isl_ast_build
*build
,
1248 depth
= isl_ast_build_get_depth(build
);
1249 id
= isl_ast_build_get_iterator_id(build
, depth
);
1250 node
= isl_ast_node_alloc_for(id
);
1252 node
= isl_ast_node_for_mark_degenerate(node
);
/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 * It may be a strict subset of "bounds" in case "bounds" was created
 * based on the atomic option or based on separation with explicit bounds.
 *
 * "domain" may satisfy additional equalities that result
 * from intersecting "executed" with "bounds" in add_node.
 * It may also satisfy some global constraints that were dropped out because
 * we performed separation with explicit bounds.
 * The very first step is then to copy these constraints to "bounds".
 *
 * Since we may be calling before_each_for and after_each_for
 * callbacks, we record the current inverse schedule in the build.
 *
 * We consider three builds,
 * "build" is the one in which the current level is created,
 * "body_build" is the build in which the next level is created,
 * "sub_build" is essentially the same as "body_build", except that
 * the depth has not been increased yet.
 *
 * "build" already contains information (in strides and offsets)
 * about the strides at the current level, but this information is not
 * reflected in the build->domain.
 * We first add this information and the "bounds" to the sub_build->domain.
 * isl_ast_build_set_loop_bounds checks whether the current dimension attains
 * only a single value and whether this single value can be represented using
 * a single affine expression.
 * In the first case, the current level is considered "degenerate".
 * In the second, sub-case, the current level is considered "eliminated".
 * Eliminated levels don't need to be reflected in the AST since we can
 * simply plug in the affine expression. For degenerate, but non-eliminated,
 * levels, we do introduce a for node, but mark it as degenerate so that
 * it can be printed as an assignment of the single value to the loop
 * "iterator".
 *
 * If the current level is eliminated, we explicitly plug in the value
 * for the current level found by isl_ast_build_set_loop_bounds in the
 * inverse schedule. This ensures that if we are working on a slice
 * of the domain based on information available in the inverse schedule
 * and the build domain, that then this information is also reflected
 * in the inverse schedule. This operation also eliminates the current
 * dimension from the inverse schedule making sure no inner dimensions depend
 * on the current dimension. Otherwise, we create a for node, marking
 * it degenerate if appropriate. The initial for node is still incomplete
 * and will be completed in either refine_degenerate or refine_generic.
 *
 * We then generate a sequence of grafts for the next level,
 * create a surrounding graft for the current level and insert
 * the for node we created (if the current level is not eliminated).
 *
 * Finally, we set the bounds of the for loop and insert guards
 * (either in the AST or in the graft) in one of
 * refine_eliminated, refine_degenerate or refine_generic.
 */
static __isl_give isl_ast_graft *create_node_scaled(
	__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	int depth;
	int degenerate, eliminated;
	isl_basic_set *hull;
	isl_ast_node *node = NULL;
	isl_ast_graft *graft;
	isl_ast_graft_list *children;
	isl_ast_build *sub_build;
	isl_ast_build *body_build;

	domain = isl_ast_build_eliminate_divs(build, domain);
	domain = isl_set_detect_equalities(domain);
	hull = isl_set_unshifted_simple_hull(isl_set_copy(domain));
	bounds = isl_basic_set_intersect(bounds, hull);
	build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));

	depth = isl_ast_build_get_depth(build);
	sub_build = isl_ast_build_copy(build);
	sub_build = isl_ast_build_include_stride(sub_build);
	sub_build = isl_ast_build_set_loop_bounds(sub_build,
						isl_basic_set_copy(bounds));
	degenerate = isl_ast_build_has_value(sub_build);
	eliminated = isl_ast_build_has_affine_value(sub_build, depth);
	if (degenerate < 0 || eliminated < 0)
		executed = isl_union_map_free(executed);
	if (eliminated)
		executed = plug_in_values(executed, sub_build);
	else
		node = create_for(build, degenerate);

	body_build = isl_ast_build_copy(sub_build);
	body_build = isl_ast_build_increase_depth(body_build);
	if (!eliminated)
		node = before_each_for(node, body_build);
	children = generate_next_level(executed,
				isl_ast_build_copy(body_build));

	graft = isl_ast_graft_alloc_level(children, build, sub_build);
	if (!eliminated)
		graft = isl_ast_graft_insert_for(graft, node);
	if (eliminated)
		graft = refine_eliminated(graft, bounds, build);
	else if (degenerate)
		graft = refine_degenerate(graft, bounds, build, sub_build);
	else
		graft = refine_generic(graft, bounds, domain, build);
	if (!eliminated)
		graft = after_each_for(graft, body_build);

	isl_ast_build_free(body_build);
	isl_ast_build_free(sub_build);
	isl_ast_build_free(build);
	isl_basic_set_free(bounds);
	isl_set_free(domain);

	return graft;
}

/* Internal data structure for checking if all constraints involving
 * the input dimension "depth" are such that the other coefficients
 * are multiples of "m", reducing "m" if they are not.
 * If "m" is reduced all the way down to "1", then the check has failed
 * and we break out of the iteration.
 */
struct isl_check_scaled_data {
	int depth;
	isl_val *m;
};

/* If constraint "c" involves the input dimension data->depth,
 * then make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static int constraint_check_scaled(__isl_take isl_constraint *c, void *user)
{
	struct isl_check_scaled_data *data = user;
	int i, j, n;
	enum isl_dim_type t[] = { isl_dim_param, isl_dim_in, isl_dim_out,
				    isl_dim_div };

	if (!isl_constraint_involves_dims(c, isl_dim_in, data->depth, 1)) {
		isl_constraint_free(c);
		return 0;
	}

	for (i = 0; i < 4; ++i) {
		n = isl_constraint_dim(c, t[i]);
		for (j = 0; j < n; ++j) {
			isl_val *d;

			if (t[i] == isl_dim_in && j == data->depth)
				continue;
			if (!isl_constraint_involves_dims(c, t[i], j, 1))
				continue;
			d = isl_constraint_get_coefficient_val(c, t[i], j);
			data->m = isl_val_gcd(data->m, d);
			if (isl_val_is_one(data->m))
				break;
		}
		if (j < n)
			break;
	}

	isl_constraint_free(c);

	return i < 4 ? -1 : 0;
}

/* For each constraint of "bmap" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static int basic_map_check_scaled(__isl_take isl_basic_map *bmap, void *user)
{
	int r;

	r = isl_basic_map_foreach_constraint(bmap,
						&constraint_check_scaled, user);
	isl_basic_map_free(bmap);

	return r;
}

/* For each constraint of "map" that involves the input dimension data->depth,
 * make sure that all the other coefficients are multiples of data->m,
 * reducing data->m if needed.
 * Break out of the iteration if data->m has become equal to "1".
 */
static int map_check_scaled(__isl_take isl_map *map, void *user)
{
	int r;

	r = isl_map_foreach_basic_map(map, &basic_map_check_scaled, user);
	isl_map_free(map);

	return r;
}

/* Create an AST node for the current dimension based on
 * the schedule domain "bounds" and return the node encapsulated
 * in an isl_ast_graft.
 *
 * "executed" is the current inverse schedule, taking into account
 * the bounds in "bounds"
 * "domain" is the domain of "executed", with inner dimensions projected out.
 *
 * Before moving on to the actual AST node construction in create_node_scaled,
 * we first check if the current dimension is strided and if we can scale
 * down this stride. Note that we only do this if the ast_build_scale_strides
 * option is set.
 *
 * In particular, let the current dimension take on values
 *
 *	f + s a
 *
 * with a an integer. We check if we can find an integer m that (obviously)
 * divides both f and s.
 *
 * If so, we check if the current dimension only appears in constraints
 * where the coefficients of the other variables are multiples of m.
 * We perform this extra check to avoid the risk of introducing
 * divisions by scaling down the current dimension.
 *
 * If so, we scale the current dimension down by a factor of m.
 * That is, we plug in
 *
 *	i = m i'					(1)
 *
 * Note that in principle we could always scale down strided loops
 * by plugging in
 *
 *	i = f + s i'
 *
 * but this may result in i' taking on larger values than the original i,
 * due to the shift by "f".
 * By contrast, the scaling in (1) can only reduce the (absolute) value "i".
 */
static __isl_give isl_ast_graft *create_node(__isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_set *domain,
	__isl_take isl_ast_build *build)
{
	struct isl_check_scaled_data data;
	isl_ctx *ctx;
	isl_aff *offset;
	isl_val *d;

	ctx = isl_ast_build_get_ctx(build);
	if (!isl_options_get_ast_build_scale_strides(ctx))
		return create_node_scaled(executed, bounds, domain, build);

	data.depth = isl_ast_build_get_depth(build);
	if (!isl_ast_build_has_stride(build, data.depth))
		return create_node_scaled(executed, bounds, domain, build);

	offset = isl_ast_build_get_offset(build, data.depth);
	data.m = isl_ast_build_get_stride(build, data.depth);
	if (!data.m)
		offset = isl_aff_free(offset);
	offset = isl_aff_scale_down_val(offset, isl_val_copy(data.m));
	d = isl_aff_get_denominator_val(offset);
	if (!d)
		executed = isl_union_map_free(executed);

	if (executed && isl_val_is_divisible_by(data.m, d))
		data.m = isl_val_div(data.m, d);
	else {
		data.m = isl_val_set_si(data.m, 1);
		isl_val_free(d);
	}

	if (!isl_val_is_one(data.m)) {
		if (isl_union_map_foreach_map(executed, &map_check_scaled,
						&data) < 0 &&
		    !isl_val_is_one(data.m))
			executed = isl_union_map_free(executed);
	}

	if (!isl_val_is_one(data.m)) {
		isl_space *space;
		isl_multi_aff *ma;
		isl_aff *aff;
		isl_map *map;
		isl_union_map *umap;

		space = isl_ast_build_get_space(build, 1);
		space = isl_space_map_from_set(space);
		ma = isl_multi_aff_identity(space);
		aff = isl_multi_aff_get_aff(ma, data.depth);
		aff = isl_aff_scale_val(aff, isl_val_copy(data.m));
		ma = isl_multi_aff_set_aff(ma, data.depth, aff);

		bounds = isl_basic_set_preimage_multi_aff(bounds,
						isl_multi_aff_copy(ma));
		domain = isl_set_preimage_multi_aff(domain,
						isl_multi_aff_copy(ma));
		map = isl_map_reverse(isl_map_from_multi_aff(ma));
		umap = isl_union_map_from_map(map);
		executed = isl_union_map_apply_domain(executed,
						isl_union_map_copy(umap));
		build = isl_ast_build_scale_down(build, isl_val_copy(data.m),
						umap);
	}

	isl_aff_free(offset);
	isl_val_free(data.m);

	return create_node_scaled(executed, bounds, domain, build);
}

/* Add the basic set to the list that "user" points to.
 */
static int collect_basic_set(__isl_take isl_basic_set *bset, void *user)
{
	isl_basic_set_list **list = user;

	*list = isl_basic_set_list_add(*list, bset);

	return 0;
}

/* Extract the basic sets of "set" and collect them in an isl_basic_set_list.
 */
static __isl_give isl_basic_set_list *isl_basic_set_list_from_set(
	__isl_take isl_set *set)
{
	int n;
	isl_ctx *ctx;
	isl_basic_set_list *list;

	if (!set)
		return NULL;

	ctx = isl_set_get_ctx(set);

	n = isl_set_n_basic_set(set);
	list = isl_basic_set_list_alloc(ctx, n);
	if (isl_set_foreach_basic_set(set, &collect_basic_set, &list) < 0)
		list = isl_basic_set_list_free(list);

	isl_set_free(set);
	return list;
}

/* Generate code for the schedule domain "bounds"
 * and add the result to "list".
 *
 * We mainly detect strides and additional equalities here
 * and then pass over control to create_node.
 *
 * "bounds" reflects the bounds on the current dimension and possibly
 * some extra conditions on outer dimensions.
 * It does not, however, include any divs involving the current dimension,
 * so it does not capture any stride constraints.
 * We therefore need to compute that part of the schedule domain that
 * intersects with "bounds" and derive the strides from the result.
 */
static __isl_give isl_ast_graft_list *add_node(
	__isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed,
	__isl_take isl_basic_set *bounds, __isl_take isl_ast_build *build)
{
	isl_ast_graft *graft;
	isl_set *domain = NULL;
	isl_union_set *uset;
	int empty;

	uset = isl_union_set_from_basic_set(isl_basic_set_copy(bounds));
	executed = isl_union_map_intersect_domain(executed, uset);
	empty = isl_union_map_is_empty(executed);
	if (empty < 0)
		goto error;
	if (empty)
		goto done;

	uset = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(uset);
	domain = isl_ast_build_compute_gist(build, domain);
	empty = isl_set_is_empty(domain);
	if (empty < 0)
		goto error;
	if (empty)
		goto done;

	domain = isl_ast_build_eliminate_inner(build, domain);
	build = isl_ast_build_detect_strides(build, isl_set_copy(domain));

	graft = create_node(executed, bounds, domain,
				isl_ast_build_copy(build));
	list = isl_ast_graft_list_add(list, graft);
	isl_ast_build_free(build);
	return list;
error:
	list = isl_ast_graft_list_free(list);
done:
	isl_set_free(domain);
	isl_basic_set_free(bounds);
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return list;
}

/* Does any element of i follow or coincide with any element of j
 * at the current depth for equal values of the outer dimensions?
 */
static int domain_follows_at_depth(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	int empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						    isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
						isl_dim_out, l);
	test = isl_basic_map_order_ge(test, isl_dim_in, depth,
					isl_dim_out, depth);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return empty < 0 ? -1 : !empty;
}

/* Split up each element of "list" into a part that is related to "bset"
 * according to "gt" and a part that is not.
 * Return a list that consists of "bset" and all the pieces.
 */
static __isl_give isl_basic_set_list *add_split_on(
	__isl_take isl_basic_set_list *list, __isl_take isl_basic_set *bset,
	__isl_keep isl_basic_map *gt)
{
	int i, n;
	isl_basic_set_list *res;

	gt = isl_basic_map_copy(gt);
	gt = isl_basic_map_intersect_domain(gt, isl_basic_set_copy(bset));
	n = isl_basic_set_list_n_basic_set(list);
	res = isl_basic_set_list_from_basic_set(bset);
	for (i = 0; res && i < n; ++i) {
		isl_basic_set *bset;
		isl_set *set1, *set2;
		isl_basic_map *bmap;
		int empty;

		bset = isl_basic_set_list_get_basic_set(list, i);
		bmap = isl_basic_map_copy(gt);
		bmap = isl_basic_map_intersect_range(bmap, bset);
		bset = isl_basic_map_range(bmap);
		empty = isl_basic_set_is_empty(bset);
		if (empty < 0)
			res = isl_basic_set_list_free(res);
		if (empty) {
			isl_basic_set_free(bset);
			bset = isl_basic_set_list_get_basic_set(list, i);
			res = isl_basic_set_list_add(res, bset);
			continue;
		}

		res = isl_basic_set_list_add(res, isl_basic_set_copy(bset));
		set1 = isl_set_from_basic_set(bset);
		bset = isl_basic_set_list_get_basic_set(list, i);
		set2 = isl_set_from_basic_set(bset);
		set1 = isl_set_subtract(set2, set1);
		set1 = isl_set_make_disjoint(set1);

		res = isl_basic_set_list_concat(res,
					isl_basic_set_list_from_set(set1));
	}
	isl_basic_map_free(gt);
	isl_basic_set_list_free(list);
	return res;
}

static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed,
	__isl_keep isl_ast_build *build);

/* Internal data structure for add_nodes.
 *
 * "executed" and "build" are extra arguments to be passed to add_node.
 * "list" collects the results.
 */
struct isl_add_nodes_data {
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};

/* Generate code for the schedule domains in "scc"
 * and add the results to "list".
 *
 * The domains in "scc" form a strongly connected component in the ordering.
 * If the number of domains in "scc" is larger than 1, then this means
 * that we cannot determine a valid ordering for the domains in the component.
 * This should be fairly rare because the individual domains
 * have been made disjoint first.
 * The problem is that the domains may be integrally disjoint but not
 * rationally disjoint. For example, we may have domains
 *
 *	{ [i,i] : 0 <= i <= 1 } and { [i,1-i] : 0 <= i <= 1 }
 *
 * These two domains have an empty intersection, but their rational
 * relaxations do intersect. It is impossible to order these domains
 * in the second dimension because the first should be ordered before
 * the second for outer dimension equal to 0, while it should be ordered
 * after for outer dimension equal to 1.
 *
 * This may happen in particular in case of unrolling since the domain
 * of each slice is replaced by its simple hull.
 *
 * For each basic set i in "scc" and for each of the following basic sets j,
 * we split off that part of the basic set i that shares the outer dimensions
 * with j and lies before j in the current dimension.
 * We collect all the pieces in a new list that replaces "scc".
 */
static int add_nodes(__isl_take isl_basic_set_list *scc, void *user)
{
	struct isl_add_nodes_data *data = user;
	int i, n, depth;
	isl_basic_set *bset;
	isl_basic_set_list *list;
	isl_space *space;
	isl_basic_map *gt;

	n = isl_basic_set_list_n_basic_set(scc);
	bset = isl_basic_set_list_get_basic_set(scc, 0);
	if (n == 1) {
		isl_basic_set_list_free(scc);
		data->list = add_node(data->list,
				isl_union_map_copy(data->executed), bset,
				isl_ast_build_copy(data->build));
		return data->list ? 0 : -1;
	}

	depth = isl_ast_build_get_depth(data->build);
	space = isl_basic_set_get_space(bset);
	space = isl_space_map_from_set(space);
	gt = isl_basic_map_universe(space);
	for (i = 0; i < depth; ++i)
		gt = isl_basic_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
	gt = isl_basic_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);

	list = isl_basic_set_list_from_basic_set(bset);
	for (i = 1; i < n; ++i) {
		bset = isl_basic_set_list_get_basic_set(scc, i);
		list = add_split_on(list, bset, gt);
	}
	isl_basic_map_free(gt);
	isl_basic_set_list_free(scc);
	scc = list;
	data->list = isl_ast_graft_list_concat(data->list,
		    generate_sorted_domains(scc, data->executed, data->build));
	isl_basic_set_list_free(scc);

	return data->list ? 0 : -1;
}

/* Sort the domains in "domain_list" according to the execution order
 * at the current depth (for equal values of the outer dimensions),
 * generate code for each of them, collecting the results in a list.
 * If no code is generated (because the intersection of the inverse schedule
 * with the domains turns out to be empty), then an empty list is returned.
 *
 * The caller is responsible for ensuring that the basic sets in "domain_list"
 * are pair-wise disjoint. It can, however, in principle happen that
 * two basic sets should be ordered one way for one value of the outer
 * dimensions and the other way for some other value of the outer dimensions.
 * We therefore play safe and look for strongly connected components.
 * The function add_nodes takes care of handling non-trivial components.
 */
static __isl_give isl_ast_graft_list *generate_sorted_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	isl_ctx *ctx;
	struct isl_add_nodes_data data;
	int depth;
	int n;

	if (!domain_list)
		return NULL;

	ctx = isl_basic_set_list_get_ctx(domain_list);
	n = isl_basic_set_list_n_basic_set(domain_list);
	data.list = isl_ast_graft_list_alloc(ctx, n);
	if (n == 0)
		return data.list;
	if (n == 1)
		return add_node(data.list, isl_union_map_copy(executed),
			isl_basic_set_list_get_basic_set(domain_list, 0),
			isl_ast_build_copy(build));

	depth = isl_ast_build_get_depth(build);
	data.executed = executed;
	data.build = build;
	if (isl_basic_set_list_foreach_scc(domain_list,
					&domain_follows_at_depth, &depth,
					&add_nodes, &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	return data.list;
}

/* Do i and j share any values for the outer dimensions?
 */
static int shared_outer(__isl_keep isl_basic_set *i,
	__isl_keep isl_basic_set *j, void *user)
{
	int depth = *(int *) user;
	isl_basic_map *test;
	int empty;
	int l;

	test = isl_basic_map_from_domain_and_range(isl_basic_set_copy(i),
						   isl_basic_set_copy(j));
	for (l = 0; l < depth; ++l)
		test = isl_basic_map_equate(test, isl_dim_in, l,
					    isl_dim_out, l);
	empty = isl_basic_map_is_empty(test);
	isl_basic_map_free(test);

	return empty < 0 ? -1 : !empty;
}
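/* For example, at depth 1, the basic sets { [0, a] : 0 <= a < 10 }
 * and { [1, b] : 0 <= b < 10 } do not share any value of the outer
 * dimension, so shared_outer returns 0 for this pair and
 * generate_parallel_domains below may place them in different components.
 */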
/* Internal data structure for generate_sorted_domains_wrap.
 *
 * "n" is the total number of basic sets
 * "executed" and "build" are extra arguments to be passed
 * to generate_sorted_domains.
 *
 * "single" is set to 1 by generate_sorted_domains_wrap if there
 * is only a single component.
 * "list" collects the results.
 */
struct isl_ast_generate_parallel_domains_data {
	int n;
	isl_union_map *executed;
	isl_ast_build *build;

	int single;
	isl_ast_graft_list *list;
};
/* Call generate_sorted_domains on "scc", fuse the result into a list
 * with either zero or one graft and collect these single element
 * lists into data->list.
 *
 * If there is only one component, i.e., if the number of basic sets
 * in the current component is equal to the total number of basic sets,
 * then data->single is set to 1 and the result of generate_sorted_domains
 * is not fused.
 */
static int generate_sorted_domains_wrap(__isl_take isl_basic_set_list *scc,
	void *user)
{
	struct isl_ast_generate_parallel_domains_data *data = user;
	isl_ast_graft_list *list;

	list = generate_sorted_domains(scc, data->executed, data->build);
	data->single = isl_basic_set_list_n_basic_set(scc) == data->n;
	if (!data->single)
		list = isl_ast_graft_list_fuse(list, data->build);
	if (!data->list)
		data->list = list;
	else
		data->list = isl_ast_graft_list_concat(data->list, list);

	isl_basic_set_list_free(scc);
	if (!data->list)
		return -1;

	return 0;
}
/* Look for any (weakly connected) components in the "domain_list"
 * of domains that share some values of the outer dimensions.
 * That is, domains in different components do not share any values
 * of the outer dimensions.  This means that these components
 * can be freely reordered.
 * Within each of the components, we sort the domains according
 * to the execution order at the current depth.
 *
 * If there is more than one component, then generate_sorted_domains_wrap
 * fuses the result of each call to generate_sorted_domains
 * into a list with either zero or one graft and collects these (at most)
 * single element lists into a bigger list.  This means that the elements
 * of the final list can be freely reordered.  In particular, we sort them
 * according to an arbitrary but fixed ordering to ease merging of
 * graft lists from different components.
 */
static __isl_give isl_ast_graft_list *generate_parallel_domains(
	__isl_keep isl_basic_set_list *domain_list,
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	int depth;
	struct isl_ast_generate_parallel_domains_data data;

	if (!domain_list)
		return NULL;

	data.n = isl_basic_set_list_n_basic_set(domain_list);
	if (data.n <= 1)
		return generate_sorted_domains(domain_list, executed, build);

	depth = isl_ast_build_get_depth(build);
	data.list = NULL;
	data.executed = executed;
	data.build = build;
	data.single = 0;
	if (isl_basic_set_list_foreach_scc(domain_list, &shared_outer, &depth,
					    &generate_sorted_domains_wrap,
					    &data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	if (!data.single)
		data.list = isl_ast_graft_list_sort_guard(data.list);

	return data.list;
}
/* Internal data for separate_domain.
 *
 * "explicit" is set if we only want to use explicit bounds.
 *
 * "domain" collects the separated domains.
 */
struct isl_separate_domain_data {
	isl_ast_build *build;
	int explicit;
	isl_set *domain;
};
/* Extract implicit bounds on the current dimension for the executed "map".
 *
 * The domain of "map" may involve inner dimensions, so we
 * need to eliminate them.
 */
static __isl_give isl_set *implicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;

	domain = isl_map_domain(map);
	domain = isl_ast_build_eliminate(build, domain);

	return domain;
}
/* Extract explicit bounds on the current dimension for the executed "map".
 *
 * Rather than eliminating the inner dimensions as in implicit_bounds,
 * we simply drop any constraints involving those inner dimensions.
 * The idea is that most bounds that are implied by constraints on the
 * inner dimensions will be enforced by for loops and not by explicit guards.
 * There is then no need to separate along those bounds.
 */
static __isl_give isl_set *explicit_bounds(__isl_take isl_map *map,
	__isl_keep isl_ast_build *build)
{
	isl_set *domain;
	int depth, dim;

	dim = isl_map_dim(map, isl_dim_out);
	map = isl_map_drop_constraints_involving_dims(map, isl_dim_out, 0, dim);

	domain = isl_map_domain(map);
	depth = isl_ast_build_get_depth(build);
	dim = isl_set_dim(domain, isl_dim_set);
	domain = isl_set_detect_equalities(domain);
	domain = isl_set_drop_constraints_involving_dims(domain,
				isl_dim_set, depth + 1, dim - (depth + 1));
	domain = isl_set_remove_divs_involving_dims(domain,
				isl_dim_set, depth, 1);
	domain = isl_set_remove_unknown_divs(domain);

	return domain;
}
/* Split data->domain into pieces that intersect with the range of "map"
 * and pieces that do not intersect with the range of "map"
 * and then add that part of the range of "map" that does not intersect
 * with data->domain.
 */
static int separate_domain(__isl_take isl_map *map, void *user)
{
	struct isl_separate_domain_data *data = user;
	isl_set *domain;
	isl_set *d1, *d2;

	if (data->explicit)
		domain = explicit_bounds(map, data->build);
	else
		domain = implicit_bounds(map, data->build);

	domain = isl_set_coalesce(domain);
	domain = isl_set_make_disjoint(domain);
	d1 = isl_set_subtract(isl_set_copy(domain), isl_set_copy(data->domain));
	d2 = isl_set_subtract(isl_set_copy(data->domain), isl_set_copy(domain));
	data->domain = isl_set_intersect(data->domain, domain);
	data->domain = isl_set_union(data->domain, d1);
	data->domain = isl_set_union(data->domain, d2);

	return 0;
}
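/* For example, if data->domain is { [i] : 0 <= i < 10 } and the bounds
 * extracted from "map" yield { [i] : 5 <= i < 15 }, then d1 is
 * { [i] : 10 <= i < 15 }, d2 is { [i] : 0 <= i < 5 } and the intersection
 * is { [i] : 5 <= i < 10 }, so data->domain ends up covering
 * { [i] : 0 <= i < 15 }, broken up at the bounds of "map".
 */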
/* Separate the schedule domains of "executed".
 *
 * That is, break up the domain of "executed" into basic sets,
 * such that for each basic set S, every element in S is associated with
 * the same domain spaces.
 *
 * "space" is the (single) domain space of "executed".
 */
static __isl_give isl_set *separate_schedule_domains(
	__isl_take isl_space *space, __isl_take isl_union_map *executed,
	__isl_keep isl_ast_build *build)
{
	struct isl_separate_domain_data data = { build };
	isl_ctx *ctx;

	ctx = isl_ast_build_get_ctx(build);
	data.explicit = isl_options_get_ast_build_separation_bounds(ctx) ==
				    ISL_AST_BUILD_SEPARATION_BOUNDS_EXPLICIT;
	data.domain = isl_set_empty(space);
	if (isl_union_map_foreach_map(executed, &separate_domain, &data) < 0)
		data.domain = isl_set_free(data.domain);

	isl_union_map_free(executed);
	return data.domain;
}
/* Temporary data used during the search for a lower bound for unrolling.
 *
 * "domain" is the original set for which to find a lower bound
 * "depth" is the dimension for which to find a lower bound
 *
 * "lower" is the best lower bound found so far.  It is NULL if we have not
 * found any yet.
 * "n" is the corresponding size.  If lower is NULL, then the value of n
 * is undefined.
 */
struct isl_find_unroll_data {
	isl_set *domain;
	int depth;

	isl_aff *lower;
	int *n;
};
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 *
 * If "c" does not involve the dimension at the current depth,
 * then we cannot use it.
 * Otherwise, let "c" be of the form
 *
 *	i >= f(j)/a
 *
 * We compute the maximal value of
 *
 *	-ceil(f(j)/a) + i + 1
 *
 * over the domain.  If there is such a value "n", then we know
 *
 *	-ceil(f(j)/a) + i + 1 <= n
 *
 * or
 *
 *	i < ceil(f(j)/a) + n
 *
 * meaning that we can use ceil(f(j)/a) as a lower bound for unrolling.
 * We just need to check if we have found any lower bound before and
 * if the new lower bound is better (smaller n) than the previously found
 * lower bounds.
 */
static int update_unrolling_lower_bound(struct isl_find_unroll_data *data,
	__isl_keep isl_constraint *c)
{
	isl_aff *aff, *lower;
	isl_val *max;

	if (!isl_constraint_is_lower_bound(c, isl_dim_set, data->depth))
		return 0;

	lower = isl_constraint_get_bound(c, isl_dim_set, data->depth);
	lower = isl_aff_ceil(lower);
	aff = isl_aff_copy(lower);
	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, data->depth, 1);
	aff = isl_aff_add_constant_si(aff, 1);
	max = isl_set_max_val(data->domain, aff);
	isl_aff_free(aff);

	if (!max)
		goto error;
	if (isl_val_is_infty(max)) {
		isl_val_free(max);
		isl_aff_free(lower);
		return 0;
	}

	if (isl_val_cmp_si(max, INT_MAX) <= 0 &&
	    (!data->lower || isl_val_cmp_si(max, *data->n) < 0)) {
		isl_aff_free(data->lower);
		data->lower = lower;
		*data->n = isl_val_get_num_si(max);
	} else
		isl_aff_free(lower);
	isl_val_free(max);

	return 1;
error:
	isl_aff_free(lower);
	return -1;
}
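/* As a worked example: for the constraint i >= j over the domain
 * { [j, i] : j <= i <= j + 7 }, "lower" is ceil(j) = j and the maximal
 * value of -j + i + 1 over the domain is 8, so i < j + 8 and the lower
 * bound j can be used for unrolling with size n = 8.
 */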
/* Check if we can use "c" as a lower bound and if it is better than
 * any previously found lower bound.
 */
static int constraint_find_unroll(__isl_take isl_constraint *c, void *user)
{
	struct isl_find_unroll_data *data;
	int r;

	data = (struct isl_find_unroll_data *) user;
	r = update_unrolling_lower_bound(data, c);
	isl_constraint_free(c);

	return r;
}
/* Look for a lower bound l(i) on the dimension at "depth"
 * and a size n such that "domain" is a subset of
 *
 *	{ [i] : l(i) <= i_d < l(i) + n }
 *
 * where d is "depth" and l(i) depends only on earlier dimensions.
 * Furthermore, try to find a lower bound such that n is as small as possible.
 * In particular, "n" needs to be finite.
 *
 * Inner dimensions have been eliminated from "domain" by the caller.
 *
 * We first construct a collection of lower bounds on the input set
 * by computing its simple hull.  We then iterate through them,
 * discarding those that we cannot use (either because they do not
 * involve the dimension at "depth" or because they have no corresponding
 * upper bound, meaning that "n" would be unbounded) and pick out the
 * best from the remaining ones.
 *
 * If we cannot find a suitable lower bound, then we consider that
 * to be an error.
 */
static __isl_give isl_aff *find_unroll_lower_bound(__isl_keep isl_set *domain,
	int depth, int *n)
{
	struct isl_find_unroll_data data = { domain, depth, NULL, n };
	isl_basic_set *hull;

	hull = isl_set_simple_hull(isl_set_copy(domain));

	if (isl_basic_set_foreach_constraint(hull,
					    &constraint_find_unroll, &data) < 0)
		goto error;

	isl_basic_set_free(hull);

	if (!data.lower)
		isl_die(isl_set_get_ctx(domain), isl_error_invalid,
			"cannot find lower bound for unrolling", return NULL);

	return data.lower;
error:
	isl_basic_set_free(hull);
	return isl_aff_free(data.lower);
}
/* Return the constraint
 *
 *	i_"depth" = aff + offset
 */
static __isl_give isl_constraint *at_offset(int depth, __isl_keep isl_aff *aff,
	int offset)
{
	aff = isl_aff_copy(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, depth, -1);
	aff = isl_aff_add_constant_si(aff, offset);
	return isl_equality_from_aff(aff);
}
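/* For instance, with depth 1, "aff" equal to { [j, i] -> [j] } and
 * offset 2, the resulting equality is j - i + 2 = 0, i.e., it fixes
 * the current dimension to i = j + 2.
 */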
/* Return a list of basic sets, one for each value of the current dimension
 * in "domain".
 * The divs that involve the current dimension have not been projected out
 * from this domain.
 *
 * Since we are going to be iterating over the individual values,
 * we first check if there are any strides on the current dimension.
 * If there are, we rewrite the current dimension i as
 *
 *		i = stride i' + offset
 *
 * and then iterate over individual values of i' instead.
 *
 * We then look for a lower bound on i' and a size such that the domain
 * is a subset of
 *
 *	{ [j,i'] : l(j) <= i' < l(j) + n }
 *
 * and then take slices of the domain at values of i'
 * between l(j) and l(j) + n - 1.
 *
 * We compute the unshifted simple hull of each slice to ensure that
 * we have a single basic set per offset.  The slicing constraint
 * may get simplified away before the unshifted simple hull is taken
 * and may therefore in some rare cases disappear from the result.
 * We therefore explicitly add the constraint back after computing
 * the unshifted simple hull to ensure that the basic sets
 * remain disjoint.  The constraints that are dropped by taking the hull
 * will be taken into account at the next level, as in the case of the
 * atomic option.
 *
 * Finally, we map i' back to i and add each basic set to the list.
 */
static __isl_give isl_basic_set_list *do_unroll(__isl_take isl_set *domain,
	__isl_keep isl_ast_build *build)
{
	int i, n;
	int depth;
	isl_ctx *ctx;
	isl_aff *lower;
	isl_basic_set_list *list;
	isl_multi_aff *expansion;
	isl_basic_map *bmap;

	if (!domain)
		return NULL;

	ctx = isl_set_get_ctx(domain);
	depth = isl_ast_build_get_depth(build);
	build = isl_ast_build_copy(build);
	domain = isl_ast_build_eliminate_inner(build, domain);
	build = isl_ast_build_detect_strides(build, isl_set_copy(domain));
	expansion = isl_ast_build_get_stride_expansion(build);

	domain = isl_set_preimage_multi_aff(domain,
					    isl_multi_aff_copy(expansion));
	domain = isl_ast_build_eliminate_divs(build, domain);

	isl_ast_build_free(build);

	list = isl_basic_set_list_alloc(ctx, 0);

	lower = find_unroll_lower_bound(domain, depth, &n);
	if (!lower)
		list = isl_basic_set_list_free(list);

	bmap = isl_basic_map_from_multi_aff(expansion);

	for (i = 0; list && i < n; ++i) {
		isl_set *set;
		isl_basic_set *bset;
		isl_constraint *slice;

		slice = at_offset(depth, lower, i);
		set = isl_set_copy(domain);
		set = isl_set_add_constraint(set, isl_constraint_copy(slice));
		bset = isl_set_unshifted_simple_hull(set);
		bset = isl_basic_set_add_constraint(bset, slice);
		bset = isl_basic_set_apply(bset, isl_basic_map_copy(bmap));
		list = isl_basic_set_list_add(list, bset);
	}

	isl_aff_free(lower);
	isl_set_free(domain);
	isl_basic_map_free(bmap);

	return list;
}
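/* For example, for the domain { [i] : 0 <= i <= 10 and i mod 4 = 0 },
 * stride detection yields i = 4 i' and the rewritten domain is
 * { [i'] : 0 <= i' <= 2 }, so l = 0 and n = 3, and the slices
 * i' = 0, 1, 2 are mapped back to the basic sets { [0] }, { [4] }
 * and { [8] }.
 */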
/* Data structure for storing the results and the intermediate objects
 * of compute_domains.
 *
 * "list" is the main result of the function and contains a list
 * of disjoint basic sets for which code should be generated.
 *
 * "executed" and "build" are inputs to compute_domains.
 * "schedule_domain" is the domain of "executed".
 *
 * "option" contains the domains at the current depth that should be made
 * atomic, separated or unrolled.  These domains are as specified by
 * the user, except that inner dimensions have been eliminated and
 * that they have been made pair-wise disjoint.
 *
 * "sep_class" contains the user-specified split into separation classes
 * specialized to the current depth.
 * "done" contains the union of the separation domains that have already
 * been handled.
 * "atomic" contains the domain that has effectively been made atomic.
 * This domain may be larger than the intersection of option[atomic]
 * and the schedule domain.
 */
struct isl_codegen_domains {
	isl_basic_set_list *list;

	isl_union_map *executed;
	isl_ast_build *build;
	isl_set *schedule_domain;

	isl_set *option[3];

	isl_map *sep_class;
	isl_set *done;
	isl_set *atomic;
};
/* Add domains to domains->list for each individual value of the current
 * dimension, for that part of the schedule domain that lies in the
 * intersection of the option domain and the class domain.
 *
 * "domain" is the intersection of the class domain and the schedule domain.
 * The divs that involve the current dimension have not been projected out
 * from this domain.
 *
 * We first break up the unroll option domain into individual pieces
 * and then handle each of them separately.  The unroll option domain
 * has been made disjoint in compute_domains_init_options.
 *
 * Note that we actively want to combine different pieces of the
 * schedule domain that have the same value at the current dimension.
 * We therefore need to break up the unroll option domain before
 * intersecting with class and schedule domain, hoping that the
 * unroll option domain specified by the user is relatively simple.
 */
static int compute_unroll_domains(struct isl_codegen_domains *domains,
	__isl_keep isl_set *domain)
{
	isl_set *unroll_domain;
	isl_basic_set_list *unroll_list;
	int i, n;
	int empty;

	empty = isl_set_is_empty(domains->option[unroll]);
	if (empty < 0)
		return -1;
	if (empty)
		return 0;

	unroll_domain = isl_set_copy(domains->option[unroll]);
	unroll_list = isl_basic_set_list_from_set(unroll_domain);

	n = isl_basic_set_list_n_basic_set(unroll_list);
	for (i = 0; i < n; ++i) {
		isl_basic_set *bset;
		isl_basic_set_list *list;

		bset = isl_basic_set_list_get_basic_set(unroll_list, i);
		unroll_domain = isl_set_from_basic_set(bset);
		unroll_domain = isl_set_intersect(unroll_domain,
						    isl_set_copy(domain));

		empty = isl_set_is_empty(unroll_domain);
		if (empty >= 0 && empty) {
			isl_set_free(unroll_domain);
			continue;
		}

		list = do_unroll(unroll_domain, domains->build);
		domains->list = isl_basic_set_list_concat(domains->list, list);
	}

	isl_basic_set_list_free(unroll_list);

	return 0;
}
/* Construct a single basic set that includes the intersection of
 * the schedule domain, the atomic option domain and the class domain.
 * Add the resulting basic set to domains->list and save a copy
 * in domains->atomic for use in compute_partial_domains.
 *
 * We construct a single domain rather than trying to combine
 * the schedule domains of individual domains because we are working
 * within a single component so that non-overlapping schedule domains
 * should already have been separated.
 * Note, though, that this does not take into account the class domain.
 * So, it is possible for a class domain to carve out a piece of the
 * schedule domain with independent pieces and then we would only
 * generate a single domain for them.  If this proves to be problematic
 * for some users, then this function will have to be adjusted.
 *
 * "domain" is the intersection of the schedule domain and the class domain,
 * with inner dimensions projected out.
 */
static int compute_atomic_domain(struct isl_codegen_domains *domains,
	__isl_keep isl_set *domain)
{
	isl_basic_set *bset;
	isl_set *atomic_domain;
	int empty;

	atomic_domain = isl_set_copy(domains->option[atomic]);
	atomic_domain = isl_set_intersect(atomic_domain, isl_set_copy(domain));
	empty = isl_set_is_empty(atomic_domain);
	if (empty < 0 || empty) {
		domains->atomic = atomic_domain;
		return empty < 0 ? -1 : 0;
	}

	atomic_domain = isl_set_coalesce(atomic_domain);
	bset = isl_set_unshifted_simple_hull(atomic_domain);
	domains->atomic = isl_set_from_basic_set(isl_basic_set_copy(bset));
	domains->list = isl_basic_set_list_add(domains->list, bset);

	return 0;
}
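/* For example, if the intersection computed above is
 * { [i] : 0 <= i < 4 or 6 <= i < 10 }, then its unshifted simple hull
 * is { [i] : 0 <= i < 10 }, so the domain that is effectively made
 * atomic (and stored in domains->atomic) is larger than what the
 * user requested.
 */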
/* Split up the schedule domain into uniform basic sets,
 * in the sense that each element in a basic set is associated with
 * elements of the same domains, and add the result to domains->list.
 * Do this for that part of the schedule domain that lies in the
 * intersection of "class_domain" and the separate option domain.
 *
 * "class_domain" may or may not include the constraints
 * of the schedule domain, but this does not make a difference
 * since we are going to intersect it with the domain of the inverse schedule.
 * If it includes schedule domain constraints, then they may involve
 * inner dimensions, but we will eliminate them in
 * separate_schedule_domains.
 */
static int compute_separate_domain(struct isl_codegen_domains *domains,
	__isl_keep isl_set *class_domain)
{
	isl_space *space;
	isl_set *domain;
	isl_union_map *executed;
	isl_basic_set_list *list;
	int empty;

	domain = isl_set_copy(domains->option[separate]);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));
	executed = isl_union_map_copy(domains->executed);
	executed = isl_union_map_intersect_domain(executed,
				    isl_union_set_from_set(domain));
	empty = isl_union_map_is_empty(executed);
	if (empty < 0 || empty) {
		isl_union_map_free(executed);
		return empty < 0 ? -1 : 0;
	}

	space = isl_set_get_space(class_domain);
	domain = separate_schedule_domains(space, executed, domains->build);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	return 0;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the given separation class domain.
 *
 * If any separation classes have been defined, then "class_domain"
 * is the domain of the current class and does not refer to inner dimensions.
 * Otherwise, "class_domain" is the universe domain.
 *
 * We first make sure that the class domain is disjoint from
 * previously considered class domains.
 *
 * The separate domains can be computed directly from the "class_domain".
 *
 * The unroll, atomic and remainder domains need the constraints
 * from the schedule domain.
 *
 * For unrolling, the actual schedule domain is needed (with divs that
 * may refer to the current dimension) so that stride detection can be
 * performed.
 *
 * For atomic and remainder domains, inner dimensions and divs involving
 * the current dimensions should be eliminated.
 * In case we are working within a separation class, we need to intersect
 * the result with the current "class_domain" to ensure that the domains
 * are disjoint from those generated from other class domains.
 *
 * The domain that has been made atomic may be larger than specified
 * by the user since it needs to be representable as a single basic set.
 * This possibly larger domain is stored in domains->atomic by
 * compute_atomic_domain.
 *
 * If anything is left after handling separate, unroll and atomic,
 * we split it up into basic sets and append the basic sets to domains->list.
 */
static int compute_partial_domains(struct isl_codegen_domains *domains,
	__isl_take isl_set *class_domain)
{
	isl_basic_set_list *list;
	isl_set *domain;

	class_domain = isl_set_subtract(class_domain,
					isl_set_copy(domains->done));
	domains->done = isl_set_union(domains->done,
					isl_set_copy(class_domain));

	domain = isl_set_copy(class_domain);

	if (compute_separate_domain(domains, domain) < 0)
		goto error;
	domain = isl_set_subtract(domain,
				    isl_set_copy(domains->option[separate]));

	domain = isl_set_intersect(domain,
				isl_set_copy(domains->schedule_domain));

	if (compute_unroll_domains(domains, domain) < 0)
		goto error;
	domain = isl_set_subtract(domain,
				    isl_set_copy(domains->option[unroll]));

	domain = isl_ast_build_eliminate(domains->build, domain);
	domain = isl_set_intersect(domain, isl_set_copy(class_domain));

	if (compute_atomic_domain(domains, domain) < 0)
		domain = isl_set_free(domain);
	domain = isl_set_subtract(domain, domains->atomic);

	domain = isl_set_coalesce(domain);
	domain = isl_set_make_disjoint(domain);

	list = isl_basic_set_list_from_set(domain);
	domains->list = isl_basic_set_list_concat(domains->list, list);

	isl_set_free(class_domain);

	return 0;
error:
	isl_set_free(domain);
	isl_set_free(class_domain);
	return -1;
}
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately
 * for the separation class identified by "pnt".
 *
 * We extract the corresponding class domain from domains->sep_class,
 * eliminate inner dimensions and pass control to compute_partial_domains.
 */
static int compute_class_domains(__isl_take isl_point *pnt, void *user)
{
	struct isl_codegen_domains *domains = user;
	isl_set *class_set;
	isl_set *domain;
	int disjoint;

	class_set = isl_set_from_point(pnt);
	domain = isl_map_domain(isl_map_intersect_range(
				isl_map_copy(domains->sep_class), class_set));
	domain = isl_ast_build_compute_gist(domains->build, domain);
	domain = isl_ast_build_eliminate(domains->build, domain);

	disjoint = isl_set_plain_is_disjoint(domain, domains->schedule_domain);
	if (disjoint < 0)
		return -1;
	if (disjoint) {
		isl_set_free(domain);
		return 0;
	}

	return compute_partial_domains(domains, domain);
}
/* Extract the domains at the current depth that should be atomic,
 * separated or unrolled and store them in option.
 *
 * The domains specified by the user might overlap, so we make
 * them disjoint by subtracting earlier domains from later domains.
 */
static void compute_domains_init_options(isl_set *option[3],
	__isl_keep isl_ast_build *build)
{
	enum isl_ast_build_domain_type type, type2;

	for (type = atomic; type <= separate; ++type) {
		option[type] = isl_ast_build_get_option_domain(build, type);
		for (type2 = atomic; type2 < type; ++type2)
			option[type] = isl_set_subtract(option[type],
						isl_set_copy(option[type2]));
	}

	option[unroll] = isl_set_coalesce(option[unroll]);
	option[unroll] = isl_set_make_disjoint(option[unroll]);
}
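/* For example, assuming "atomic" precedes "unroll" in
 * isl_ast_build_domain_type (as the loop bounds above suggest),
 * a user-specified atomic domain { [i] : 0 <= i < 8 } and unroll domain
 * { [i] : 4 <= i < 12 } result in option[unroll] being reduced to
 * { [i] : 8 <= i < 12 }, with the overlap remaining atomic.
 */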
/* Split up the domain at the current depth into disjoint
 * basic sets for which code should be generated separately,
 * based on the user-specified options.
 * Return the list of disjoint basic sets.
 *
 * There are three kinds of domains that we need to keep track of.
 * - the "schedule domain" is the domain of "executed"
 * - the "class domain" is the domain corresponding to the current
 *	separation class
 * - the "option domain" is the domain corresponding to one of the options
 *	atomic, unroll or separate
 *
 * We first consider the individual values of the separation classes
 * and split up the domain for each of them separately.
 * Finally, we consider the remainder.  If no separation classes were
 * specified, then we call compute_partial_domains with the universe
 * "class_domain".  Otherwise, we take the "schedule_domain" as "class_domain",
 * with inner dimensions removed.  We do this because we want to
 * avoid computing the complement of the class domains (i.e., the difference
 * between the universe and domains->done).
 */
static __isl_give isl_basic_set_list *compute_domains(
	__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
{
	struct isl_codegen_domains domains;
	isl_ctx *ctx;
	isl_set *domain;
	isl_union_set *schedule_domain;
	isl_set *classes;
	isl_space *space;
	int n_param;
	enum isl_ast_build_domain_type type;
	int empty;

	if (!executed)
		return NULL;

	ctx = isl_union_map_get_ctx(executed);
	domains.list = isl_basic_set_list_alloc(ctx, 0);

	schedule_domain = isl_union_map_domain(isl_union_map_copy(executed));
	domain = isl_set_from_union_set(schedule_domain);

	compute_domains_init_options(domains.option, build);

	domains.sep_class = isl_ast_build_get_separation_class(build);
	classes = isl_map_range(isl_map_copy(domains.sep_class));
	n_param = isl_set_dim(classes, isl_dim_param);
	classes = isl_set_project_out(classes, isl_dim_param, 0, n_param);

	space = isl_set_get_space(domain);
	domains.build = build;
	domains.schedule_domain = isl_set_copy(domain);
	domains.executed = executed;
	domains.done = isl_set_empty(space);

	if (isl_set_foreach_point(classes, &compute_class_domains, &domains) < 0)
		domains.list = isl_basic_set_list_free(domains.list);
	isl_set_free(classes);

	empty = isl_set_is_empty(domains.done);
	if (empty < 0) {
		domains.list = isl_basic_set_list_free(domains.list);
		domain = isl_set_free(domain);
	} else if (empty) {
		isl_set_free(domain);
		domain = isl_set_universe(isl_set_get_space(domains.done));
	} else
		domain = isl_ast_build_eliminate(build, domain);

	if (compute_partial_domains(&domains, domain) < 0)
		domains.list = isl_basic_set_list_free(domains.list);

	isl_set_free(domains.schedule_domain);
	isl_set_free(domains.done);
	isl_map_free(domains.sep_class);
	for (type = atomic; type <= separate; ++type)
		isl_set_free(domains.option[type]);

	return domains.list;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * We first split up the domain at the current depth into disjoint
 * basic sets based on the user-specified options.
 * Then we generate code for each of them and concatenate the results.
 */
static __isl_give isl_ast_graft_list *generate_shifted_component(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	isl_basic_set_list *domain_list;
	isl_ast_graft_list *list = NULL;

	domain_list = compute_domains(executed, build);
	list = generate_parallel_domains(domain_list, executed, build);

	isl_basic_set_list_free(domain_list);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
struct isl_set_map_pair {
	isl_set *set;
	isl_map *map;
};
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * return the union of the "map" fields of the elements
 * indexed by the first "n" elements of "order".
 */
static __isl_give isl_union_map *construct_component_executed(
	struct isl_set_map_pair *domain, int *order, int n)
{
	int i;
	isl_map *map;
	isl_union_map *executed;

	map = isl_map_copy(domain[order[0]].map);
	executed = isl_union_map_from_map(map);
	for (i = 1; i < n; ++i) {
		map = isl_map_copy(domain[order[i]].map);
		executed = isl_union_map_add_map(executed, map);
	}

	return executed;
}
/* Generate code for a single component, after shifting (if any)
 * has been applied.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 */
static __isl_give isl_ast_graft_list *generate_shifted_component_from_list(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	isl_union_map *executed;

	executed = construct_component_executed(domain, order, n);
	return generate_shifted_component(executed, build);
}
/* Does set dimension "pos" of "set" have an obviously fixed value?
 */
static int dim_is_fixed(__isl_keep isl_set *set, int pos)
{
	int fixed;
	isl_val *v;

	v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, pos);
	if (!v)
		return -1;
	fixed = !isl_val_is_nan(v);
	isl_val_free(v);

	return fixed;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * do all (except for at most one) of the "set" field of the elements
 * indexed by the first "n" elements of "order" have a fixed value
 * at position "depth"?
 */
static int at_most_one_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth)
{
	int i;
	int non_fixed = -1;

	for (i = 0; i < n; ++i) {
		int f;

		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		if (non_fixed >= 0)
			return 0;
		non_fixed = i;
	}

	return 1;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * eliminate the inner dimensions from the "set" field of the elements
 * indexed by the first "n" elements of "order", provided the current
 * dimension does not have a fixed value.
 *
 * Return the index of the first element in "order" with a corresponding
 * "set" field that does not have an (obviously) fixed value.
 */
static int eliminate_non_fixed(struct isl_set_map_pair *domain,
	int *order, int n, int depth, __isl_keep isl_ast_build *build)
{
	int i;
	int base = -1;

	for (i = n - 1; i >= 0; --i) {
		int f;
		f = dim_is_fixed(domain[order[i]].set, depth);
		if (f < 0)
			return -1;
		if (f)
			continue;
		domain[order[i]].set = isl_ast_build_eliminate_inner(build,
							domain[order[i]].set);
		base = i;
	}

	return base;
}
/* Given an array "domain" of isl_set_map_pairs and an array "order"
 * of indices into the "domain" array,
 * find the element of "domain" (amongst those indexed by the first "n"
 * elements of "order") with the "set" field that has the smallest
 * value for the current iterator.
 *
 * Note that the domain with the smallest value may depend on the parameters
 * and/or outer loop dimension.  Since the result of this function is only
 * used as heuristic, we only make a reasonable attempt at finding the best
 * domain, one that should work in case a single domain provides the smallest
 * value for the current dimension over all values of the parameters
 * and outer dimensions.
 *
 * In particular, we compute the smallest value of the first domain
 * and replace it by that of any later domain if that later domain
 * has a smallest value that is smaller for at least some value
 * of the parameters and outer dimensions.
 */
static int first_offset(struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_ast_build *build)
{
	int i;
	int first = 0;
	isl_map *min_first;

	min_first = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[0]].set));
	min_first = isl_map_lexmin(min_first);

	for (i = 1; i < n; ++i) {
		isl_map *min, *test;
		int empty;

		min = isl_ast_build_map_to_iterator(build,
					isl_set_copy(domain[order[i]].set));
		min = isl_map_lexmin(min);
		test = isl_map_copy(min);
		test = isl_map_apply_domain(isl_map_copy(min_first), test);
		test = isl_map_order_lt(test, isl_dim_in, 0, isl_dim_out, 0);
		empty = isl_map_is_empty(test);
		isl_map_free(test);
		if (empty >= 0 && !empty) {
			isl_map_free(min_first);
			first = i;
			min_first = min;
		} else
			isl_map_free(min);

		if (empty < 0)
			break;
	}

	isl_map_free(min_first);

	return i < n ? -1 : first;
}
/* Construct a shifted inverse schedule based on the original inverse schedule,
 * the stride and the offset.
 *
 * The original inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * "stride" and "offset" are such that the difference
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * Moreover, 0 <= offset[i] < stride.
 *
 * For each domain, we create a map
 *
 *	{ [..., j, ...] -> [..., j - offset[i], offset[i], ....] }
 *
 * where j refers to the current dimension and the other dimensions are
 * unchanged, and apply this map to the original schedule domain.
 *
 * For example, for the original schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * and assuming the offset is 0 for the A domain and 1 for the B domain,
 * we apply the mapping
 *
 *	{ [j] -> [j, 0] }
 *
 * to the schedule of the "A" domain and the mapping
 *
 *	{ [j - 1] -> [j, 1] }
 *
 * to the schedule of the "B" domain.
 *
 *
 * Note that after the transformation, the differences between pairs
 * of values of the current dimension over all domains are multiples
 * of stride and that we have therefore exposed the stride.
 *
 *
 * To see that the mapping preserves the lexicographic order,
 * first note that each of the individual maps above preserves the order.
 * If the value of the current iterator is j1 in one domain and j2 in another,
 * then if j1 = j2, we know that the same map is applied to both domains
 * and the order is preserved.
 * Otherwise, let us assume, without loss of generality, that j1 < j2.
 * If c1 >= c2 (with c1 and c2 the corresponding offsets), then
 *
 *	j1 - c1 < j2 - c2
 *
 * and the order is preserved.
 * If c1 < c2, then we know
 *
 *	0 <= c2 - c1 < s
 *
 * with s the stride.  Moreover,
 *
 *	j2 - j1 = n * s + r
 *
 * with n >= 0 and 0 <= r < s.
 * In other words, r = c2 - c1.
 * If n > 0, then
 *
 *	j1 - c1 < j2 - c2
 *
 * If n = 0, then
 *
 *	j1 - c1 = j2 - c2
 *
 * and so
 *
 *	(j1 - c1, c1) << (j2 - c2, c2)
 *
 * with "<<" the lexicographic order, proving that the order is preserved
 * in all cases.
 */
static __isl_give isl_union_map *contruct_shifted_executed(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	int i;
	isl_union_map *executed;
	isl_space *space;
	isl_map *map;
	int depth;
	isl_constraint *c;

	depth = isl_ast_build_get_depth(build);
	space = isl_ast_build_get_space(build, 1);
	executed = isl_union_map_empty(isl_space_copy(space));
	space = isl_space_map_from_set(space);
	map = isl_map_identity(isl_space_copy(space));
	map = isl_map_eliminate(map, isl_dim_out, depth, 1);
	map = isl_map_insert_dims(map, isl_dim_out, depth + 1, 1);
	space = isl_space_insert_dims(space, isl_dim_out, depth + 1, 1);

	c = isl_equality_alloc(isl_local_space_from_space(space));
	c = isl_constraint_set_coefficient_si(c, isl_dim_in, depth, 1);
	c = isl_constraint_set_coefficient_si(c, isl_dim_out, depth, -1);

	for (i = 0; i < n; ++i) {
		isl_map *map_i;
		isl_val *v;

		v = isl_multi_val_get_val(offset, i);
		if (!v)
			break;
		map_i = isl_map_copy(map);
		map_i = isl_map_fix_val(map_i, isl_dim_out, depth + 1,
					isl_val_copy(v));
		v = isl_val_neg(v);
		c = isl_constraint_set_constant_val(c, v);
		map_i = isl_map_add_constraint(map_i, isl_constraint_copy(c));

		map_i = isl_map_apply_domain(isl_map_copy(domain[order[i]].map),
						map_i);
		executed = isl_union_map_add_map(executed, map_i);
	}

	isl_constraint_free(c);
	isl_map_free(map);

	if (i < n)
		executed = isl_union_map_free(executed);

	return executed;
}
/* Generate code for a single component, after exposing the stride,
 * given that the schedule domain is "shifted strided".
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * The schedule domain being "shifted strided" means that the differences
 * between the values of the current dimension of domain "i"
 * and the values of the current dimension for some reference domain are
 * equal to
 *
 *	stride * integer + offset[i]
 *
 * We first look for the domain with the "smallest" value for the current
 * dimension and adjust the offsets such that the offset of the "smallest"
 * domain is equal to zero.  The other offsets are reduced modulo stride.
 *
 * Based on this information, we construct a new inverse schedule in
 * contruct_shifted_executed that exposes the stride.
 * Since this involves the introduction of a new schedule dimension,
 * the build needs to be changed accordingly.
 * After computing the AST, the newly introduced dimension needs
 * to be removed again from the list of grafts.  We do this by plugging
 * in a mapping that represents the new schedule domain in terms of the
 * old schedule domain.
 */
static __isl_give isl_ast_graft_list *generate_shift_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_keep isl_val *stride, __isl_keep isl_multi_val *offset,
	__isl_take isl_ast_build *build)
{
	isl_ast_graft_list *list;
	int first;
	int depth;
	isl_ctx *ctx;
	isl_val *val;
	isl_multi_val *mv;
	isl_space *space;
	isl_multi_aff *ma, *zero;
	isl_union_map *executed;

	ctx = isl_ast_build_get_ctx(build);
	depth = isl_ast_build_get_depth(build);

	first = first_offset(domain, order, n, build);
	if (first < 0)
		return isl_ast_build_free(build);

	mv = isl_multi_val_copy(offset);
	val = isl_multi_val_get_val(offset, first);
	val = isl_val_neg(val);
	mv = isl_multi_val_add_val(mv, val);
	mv = isl_multi_val_mod_val(mv, isl_val_copy(stride));

	executed = contruct_shifted_executed(domain, order, n, stride, mv,
						build);
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_map_from_set(space);
	ma = isl_multi_aff_identity(isl_space_copy(space));
	space = isl_space_from_domain(isl_space_domain(space));
	space = isl_space_add_dims(space, isl_dim_out, 1);
	zero = isl_multi_aff_zero(space);
	ma = isl_multi_aff_range_splice(ma, depth + 1, zero);
	build = isl_ast_build_insert_dim(build, depth + 1);
	list = generate_shifted_component(executed, build);

	list = isl_ast_graft_list_preimage_multi_aff(list, ma);

	isl_multi_val_free(mv);

	return list;
}
/* Generate code for a single component.
 *
 * The component inverse schedule is specified as the "map" fields
 * of the elements of "domain" indexed by the first "n" elements of "order".
 *
 * This function may modify the "set" fields of "domain".
 *
 * Before proceeding with the actual code generation for the component,
 * we first check if there are any "shifted" strides, meaning that
 * the schedule domains of the individual domains are all strided,
 * but that they have different offsets, resulting in the union
 * of schedule domains not being strided anymore.
 *
 * The simplest example is the schedule
 *
 *	{ A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
 *
 * Both schedule domains are strided, but their union is not.
 * This function detects such cases and then rewrites the schedule to
 *
 *	{ A[i] -> [2i, 0]: 0 <= i < 10; B[i] -> [2i, 1] : 0 <= i < 10 }
 *
 * In the new schedule, the schedule domains have the same offset (modulo
 * the stride), ensuring that the union of schedule domains is also strided.
 *
 *
 * If there is only a single domain in the component, then there is
 * nothing to do.  Similarly, if the current schedule dimension has
 * a fixed value for almost all domains then there is nothing to be done.
 * In particular, we need at least two domains where the current schedule
 * dimension does not have a fixed value.
 * Finally, if any of the options refer to the current schedule dimension,
 * then we bail out as well.  It would be possible to reformulate the options
 * in terms of the new schedule domain, but that would introduce constraints
 * that separate the domains in the options and that is something we would
 * like to avoid.
 *
 *
 * To see if there is any shifted stride, we look at the differences
 * between the values of the current dimension in pairs of domains
 * for equal values of outer dimensions.  These differences should be
 * of the form
 *
 *	m x + r
 *
 * with "m" the stride and "r" a constant.  Note that we cannot perform
 * this analysis on individual domains as the lower bound in each domain
 * may depend on parameters or outer dimensions and so the current dimension
 * itself may not have a fixed remainder on division by the stride.
 *
 * In particular, we compare the first domain that does not have an
 * obviously fixed value for the current dimension to itself and all
 * other domains and collect the offsets and the gcd of the strides.
 * If the gcd becomes one, then we failed to find shifted strides.
 * If all the offsets are the same (for those domains that do not have
 * an obviously fixed value for the current dimension), then we do not
 * apply the transformation.
 * If none of the domains were skipped, then there is nothing to do.
 * If some of them were skipped, then if we apply separation, the schedule
 * domain should get split in pieces with a (non-shifted) stride.
 *
 * Otherwise, we apply a shift to expose the stride in
 * generate_shift_component.
 */
static __isl_give isl_ast_graft_list *generate_component(
	struct isl_set_map_pair *domain, int *order, int n,
	__isl_take isl_ast_build *build)
{
	int i, d;
	int depth;
	isl_ctx *ctx;
	isl_map *map;
	isl_set *deltas;
	isl_val *gcd = NULL;
	isl_multi_val *mv;
	int fixed, skip;
	int base;
	isl_ast_graft_list *list;
	int res = 0;

	depth = isl_ast_build_get_depth(build);

	skip = n == 1;
	if (skip >= 0 && !skip)
		skip = at_most_one_non_fixed(domain, order, n, depth);
	if (skip >= 0 && !skip)
		skip = isl_ast_build_options_involve_depth(build);
	if (skip < 0)
		return isl_ast_build_free(build);
	if (skip)
		return generate_shifted_component_from_list(domain,
							    order, n, build);

	base = eliminate_non_fixed(domain, order, n, depth, build);
	if (base < 0)
		return isl_ast_build_free(build);

	ctx = isl_ast_build_get_ctx(build);

	mv = isl_multi_val_zero(isl_space_set_alloc(ctx, 0, n));

	fixed = 1;
	for (i = 0; i < n; ++i) {
		isl_val *m, *r;

		map = isl_map_from_domain_and_range(
					isl_set_copy(domain[order[base]].set),
					isl_set_copy(domain[order[i]].set));
		for (d = 0; d < depth; ++d)
			map = isl_map_equate(map, isl_dim_in, d,
						isl_dim_out, d);
		deltas = isl_map_deltas(map);
		res = isl_set_dim_residue_class_val(deltas, depth, &m, &r);
		isl_set_free(deltas);
		if (res < 0)
			break;

		if (i == 0)
			gcd = m;
		else
			gcd = isl_val_gcd(gcd, m);
		if (isl_val_is_one(gcd)) {
			isl_val_free(r);
			break;
		}
		mv = isl_multi_val_set_val(mv, i, r);

		res = dim_is_fixed(domain[order[i]].set, depth);
		if (res < 0)
			break;
		if (res)
			continue;

		if (fixed && i > base) {
			isl_val *a, *b;
			a = isl_multi_val_get_val(mv, i);
			b = isl_multi_val_get_val(mv, base);
			if (isl_val_ne(a, b))
				fixed = 0;
			isl_val_free(a);
			isl_val_free(b);
		}
	}

	if (res < 0 || !gcd) {
		isl_ast_build_free(build);
		list = NULL;
	} else if (i < n || fixed) {
		list = generate_shifted_component_from_list(domain,
							    order, n, build);
	} else {
		list = generate_shift_component(domain, order, n, gcd, mv,
						build);
	}

	isl_val_free(gcd);
	isl_multi_val_free(mv);

	return list;
}
/* Store both "map" itself and its domain in the
 * structure pointed to by *next and advance to the next array element.
 */
static int extract_domain(__isl_take isl_map *map, void *user)
{
	struct isl_set_map_pair **next = user;

	(*next)->map = isl_map_copy(map);
	(*next)->set = isl_map_domain(map);
	(*next)++;

	return 0;
}
/* Internal data for any_scheduled_after.
 *
 * "depth" is the number of loops that have already been generated
 * "group_coscheduled" is a local copy of options->ast_build_group_coscheduled
 * "domain" is an array of set-map pairs corresponding to the different
 * iteration domains.  The set is the schedule domain, i.e., the domain
 * of the inverse schedule, while the map is the inverse schedule itself.
 */
struct isl_any_scheduled_after_data {
	int depth;
	int group_coscheduled;
	struct isl_set_map_pair *domain;
};

/* Is any element of domain "i" scheduled after any element of domain "j"
 * (for a common iteration of the first data->depth loops)?
 *
 * data->domain[i].set contains the domain of the inverse schedule
 * for domain "i", i.e., elements in the schedule domain.
 *
 * If data->group_coscheduled is set, then we also return 1 if there
 * is any pair of elements in the two domains that are scheduled together.
 */
static int any_scheduled_after(int i, int j, void *user)
{
	struct isl_any_scheduled_after_data *data = user;
	int dim = isl_set_dim(data->domain[i].set, isl_dim_set);
	int pos;

	for (pos = data->depth; pos < dim; ++pos) {
		int follows;

		follows = isl_set_follows_at(data->domain[i].set,
						data->domain[j].set, pos);

		if (follows < -1)
			return -1;
		if (follows > 0)
			return 1;
		if (follows < 0)
			return 0;
	}

	return data->group_coscheduled;
}
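/* For instance, with data->depth equal to 0 and schedule domains
 * { [i] : 0 <= i < 5 } and { [i] : 5 <= i < 10 }, no element of the
 * first domain is scheduled after any element of the second one,
 * so any_scheduled_after returns 0 for this pair (and 1 for the
 * reverse pair), allowing the two domains to be handled separately.
 */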
/* Look for independent components at the current depth and generate code
 * for each component separately.  The resulting lists of grafts are
 * merged in an attempt to combine grafts with identical guards.
 *
 * Code for two domains can be generated separately if all the elements
 * of one domain are scheduled before (or together with) all the elements
 * of the other domain.  We therefore consider the graph with as nodes
 * the domains and an edge between two nodes if any element of the first
 * node is scheduled after any element of the second node.
 * If the ast_build_group_coscheduled is set, then we also add an edge if
 * there is any pair of elements in the two domains that are scheduled
 * together.
 * Code is then generated (by generate_component)
 * for each of the strongly connected components in this graph
 * in their topological order.
 *
 * Since the test is performed on the domain of the inverse schedules of
 * the different domains, we precompute these domains and store
 * them in data.domain.
 */
static __isl_give isl_ast_graft_list *generate_components(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int i;
	isl_ctx *ctx = isl_ast_build_get_ctx(build);
	int n = isl_union_map_n_map(executed);
	struct isl_any_scheduled_after_data data;
	struct isl_set_map_pair *next;
	struct isl_tarjan_graph *g = NULL;
	isl_ast_graft_list *list = NULL;
	int n_domain = 0;

	data.domain = isl_calloc_array(ctx, struct isl_set_map_pair, n);
	if (!data.domain)
		goto error;
	n_domain = n;

	next = data.domain;
	if (isl_union_map_foreach_map(executed, &extract_domain, &next) < 0)
		goto error;

	if (!build)
		goto error;
	data.depth = isl_ast_build_get_depth(build);
	data.group_coscheduled = isl_options_get_ast_build_group_coscheduled(ctx);
	g = isl_tarjan_graph_init(ctx, n, &any_scheduled_after, &data);

	list = isl_ast_graft_list_alloc(ctx, 0);

	i = 0;
	while (list && n) {
		isl_ast_graft_list *list_c;
		int first = i;

		if (g->order[i] == -1)
			isl_die(ctx, isl_error_internal, "cannot happen",
				goto error);
		i++; --n;
		while (g->order[i] != -1) {
			++i; --n;
		}

		list_c = generate_component(data.domain,
					    g->order + first, i - first,
					    isl_ast_build_copy(build));
		list = isl_ast_graft_list_merge(list, list_c, build);

		++i;
	}

	if (0)
error:		list = isl_ast_graft_list_free(list);
	isl_tarjan_graph_free(g);
	for (i = 0; i < n_domain; ++i) {
		isl_map_free(data.domain[i].map);
		isl_set_free(data.domain[i].set);
	}
	free(data.domain);
	isl_union_map_free(executed);
	isl_ast_build_free(build);

	return list;
}
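/* For example, for the inverse schedule
 *
 *	{ [i] -> A[i] : 0 <= i < 10; [i] -> B[i - 10] : 10 <= i < 20 }
 *
 * all elements of the A domain are scheduled before all elements of
 * the B domain, so the graph has no edge from A to B and the two
 * domains end up in separate components that are handled one after
 * the other.
 */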
/* Generate code for the next level (and all inner levels).
 *
 * If "executed" is empty, i.e., no code needs to be generated,
 * then we return an empty list.
 *
 * If we have already generated code for all loop levels, then we pass
 * control to generate_inner_level.
 *
 * If "executed" lives in a single space, i.e., if code needs to be
 * generated for a single domain, then there can only be a single
 * component and we go directly to generate_shifted_component.
 * Otherwise, we call generate_components to detect the components
 * and to call generate_component on each of them separately.
 */
static __isl_give isl_ast_graft_list *generate_next_level(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
{
	int depth;

	if (!build || !executed)
		goto error;

	if (isl_union_map_is_empty(executed)) {
		isl_ctx *ctx = isl_ast_build_get_ctx(build);
		isl_union_map_free(executed);
		isl_ast_build_free(build);
		return isl_ast_graft_list_alloc(ctx, 0);
	}

	depth = isl_ast_build_get_depth(build);
	if (depth >= isl_set_dim(build->domain, isl_dim_set))
		return generate_inner_level(executed, build);

	if (isl_union_map_n_map(executed) == 1)
		return generate_shifted_component(executed, build);

	return generate_components(executed, build);
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Internal data structure used by isl_ast_build_ast_from_schedule.
 * internal, executed and build are the inputs to generate_code.
 * list collects the output.
 */
struct isl_generate_code_data {
	int internal;
	isl_union_map *executed;
	isl_ast_build *build;

	isl_ast_graft_list *list;
};
/* Given an inverse schedule in terms of the external build schedule, i.e.,
 *
 *	[E -> S] -> D
 *
 * with E the external build schedule and S the additional schedule "space",
 * reformulate the inverse schedule in terms of the internal schedule domain,
 * i.e.,
 *
 *	[I -> S] -> D
 *
 * We first obtain a mapping
 *
 *	I -> E
 *
 * take the inverse and the product with S -> S, resulting in
 *
 *	[I -> S] -> [E -> S]
 *
 * Applying the map to the input produces the desired result.
 */
static __isl_give isl_union_map *internal_executed(
	__isl_take isl_union_map *executed, __isl_keep isl_space *space,
	__isl_keep isl_ast_build *build)
{
	isl_map *id, *proj;

	proj = isl_ast_build_get_schedule_map(build);
	proj = isl_map_reverse(proj);
	space = isl_space_map_from_set(isl_space_copy(space));
	id = isl_map_identity(space);
	proj = isl_map_product(proj, id);
	executed = isl_union_map_apply_domain(executed,
						isl_union_map_from_map(proj));

	return executed;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding image element(s)
 * for those image elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 * "space" is the space of the additional part of the schedule.
 * It is equal to the space of "set" if build->domain is parametric.
 * Otherwise, it is equal to the range of the wrapped space of "set".
 *
 * If the build space is not parametric and if isl_ast_build_ast_from_schedule
 * was called from an outside user (data->internal not set), then
 * the (inverse) schedule refers to the external build domain and needs to
 * be transformed to refer to the internal build domain.
 *
 * The build is extended to include the additional part of the schedule.
 * If the original build space was not parametric, then the options
 * in data->build refer only to the additional part of the schedule
 * and they need to be adjusted to refer to the complete AST build
 * domain.
 *
 * After having adjusted inverse schedule and build, we start generating
 * code with the outer loop of the current code generation
 * in generate_next_level.
 *
 * If the original build space was not parametric, we undo the embedding
 * on the resulting isl_ast_node_list so that it can be used within
 * the outer AST build.
 */
static int generate_code_in_space(struct isl_generate_code_data *data,
	__isl_take isl_set *set, __isl_take isl_space *space)
{
	isl_union_map *executed;
	isl_ast_build *build;
	isl_ast_graft_list *list;
	int embed;

	executed = isl_union_map_copy(data->executed);
	executed = isl_union_map_intersect_domain(executed,
						    isl_union_set_from_set(set));

	embed = !isl_set_is_params(data->build->domain);
	if (embed && !data->internal)
		executed = internal_executed(executed, space, data->build);

	build = isl_ast_build_copy(data->build);
	build = isl_ast_build_product(build, space);

	list = generate_next_level(executed, build);

	list = isl_ast_graft_list_unembed(list, embed);

	data->list = isl_ast_graft_list_concat(data->list, list);

	return 0;
}
/* Generate an AST that visits the elements in the range of data->executed
 * in the relative order specified by the corresponding domain element(s)
 * for those domain elements that belong to "set".
 * Add the result to data->list.
 *
 * The caller ensures that "set" is a universe domain.
 *
 * If the build space S is not parametric, then the space of "set"
 * needs to be a wrapped relation with S as domain.  That is, it needs
 * to be of the form
 *
 *	[S -> T]
 *
 * Check this property and pass control to generate_code_in_space
 * passing along T.
 * If the build space is parametric, then T is the space of "set".
 */
static int generate_code_set(__isl_take isl_set *set, void *user)
{
	struct isl_generate_code_data *data = user;
	isl_space *space, *build_space;
	int is_domain;

	space = isl_set_get_space(set);

	if (isl_set_is_params(data->build->domain))
		return generate_code_in_space(data, set, space);

	build_space = isl_ast_build_get_space(data->build, data->internal);
	space = isl_space_unwrap(space);
	is_domain = isl_space_is_domain(build_space, space);
	isl_space_free(build_space);
	space = isl_space_range(space);

	if (is_domain < 0)
		goto error;
	if (!is_domain)
		isl_die(isl_set_get_ctx(set), isl_error_invalid,
			"invalid nested schedule space", goto error);

	return generate_code_in_space(data, set, space);
error:
	isl_set_free(set);
	isl_space_free(space);
	return -1;
}
/* Generate an AST that visits the elements in the range of "executed"
 * in the relative order specified by the corresponding domain element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the domain space(s) of "executed"
 * need to be wrapped relations with S as domain.
 *
 * If the domain of "executed" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * If "internal" is set, then the domain "S" above refers to the internal
 * schedule domain representation.  Otherwise, it refers to the external
 * representation, as returned by isl_ast_build_get_schedule_space.
 *
 * We essentially run over all the spaces in the domain of "executed"
 * and call generate_code_set on each of them.
 */
static __isl_give isl_ast_graft_list *generate_code(
	__isl_take isl_union_map *executed, __isl_take isl_ast_build *build,
	int internal)
{
	isl_ctx *ctx;
	struct isl_generate_code_data data = { 0 };
	isl_space *space;
	isl_union_set *schedule_domain;
	isl_union_map *universe;

	if (!build)
		goto error;
	space = isl_ast_build_get_space(build, 1);
	space = isl_space_align_params(space,
				    isl_union_map_get_space(executed));
	space = isl_space_align_params(space,
				    isl_union_map_get_space(build->options));
	build = isl_ast_build_align_params(build, isl_space_copy(space));
	executed = isl_union_map_align_params(executed, space);
	if (!executed || !build)
		goto error;

	ctx = isl_ast_build_get_ctx(build);

	data.internal = internal;
	data.executed = executed;
	data.build = build;
	data.list = isl_ast_graft_list_alloc(ctx, 0);

	universe = isl_union_map_universe(isl_union_map_copy(executed));
	schedule_domain = isl_union_map_domain(universe);
	if (isl_union_set_foreach_set(schedule_domain, &generate_code_set,
					&data) < 0)
		data.list = isl_ast_graft_list_free(data.list);

	isl_union_set_free(schedule_domain);
	isl_union_map_free(executed);

	isl_ast_build_free(build);
	return data.list;
error:
	isl_union_map_free(executed);
	isl_ast_build_free(build);
	return NULL;
}
/* Generate an AST that visits the elements in the domain of "schedule"
 * in the relative order specified by the corresponding image element(s).
 *
 * "build" is an isl_ast_build that has either been constructed by
 * isl_ast_build_from_context or passed to a callback set by
 * isl_ast_build_set_create_leaf.
 * In the first case, the space of the isl_ast_build is typically
 * a parametric space, although this is currently not enforced.
 * In the second case, the space is never a parametric space.
 * If the space S is not parametric, then the range space(s) of "schedule"
 * need to be wrapped relations with S as domain.
 *
 * If the range of "schedule" consists of several spaces, then an AST
 * is generated for each of them (in arbitrary order) and the results
 * are concatenated.
 *
 * We first initialize the local copies of the relevant options.
 * We do this here rather than when the isl_ast_build is created
 * because the options may have changed between the construction
 * of the isl_ast_build and the call to isl_ast_build_ast_from_schedule.
 *
 * The main computation is performed on an inverse schedule (with
 * the schedule domain in the domain and the elements to be executed
 * in the range) called "executed".
 */
__isl_give isl_ast_node *isl_ast_build_ast_from_schedule(
	__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
{
	isl_ast_graft_list *list;
	isl_ast_node *node;
	isl_union_map *executed;

	build = isl_ast_build_copy(build);
	build = isl_ast_build_set_single_valued(build, 0);
	executed = isl_union_map_reverse(schedule);
	list = generate_code(executed, isl_ast_build_copy(build), 0);
	node = isl_ast_node_from_graft_list(list, build);
	isl_ast_build_free(build);

	return node;
}