/*
 * Copyright 2005-2007 Universiteit Leiden
 * Copyright 2008-2009 Katholieke Universiteit Leuven
 * Copyright 2010      INRIA Saclay
 * Copyright 2012      Universiteit Leiden
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, Leiden Institute of Advanced Computer Science,
 * Universiteit Leiden, Niels Bohrweg 1, 2333 CA Leiden, The Netherlands
 * and K.U.Leuven, Departement Computerwetenschappen, Celestijnenlaan 200A,
 * B-3001 Leuven, Belgium
 * and INRIA Saclay - Ile-de-France, Parc Club Orsay Universite,
 * ZAC des vignes, 4 rue Jacques Monod, 91893 Orsay, France
 */

#include <isl/ctx.h>
#include <isl/val.h>
#include <isl/vec.h>
#include <isl/space.h>
#include <isl/set.h>
#include <isl/map.h>
#include <isl/union_set.h>
#include <isl/union_map.h>
#include <isl/flow.h>
enum isl_restriction_type {
	isl_restriction_type_empty,
	isl_restriction_type_none,
	isl_restriction_type_input,
	isl_restriction_type_output
};

struct isl_restriction {
	enum isl_restriction_type type;

	isl_set *source;
	isl_set *sink;
};
/* Create a restriction of the given type.
 */
static __isl_give isl_restriction *isl_restriction_alloc(
	__isl_take isl_map *source_map, enum isl_restriction_type type)
{
	isl_ctx *ctx;
	isl_restriction *restr;

	ctx = isl_map_get_ctx(source_map);
	restr = isl_calloc_type(ctx, struct isl_restriction);
	if (!restr)
		goto error;

	restr->type = type;

	isl_map_free(source_map);
	return restr;
error:
	isl_map_free(source_map);
	return NULL;
}
/* Create a restriction that doesn't restrict anything.
 */
__isl_give isl_restriction *isl_restriction_none(__isl_take isl_map *source_map)
{
	return isl_restriction_alloc(source_map, isl_restriction_type_none);
}

/* Create a restriction that removes everything.
 */
__isl_give isl_restriction *isl_restriction_empty(
	__isl_take isl_map *source_map)
{
	return isl_restriction_alloc(source_map, isl_restriction_type_empty);
}
/* Create a restriction on the input of the maximization problem
 * based on the given source and sink restrictions.
 */
__isl_give isl_restriction *isl_restriction_input(
	__isl_take isl_set *source_restr, __isl_take isl_set *sink_restr)
{
	isl_ctx *ctx;
	isl_restriction *restr;

	if (!source_restr || !sink_restr)
		goto error;

	ctx = isl_set_get_ctx(source_restr);
	restr = isl_calloc_type(ctx, struct isl_restriction);
	if (!restr)
		goto error;

	restr->type = isl_restriction_type_input;
	restr->source = source_restr;
	restr->sink = sink_restr;

	return restr;
error:
	isl_set_free(source_restr);
	isl_set_free(sink_restr);
	return NULL;
}
/* Create a restriction on the output of the maximization problem
 * based on the given source restriction.
 */
__isl_give isl_restriction *isl_restriction_output(
	__isl_take isl_set *source_restr)
{
	isl_ctx *ctx;
	isl_restriction *restr;

	if (!source_restr)
		return NULL;

	ctx = isl_set_get_ctx(source_restr);
	restr = isl_calloc_type(ctx, struct isl_restriction);
	if (!restr)
		goto error;

	restr->type = isl_restriction_type_output;
	restr->source = source_restr;

	return restr;
error:
	isl_set_free(source_restr);
	return NULL;
}
__isl_null isl_restriction *isl_restriction_free(
	__isl_take isl_restriction *restr)
{
	if (!restr)
		return NULL;

	isl_set_free(restr->source);
	isl_set_free(restr->sink);
	free(restr);
	return NULL;
}

isl_ctx *isl_restriction_get_ctx(__isl_keep isl_restriction *restr)
{
	return restr ? isl_set_get_ctx(restr->source) : NULL;
}
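
/* Illustrative sketch (not part of isl): a restriction callback of the
 * kind that can be installed with isl_access_info_set_restrict() below.
 * The trivial policy used here (drop plainly empty sources, otherwise do
 * not restrict the maximization problem) is an assumption of the example;
 * only the isl_restriction_* constructors above are actual isl API.
 */
static __isl_give isl_restriction *example_restrict(
	__isl_keep isl_map *source_map, __isl_keep isl_set *sink,
	void *source_user, void *user)
{
	/* The sink and the user pointers are not used by this example. */
	(void) sink;
	(void) source_user;
	(void) user;

	if (isl_map_plain_is_empty(source_map))
		return isl_restriction_empty(isl_map_copy(source_map));
	return isl_restriction_none(isl_map_copy(source_map));
}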
/* A private structure to keep track of a mapping together with
 * a user-specified identifier and a boolean indicating whether
 * the map represents a must or may access/dependence.
 */
struct isl_labeled_map {
	struct isl_map	*map;
	void		*data;
	int		must;
};

/* A structure containing the input for dependence analysis:
 * - a sink
 * - n_must + n_may (<= max_source) sources
 * - a function for determining the relative order of sources and sink
 * The must sources are placed before the may sources.
 *
 * domain_map is an auxiliary map that maps the sink access relation
 * to the domain of this access relation.
 *
 * restrict_fn is a callback that (if not NULL) will be called
 * right before any lexicographical maximization.
 */
struct isl_access_info {
	isl_map				*domain_map;
	struct isl_labeled_map		sink;
	isl_access_level_before		level_before;

	isl_access_restrict		restrict_fn;
	void				*restrict_user;

	int				max_source;
	int				n_must;
	int				n_may;
	struct isl_labeled_map		source[1];
};
/* A structure containing the output of dependence analysis:
 * - n_source dependences
 * - a wrapped subset of the sink for which definitely no source could be found
 * - a wrapped subset of the sink for which possibly no source could be found
 */
struct isl_flow {
	isl_set			*must_no_source;
	isl_set			*may_no_source;
	int			n_source;
	struct isl_labeled_map	*dep;
};
/* Construct an isl_access_info structure and fill it up with
 * the given data. The number of sources is set to 0.
 */
__isl_give isl_access_info *isl_access_info_alloc(__isl_take isl_map *sink,
	void *sink_user, isl_access_level_before fn, int max_source)
{
	isl_ctx *ctx;
	struct isl_access_info *acc;

	if (!sink)
		return NULL;

	ctx = isl_map_get_ctx(sink);
	isl_assert(ctx, max_source >= 0, goto error);

	acc = isl_calloc(ctx, struct isl_access_info,
			sizeof(struct isl_access_info) +
			(max_source - 1) * sizeof(struct isl_labeled_map));
	if (!acc)
		goto error;

	acc->sink.map = sink;
	acc->sink.data = sink_user;
	acc->level_before = fn;
	acc->max_source = max_source;

	return acc;
error:
	isl_map_free(sink);
	return NULL;
}
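
/* Illustrative sketch (not part of isl): a level_before callback as
 * expected by isl_access_info_alloc().  The struct and its fields are
 * hypothetical user data attached to each access; the encoding of the
 * return value (2 * n + 1 if the first access textually precedes the
 * second and shares n outer loops with it, 2 * n otherwise) follows the
 * convention assumed by can_precede_at_level() and before() below.
 */
struct example_stmt_info {
	int nest_depth;	/* number of loops shared with the other statements */
	int line;	/* textual position inside that loop nest */
};

static int example_level_before(void *first, void *second)
{
	struct example_stmt_info *s1 = first;
	struct example_stmt_info *s2 = second;
	int shared = s1->nest_depth < s2->nest_depth
			? s1->nest_depth : s2->nest_depth;

	return 2 * shared + (s1->line < s2->line);
}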
/* Free the given isl_access_info structure.
 */
__isl_null isl_access_info *isl_access_info_free(
	__isl_take isl_access_info *acc)
{
	int i;

	if (!acc)
		return NULL;
	isl_map_free(acc->domain_map);
	isl_map_free(acc->sink.map);
	for (i = 0; i < acc->n_must + acc->n_may; ++i)
		isl_map_free(acc->source[i].map);
	free(acc);
	return NULL;
}
isl_ctx *isl_access_info_get_ctx(__isl_keep isl_access_info *acc)
{
	return acc ? isl_map_get_ctx(acc->sink.map) : NULL;
}
__isl_give isl_access_info *isl_access_info_set_restrict(
	__isl_take isl_access_info *acc, isl_access_restrict fn, void *user)
{
	if (!acc)
		return NULL;
	acc->restrict_fn = fn;
	acc->restrict_user = user;
	return acc;
}
/* Add another source to an isl_access_info structure, making
 * sure the "must" sources are placed before the "may" sources.
 * This function may be called at most max_source times on a
 * given isl_access_info structure, with max_source as specified
 * in the call to isl_access_info_alloc that constructed the structure.
 */
__isl_give isl_access_info *isl_access_info_add_source(
	__isl_take isl_access_info *acc, __isl_take isl_map *source,
	int must, void *source_user)
{
	isl_ctx *ctx;

	if (!acc)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);
	isl_assert(ctx, acc->n_must + acc->n_may < acc->max_source, goto error);

	if (must) {
		acc->source[acc->n_must + acc->n_may] =
			acc->source[acc->n_must];
		acc->source[acc->n_must].map = source;
		acc->source[acc->n_must].data = source_user;
		acc->source[acc->n_must].must = 1;
		acc->n_must++;
	} else {
		acc->source[acc->n_must + acc->n_may].map = source;
		acc->source[acc->n_must + acc->n_may].data = source_user;
		acc->source[acc->n_must + acc->n_may].must = 0;
		acc->n_may++;
	}

	return acc;
error:
	isl_map_free(source);
	isl_access_info_free(acc);
	return NULL;
}
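
/* Illustrative sketch (not part of isl): building an isl_access_info for
 * a single sink access and one must source.  The access relations written
 * as strings and the example_stmt_info objects are assumptions of the
 * example; example_level_before() is the sketch shown earlier.
 */
static __isl_give isl_access_info *example_build_access_info(isl_ctx *ctx,
	struct example_stmt_info *read_info,
	struct example_stmt_info *write_info)
{
	isl_map *sink, *source;
	isl_access_info *acc;

	/* Sink: S2 reads A[i]; source: S1 writes A[i]. */
	sink = isl_map_read_from_str(ctx,
		"{ S2[i] -> A[i] : 0 <= i < 100 }");
	acc = isl_access_info_alloc(sink, read_info,
					&example_level_before, 1);

	source = isl_map_read_from_str(ctx,
		"{ S1[i] -> A[i] : 0 <= i < 100 }");
	acc = isl_access_info_add_source(acc, source, 1, write_info);

	return acc;
}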
/* Return -n, 0 or n (with n a positive value), depending on whether
 * the source access identified by p1 should be sorted before, together
 * or after that identified by p2.
 *
 * If p1 appears before p2, then it should be sorted first.
 * For more generic initial schedules, it is possible that neither
 * p1 nor p2 appears before the other, or at least not in any obvious way.
 * We therefore also check if p2 appears before p1, in which case p2
 * should be sorted first.
 * If not, we try to order the two statements based on the description
 * of the iteration domains. This results in an arbitrary, but fairly
 * stable ordering.
 */
static int access_sort_cmp(const void *p1, const void *p2, void *user)
{
	isl_access_info *acc = user;
	const struct isl_labeled_map *i1, *i2;
	int level1, level2;
	uint32_t h1, h2;

	i1 = (const struct isl_labeled_map *) p1;
	i2 = (const struct isl_labeled_map *) p2;

	level1 = acc->level_before(i1->data, i2->data);
	if (level1 % 2)
		return -1;

	level2 = acc->level_before(i2->data, i1->data);
	if (level2 % 2)
		return 1;

	h1 = isl_map_get_hash(i1->map);
	h2 = isl_map_get_hash(i2->map);
	return h1 > h2 ? 1 : h1 < h2 ? -1 : 0;
}
/* Sort the must source accesses in their textual order.
 */
static __isl_give isl_access_info *isl_access_info_sort_sources(
	__isl_take isl_access_info *acc)
{
	if (!acc)
		return NULL;
	if (acc->n_must <= 1)
		return acc;

	if (isl_sort(acc->source, acc->n_must, sizeof(struct isl_labeled_map),
		    access_sort_cmp, acc) < 0)
		return isl_access_info_free(acc);

	return acc;
}
/* Align the parameters of the two spaces if needed and then call
 * isl_space_join.
 */
static __isl_give isl_space *space_align_and_join(__isl_take isl_space *left,
	__isl_take isl_space *right)
{
	if (isl_space_match(left, isl_dim_param, right, isl_dim_param))
		return isl_space_join(left, right);

	left = isl_space_align_params(left, isl_space_copy(right));
	right = isl_space_align_params(right, isl_space_copy(left));
	return isl_space_join(left, right);
}
/* Initialize an empty isl_flow structure corresponding to a given
 * isl_access_info structure.
 * For each must access, two dependences are created (initialized
 * to the empty relation), one for the resulting must dependences
 * and one for the resulting may dependences. May accesses can
 * only lead to may dependences, so only one dependence is created
 * for each of them.
 * This function is private as isl_flow structures are only supposed
 * to be created by isl_access_info_compute_flow.
 */
static __isl_give isl_flow *isl_flow_alloc(__isl_keep isl_access_info *acc)
{
	int i, n;
	isl_ctx *ctx;
	struct isl_flow *dep;

	ctx = isl_map_get_ctx(acc->sink.map);
	dep = isl_calloc_type(ctx, struct isl_flow);
	if (!dep)
		return NULL;

	n = 2 * acc->n_must + acc->n_may;
	dep->dep = isl_calloc_array(ctx, struct isl_labeled_map, n);
	if (n && !dep->dep)
		goto error;
	dep->n_source = n;

	for (i = 0; i < acc->n_must; ++i) {
		isl_space *dim;
		dim = space_align_and_join(
			isl_map_get_space(acc->source[i].map),
			isl_space_reverse(isl_map_get_space(acc->sink.map)));
		dep->dep[2 * i].map = isl_map_empty(dim);
		dep->dep[2 * i + 1].map = isl_map_copy(dep->dep[2 * i].map);
		dep->dep[2 * i].data = acc->source[i].data;
		dep->dep[2 * i + 1].data = acc->source[i].data;
		dep->dep[2 * i].must = 1;
		dep->dep[2 * i + 1].must = 0;
		if (!dep->dep[2 * i].map || !dep->dep[2 * i + 1].map)
			goto error;
	}
	for (i = acc->n_must; i < acc->n_must + acc->n_may; ++i) {
		isl_space *dim;
		dim = space_align_and_join(
			isl_map_get_space(acc->source[i].map),
			isl_space_reverse(isl_map_get_space(acc->sink.map)));
		dep->dep[acc->n_must + i].map = isl_map_empty(dim);
		dep->dep[acc->n_must + i].data = acc->source[i].data;
		dep->dep[acc->n_must + i].must = 0;
		if (!dep->dep[acc->n_must + i].map)
			goto error;
	}

	return dep;
error:
	isl_flow_free(dep);
	return NULL;
}
/* Iterate over all sources and for each resulting flow dependence
 * that is not empty, call the user-specified function.
 * The third argument in this function call identifies the source,
 * while the last argument corresponds to the final argument of
 * the isl_flow_foreach call.
 */
int isl_flow_foreach(__isl_keep isl_flow *deps,
	int (*fn)(__isl_take isl_map *dep, int must, void *dep_user, void *user),
	void *user)
{
	int i;

	if (!deps)
		return -1;

	for (i = 0; i < deps->n_source; ++i) {
		if (isl_map_plain_is_empty(deps->dep[i].map))
			continue;
		if (fn(isl_map_copy(deps->dep[i].map), deps->dep[i].must,
				deps->dep[i].data, user) < 0)
			return -1;
	}

	return 0;
}
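
/* Illustrative sketch (not part of isl): a callback that could be passed
 * to isl_flow_foreach().  It collects all dependences, must and may alike,
 * in a single isl_union_map supplied through "user"; using an
 * isl_union_map ** as user data is an assumption of the example.
 */
static int example_collect_dep(__isl_take isl_map *dep, int must,
	void *dep_user, void *user)
{
	isl_union_map **collected = user;

	(void) must;
	(void) dep_user;

	*collected = isl_union_map_union(*collected,
					isl_union_map_from_map(dep));
	return *collected ? 0 : -1;
}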
/* Return a copy of the subset of the sink for which no source could be found.
 */
__isl_give isl_map *isl_flow_get_no_source(__isl_keep isl_flow *deps, int must)
{
	if (!deps)
		return NULL;

	if (must)
		return isl_set_unwrap(isl_set_copy(deps->must_no_source));
	else
		return isl_set_unwrap(isl_set_copy(deps->may_no_source));
}
void isl_flow_free(__isl_take isl_flow *deps)
{
	int i;

	if (!deps)
		return;
	isl_set_free(deps->must_no_source);
	isl_set_free(deps->may_no_source);
	if (deps->dep) {
		for (i = 0; i < deps->n_source; ++i)
			isl_map_free(deps->dep[i].map);
		free(deps->dep);
	}
	free(deps);
}
isl_ctx *isl_flow_get_ctx(__isl_keep isl_flow *deps)
{
	return deps ? isl_set_get_ctx(deps->must_no_source) : NULL;
}
/* Return a map that enforces that the domain iteration occurs after
 * the range iteration at the given level.
 * If level is odd, then the domain iteration should occur after
 * the target iteration in their shared level/2 outermost loops.
 * In this case we simply need to enforce that these outermost
 * loop iterations are the same.
 * If level is even, then the loop iterator of the domain should
 * be greater than the loop iterator of the range at the last
 * of the level/2 shared loops, i.e., loop level/2 - 1.
 */
static __isl_give isl_map *after_at_level(__isl_take isl_space *dim, int level)
{
	struct isl_basic_map *bmap;

	if (level % 2)
		bmap = isl_basic_map_equal(dim, level/2);
	else
		bmap = isl_basic_map_more_at(dim, level/2 - 1);

	return isl_map_from_basic_map(bmap);
}
/* Compute the partial lexicographic maximum of "dep" on domain "sink",
 * but first check if the user has set acc->restrict_fn and if so
 * update either the input or the output of the maximization problem
 * with respect to the resulting restriction.
 *
 * Since the user expects a mapping from sink iterations to source iterations,
 * whereas the domain of "dep" is a wrapped map, mapping sink iterations
 * to accessed array elements, we first need to project out the accessed
 * sink array elements by applying acc->domain_map.
 * Similarly, the sink restriction specified by the user needs to be
 * converted back to the wrapped map.
 */
static __isl_give isl_map *restricted_partial_lexmax(
	__isl_keep isl_access_info *acc, __isl_take isl_map *dep,
	int source, __isl_take isl_set *sink, __isl_give isl_set **empty)
{
	isl_map *source_map;
	isl_restriction *restr;
	isl_set *sink_domain;
	isl_set *sink_restr;
	isl_map *res;

	if (!acc->restrict_fn)
		return isl_map_partial_lexmax(dep, sink, empty);

	source_map = isl_map_copy(dep);
	source_map = isl_map_apply_domain(source_map,
					isl_map_copy(acc->domain_map));
	sink_domain = isl_set_copy(sink);
	sink_domain = isl_set_apply(sink_domain, isl_map_copy(acc->domain_map));
	restr = acc->restrict_fn(source_map, sink_domain,
				acc->source[source].data, acc->restrict_user);
	isl_set_free(sink_domain);
	isl_map_free(source_map);

	if (!restr)
		goto error;
	if (restr->type == isl_restriction_type_input) {
		dep = isl_map_intersect_range(dep, isl_set_copy(restr->source));
		sink_restr = isl_set_copy(restr->sink);
		sink_restr = isl_set_apply(sink_restr,
			    isl_map_reverse(isl_map_copy(acc->domain_map)));
		sink = isl_set_intersect(sink, sink_restr);
	} else if (restr->type == isl_restriction_type_empty) {
		isl_space *space = isl_map_get_space(dep);
		isl_map_free(dep);
		dep = isl_map_empty(space);
	}

	res = isl_map_partial_lexmax(dep, sink, empty);

	if (restr->type == isl_restriction_type_output)
		res = isl_map_intersect_range(res, isl_set_copy(restr->source));

	isl_restriction_free(restr);
	return res;
error:
	isl_map_free(dep);
	isl_set_free(sink);
	*empty = NULL;
	return NULL;
}
/* Compute the last iteration of must source j that precedes the sink
 * at the given level for sink iterations in set_C.
 * The subset of set_C for which no such iteration can be found is returned
 * in *empty.
 */
static struct isl_map *last_source(struct isl_access_info *acc,
	struct isl_set *set_C,
	int j, int level, struct isl_set **empty)
{
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after;
	struct isl_map *result;

	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_space(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);
	result = restricted_partial_lexmax(acc, dep_map, j, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* For a given mapping between iterations of must source j and iterations
 * of the sink, compute the last iteration of must source k preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source j at level
 * after_level.
 */
static struct isl_map *last_later_source(struct isl_access_info *acc,
	struct isl_map *old_map,
	int j, int before_level,
	int k, int after_level,
	struct isl_set **empty)
{
	isl_space *dim;
	struct isl_set *set_C;
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after_write;
	struct isl_map *before_read;
	struct isl_map *result;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[k].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = space_align_and_join(isl_map_get_space(acc->source[k].map),
		    isl_space_reverse(isl_map_get_space(acc->source[j].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_space(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	result = restricted_partial_lexmax(acc, dep_map, k, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* Given a shared_level between two accesses, return 1 if the
 * first can precede the second at the requested target_level.
 * If the target level is odd, i.e., refers to a statement level
 * dimension, then first needs to precede second at the requested
 * level, i.e., shared_level must be equal to target_level.
 * If the target level is even, then the two loops should share
 * at least the requested number of outer loops.
 */
static int can_precede_at_level(int shared_level, int target_level)
{
	if (shared_level < target_level)
		return 0;
	if ((target_level % 2) && shared_level > target_level)
		return 0;
	return 1;
}
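
/* Illustrative sketch (not part of isl): how the level encoding interacts
 * with can_precede_at_level().  With shared_level = 5 (two shared loops,
 * first statement textually before the second), precedence is possible at
 * loop levels 2 and 4 and at statement level 5, but not at statement
 * level 3 nor at any deeper level.
 */
static void example_can_precede_at_level(void)
{
	int shared_level = 5;

	/* loop levels: only the number of shared outer loops matters */
	(void) can_precede_at_level(shared_level, 2);	/* returns 1 */
	(void) can_precede_at_level(shared_level, 4);	/* returns 1 */
	/* statement levels: shared_level must match exactly */
	(void) can_precede_at_level(shared_level, 5);	/* returns 1 */
	(void) can_precede_at_level(shared_level, 3);	/* returns 0 */
	/* levels deeper than what is shared are never possible */
	(void) can_precede_at_level(shared_level, 7);	/* returns 0 */
}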
/* Given a possible flow dependence temp_rel[j] between source j and the sink
 * at level sink_level, remove those elements for which
 * there is an iteration of another source k < j that is closer to the sink.
 * The flow dependences temp_rel[k] are updated with the improved sources.
 * Any improved source needs to precede the sink at the same level
 * and needs to follow source j at the same or a deeper level.
 * The lower this level, the later the execution date of source k.
 * We therefore consider lower levels first.
 *
 * If temp_rel[j] is empty, then there can be no improvement and
 * we return immediately.
 */
static int intermediate_sources(__isl_keep isl_access_info *acc,
	struct isl_map **temp_rel, int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[j].map, isl_dim_in) + 1;

	if (isl_map_plain_is_empty(temp_rel[j]))
		return 0;

	for (k = j - 1; k >= 0; --k) {
		int plevel, plevel2;
		plevel = acc->level_before(acc->source[k].data, acc->sink.data);
		if (!can_precede_at_level(plevel, sink_level))
			continue;

		plevel2 = acc->level_before(acc->source[j].data,
						acc->source[k].data);

		for (level = sink_level; level <= depth; ++level) {
			struct isl_map *T;
			struct isl_set *trest;
			struct isl_map *copy;

			if (!can_precede_at_level(plevel2, level))
				continue;

			copy = isl_map_copy(temp_rel[j]);
			T = last_later_source(acc, copy, j, sink_level, k,
					      level, &trest);
			if (isl_map_plain_is_empty(T)) {
				isl_set_free(trest);
				isl_map_free(T);
				continue;
			}
			temp_rel[j] = isl_map_intersect_range(temp_rel[j], trest);
			temp_rel[k] = isl_map_union_disjoint(temp_rel[k], T);
		}
	}

	return 0;
}
/* Compute all iterations of may source j that precede the sink at the given
 * level for sink iterations in set_C.
 */
static __isl_give isl_map *all_sources(__isl_keep isl_access_info *acc,
	__isl_take isl_set *set_C, int j, int level)
{
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after;

	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_space(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);

	return isl_map_reverse(dep_map);
}
/* For a given mapping between iterations of must source k and iterations
 * of the sink, compute all iterations of may source j preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source k at level
 * after_level.
 */
static __isl_give isl_map *all_later_sources(__isl_keep isl_access_info *acc,
	__isl_take isl_map *old_map,
	int j, int before_level, int k, int after_level)
{
	isl_space *dim;
	isl_set *set_C;
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after_write;
	isl_map *before_read;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = isl_space_join(isl_map_get_space(acc->source[acc->n_must + j].map),
		    isl_space_reverse(isl_map_get_space(acc->source[k].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_space(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	return isl_map_reverse(dep_map);
}
/* Given the must and may dependence relations for the must accesses
 * for level sink_level, check if there are any accesses of may access j
 * that occur in between and return their union.
 * If some of these accesses are intermediate with respect to
 * (previously thought to be) must dependences, then these
 * must dependences are turned into may dependences.
 */
static __isl_give isl_map *all_intermediate_sources(
	__isl_keep isl_access_info *acc, __isl_take isl_map *map,
	struct isl_map **must_rel, struct isl_map **may_rel,
	int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[acc->n_must + j].map,
					isl_dim_in) + 1;

	for (k = 0; k < acc->n_must; ++k) {
		int plevel;

		if (isl_map_plain_is_empty(may_rel[k]) &&
		    isl_map_plain_is_empty(must_rel[k]))
			continue;

		plevel = acc->level_before(acc->source[k].data,
					acc->source[acc->n_must + j].data);

		for (level = sink_level; level <= depth; ++level) {
			isl_map *T;
			isl_map *copy;
			isl_set *ran;

			if (!can_precede_at_level(plevel, level))
				continue;

			copy = isl_map_copy(may_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			map = isl_map_union(map, T);

			copy = isl_map_copy(must_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			ran = isl_map_range(isl_map_copy(T));
			map = isl_map_union(map, T);
			may_rel[k] = isl_map_union_disjoint(may_rel[k],
			    isl_map_intersect_range(isl_map_copy(must_rel[k]),
						    isl_set_copy(ran)));
			T = isl_map_from_domain_and_range(
			    isl_set_universe(
				isl_space_domain(isl_map_get_space(must_rel[k]))),
			    ran);
			must_rel[k] = isl_map_subtract(must_rel[k], T);
		}
	}

	return map;
}
/* Compute dependences for the case where all accesses are "may"
 * accesses, which boils down to computing memory based dependences.
 * The generic algorithm would also work in this case, but it would
 * be overkill to use it.
 */
static __isl_give isl_flow *compute_mem_based_dependences(
	__isl_keep isl_access_info *acc)
{
	int i;
	isl_set *mustdo;
	isl_set *maydo;
	isl_flow *res;

	res = isl_flow_alloc(acc);
	if (!res)
		return NULL;

	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_copy(mustdo);

	for (i = 0; i < acc->n_may; ++i) {
		int plevel;
		int is_before;
		isl_space *dim;
		isl_map *before;
		isl_map *dep;

		plevel = acc->level_before(acc->source[i].data, acc->sink.data);
		is_before = plevel & 1;
		plevel >>= 1;

		dim = isl_map_get_space(res->dep[i].map);
		if (is_before)
			before = isl_map_lex_le_first(dim, plevel);
		else
			before = isl_map_lex_lt_first(dim, plevel);
		dep = isl_map_apply_range(isl_map_copy(acc->source[i].map),
			isl_map_reverse(isl_map_copy(acc->sink.map)));
		dep = isl_map_intersect(dep, before);
		mustdo = isl_set_subtract(mustdo,
					    isl_map_range(isl_map_copy(dep)));
		res->dep[i].map = isl_map_union(res->dep[i].map, dep);
	}

	res->may_no_source = isl_set_subtract(maydo, isl_set_copy(mustdo));
	res->must_no_source = mustdo;

	return res;
}
/* Compute dependences for the case where there is at least one
 * must access.
 *
 * The core algorithm considers all levels in which a source may precede
 * the sink, where a level may either be a statement level or a loop level.
 * The outermost statement level is 1, the first loop level is 2, etc...
 * The algorithm basically does the following:
 * for all levels l of the read access from innermost to outermost
 *	for all sources w that may precede the sink access at that level
 *	    compute the last iteration of the source that precedes the sink access
 *					    at that level
 *	    add result to possible last accesses at level l of source w
 *	    for all sources w2 that we haven't considered yet at this level that may
 *					    also precede the sink access
 *		for all levels l2 of w from l to innermost
 *		    for all possible last accesses dep of w at l
 *			compute last iteration of w2 between the source and sink
 *								of dep
 *			add result to possible last accesses at level l of write w2
 *			and replace possible last accesses dep by the remainder
 *
 * The above algorithm is applied to the must accesses. During the course
 * of the algorithm, we keep track of sink iterations that still
 * need to be considered. These iterations are split into those that
 * haven't been matched to any source access (mustdo) and those that have only
 * been matched to may accesses (maydo).
 * At the end of each level, we also consider the may accesses.
 * In particular, we consider may accesses that precede the remaining
 * sink iterations, moving elements from mustdo to maydo when appropriate,
 * and may accesses that occur between a must source and a sink of any
 * dependences found at the current level, turning must dependences into
 * may dependences when appropriate.
 */
static __isl_give isl_flow *compute_val_based_dependences(
	__isl_keep isl_access_info *acc)
{
	isl_ctx *ctx;
	isl_flow *res;
	isl_set *mustdo = NULL;
	isl_set *maydo = NULL;
	int level, j;
	int depth;
	isl_map **must_rel = NULL;
	isl_map **may_rel = NULL;

	res = isl_flow_alloc(acc);
	if (!res)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);

	depth = 2 * isl_map_dim(acc->sink.map, isl_dim_in) + 1;
	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_empty_like(mustdo);
	if (!mustdo || !maydo)
		goto error;
	if (isl_set_plain_is_empty(mustdo))
		goto done;

	must_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	may_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	if (!must_rel || !may_rel)
		goto error;

	for (level = depth; level >= 1; --level) {
		for (j = acc->n_must - 1; j >= 0; --j) {
			must_rel[j] = isl_map_empty_like(res->dep[2 * j].map);
			may_rel[j] = isl_map_copy(must_rel[j]);
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			struct isl_map *T;
			struct isl_set *rest;
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = last_source(acc, mustdo, j, level, &rest);
			must_rel[j] = isl_map_union_disjoint(must_rel[j], T);
			mustdo = rest;

			intermediate_sources(acc, must_rel, j, level);

			T = last_source(acc, maydo, j, level, &rest);
			may_rel[j] = isl_map_union_disjoint(may_rel[j], T);
			maydo = rest;

			intermediate_sources(acc, may_rel, j, level);

			if (isl_set_plain_is_empty(mustdo) &&
			    isl_set_plain_is_empty(maydo))
				break;
		}
		for (j = j - 1; j >= 0; --j) {
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			intermediate_sources(acc, must_rel, j, level);
			intermediate_sources(acc, may_rel, j, level);
		}

		for (j = 0; j < acc->n_may; ++j) {
			int plevel;
			isl_map *T;
			isl_set *ran;

			plevel = acc->level_before(acc->source[acc->n_must + j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = all_sources(acc, isl_set_copy(maydo), j, level);
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			T = all_sources(acc, isl_set_copy(mustdo), j, level);
			ran = isl_map_range(isl_map_copy(T));
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			mustdo = isl_set_subtract(mustdo, isl_set_copy(ran));
			maydo = isl_set_union_disjoint(maydo, ran);

			T = res->dep[2 * acc->n_must + j].map;
			T = all_intermediate_sources(acc, T, must_rel, may_rel,
							j, level);
			res->dep[2 * acc->n_must + j].map = T;
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			res->dep[2 * j].map =
				isl_map_union_disjoint(res->dep[2 * j].map,
							     must_rel[j]);
			res->dep[2 * j + 1].map =
				isl_map_union_disjoint(res->dep[2 * j + 1].map,
							     may_rel[j]);
		}

		if (isl_set_plain_is_empty(mustdo) &&
		    isl_set_plain_is_empty(maydo))
			break;
	}

	free(must_rel);
	free(may_rel);
done:
	res->must_no_source = mustdo;
	res->may_no_source = maydo;
	return res;
error:
	isl_flow_free(res);
	isl_set_free(mustdo);
	isl_set_free(maydo);
	free(must_rel);
	free(may_rel);
	return NULL;
}
/* Given a "sink" access and a list of n "source" accesses,
 * compute for each iteration of the sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a list of n relations between source and sink
 * iterations and a subset of the domain of the sink access,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * To deal with multi-valued sink access relations, the sink iteration
 * domain is first extended with dimensions that correspond to the data
 * space. After the computation is finished, these extra dimensions are
 * projected out again.
 */
__isl_give isl_flow *isl_access_info_compute_flow(__isl_take isl_access_info *acc)
{
	int j;
	struct isl_flow *res = NULL;

	if (!acc)
		return NULL;

	acc->domain_map = isl_map_domain_map(isl_map_copy(acc->sink.map));
	acc->sink.map = isl_map_range_map(acc->sink.map);
	if (!acc->sink.map)
		goto error;

	if (acc->n_must == 0)
		res = compute_mem_based_dependences(acc);
	else {
		acc = isl_access_info_sort_sources(acc);
		res = compute_val_based_dependences(acc);
	}
	if (!res)
		goto error;

	for (j = 0; j < res->n_source; ++j) {
		res->dep[j].map = isl_map_apply_range(res->dep[j].map,
					isl_map_copy(acc->domain_map));
		if (!res->dep[j].map)
			goto error;
	}
	if (!res->must_no_source || !res->may_no_source)
		goto error;

	isl_access_info_free(acc);

	return res;
error:
	isl_access_info_free(acc);
	isl_flow_free(res);
	return NULL;
}
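
/* Illustrative sketch (not part of isl): running the per-sink analysis on
 * the access information built in example_build_access_info() and
 * collecting the resulting dependences with example_collect_dep().
 * Returning the dependences as an isl_union_map is an assumption of the
 * example.
 */
static __isl_give isl_union_map *example_compute_flow(isl_ctx *ctx,
	struct example_stmt_info *read_info,
	struct example_stmt_info *write_info)
{
	isl_access_info *acc;
	isl_flow *flow;
	isl_union_map *deps;

	acc = example_build_access_info(ctx, read_info, write_info);
	flow = isl_access_info_compute_flow(acc);
	if (!flow)
		return NULL;

	deps = isl_union_map_empty(isl_space_params_alloc(ctx, 0));
	if (isl_flow_foreach(flow, &example_collect_dep, &deps) < 0)
		deps = isl_union_map_free(deps);

	isl_flow_free(flow);
	return deps;
}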
/* Keep track of some information about a schedule for a given
 * access. In particular, keep track of which dimensions
 * have a constant value and of the actual constant values.
 */
struct isl_sched_info {
	int *is_cst;
	isl_vec *cst;
};

static void sched_info_free(__isl_take struct isl_sched_info *info)
{
	if (!info)
		return;
	isl_vec_free(info->cst);
	free(info->is_cst);
	free(info);
}
/* Extract information on the constant dimensions of the schedule
 * for a given access. The "map" is of the form
 *
 *	[S -> D] -> A
 *
 * with S the schedule domain, D the iteration domain and A the data domain.
 */
static __isl_give struct isl_sched_info *sched_info_alloc(
	__isl_keep isl_map *map)
{
	isl_ctx *ctx;
	isl_space *dim;
	struct isl_sched_info *info;
	int i, n;

	if (!map)
		return NULL;

	dim = isl_space_unwrap(isl_space_domain(isl_map_get_space(map)));
	if (!dim)
		return NULL;
	n = isl_space_dim(dim, isl_dim_in);
	isl_space_free(dim);

	ctx = isl_map_get_ctx(map);
	info = isl_alloc_type(ctx, struct isl_sched_info);
	if (!info)
		return NULL;
	info->is_cst = isl_alloc_array(ctx, int, n);
	info->cst = isl_vec_alloc(ctx, n);
	if (n && (!info->is_cst || !info->cst))
		goto error;

	for (i = 0; i < n; ++i) {
		isl_val *v;

		v = isl_map_plain_get_val_if_fixed(map, isl_dim_in, i);
		if (!v)
			goto error;
		info->is_cst[i] = !isl_val_is_nan(v);
		if (info->is_cst[i])
			info->cst = isl_vec_set_element_val(info->cst, i, v);
		else
			isl_val_free(v);
	}

	return info;
error:
	sched_info_free(info);
	return NULL;
}
struct isl_compute_flow_data {
	isl_union_map *must_source;
	isl_union_map *may_source;
	isl_union_map *must_dep;
	isl_union_map *may_dep;
	isl_union_map *must_no_source;
	isl_union_map *may_no_source;

	int count;
	int must;
	isl_space *dim;
	struct isl_sched_info *sink_info;
	struct isl_sched_info **source_info;
	isl_access_info *accesses;
};
static int count_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);
	isl_map_free(map);

	if (eq < 0)
		return -1;
	if (eq)
		data->count++;

	return 0;
}
static int collect_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_sched_info *info;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);

	if (eq < 0)
		goto error;
	if (!eq) {
		isl_map_free(map);
		return 0;
	}

	info = sched_info_alloc(map);
	data->source_info[data->count] = info;

	data->accesses = isl_access_info_add_source(data->accesses,
						    map, data->must, info);

	data->count++;

	return 0;
error:
	isl_map_free(map);
	return -1;
}
/* Determine the shared nesting level and the "textual order" of
 * the given accesses.
 *
 * We first determine the minimal schedule dimension for both accesses.
 *
 * If among those dimensions, we can find one where both have a fixed
 * value and if moreover those values are different, then the previous
 * dimension is the last shared nesting level and the textual order
 * is determined based on the order of the fixed values.
 * If no such fixed values can be found, then we set the shared
 * nesting level to the minimal schedule dimension, with no textual ordering.
 */
static int before(void *first, void *second)
{
	struct isl_sched_info *info1 = first;
	struct isl_sched_info *info2 = second;
	int n1, n2;
	int i;

	n1 = isl_vec_size(info1->cst);
	n2 = isl_vec_size(info2->cst);

	if (n2 < n1)
		n1 = n2;

	for (i = 0; i < n1; ++i) {
		int r;
		int cmp;

		if (!info1->is_cst[i])
			continue;
		if (!info2->is_cst[i])
			continue;
		cmp = isl_vec_cmp_element(info1->cst, info2->cst, i);
		if (cmp == 0)
			continue;

		r = 2 * i + (cmp < 0);

		return r;
	}

	return 2 * n1;
}
/* Given a sink access, look for all the source accesses that access
 * the same array and perform dataflow analysis on them using
 * isl_access_info_compute_flow.
 */
static int compute_flow(__isl_take isl_map *map, void *user)
{
	int i;
	isl_ctx *ctx;
	struct isl_compute_flow_data *data;
	isl_flow *flow;

	data = (struct isl_compute_flow_data *)user;

	ctx = isl_map_get_ctx(map);

	data->accesses = NULL;
	data->sink_info = NULL;
	data->source_info = NULL;
	data->count = 0;
	data->dim = isl_space_range(isl_map_get_space(map));

	if (isl_union_map_foreach_map(data->must_source,
					&count_matching_array, data) < 0)
		goto error;
	if (isl_union_map_foreach_map(data->may_source,
					&count_matching_array, data) < 0)
		goto error;

	data->sink_info = sched_info_alloc(map);
	data->source_info = isl_calloc_array(ctx, struct isl_sched_info *,
					     data->count);

	data->accesses = isl_access_info_alloc(isl_map_copy(map),
				data->sink_info, &before, data->count);
	if (!data->sink_info || (data->count && !data->source_info) ||
	    !data->accesses)
		goto error;
	data->count = 0;
	data->must = 1;
	if (isl_union_map_foreach_map(data->must_source,
					&collect_matching_array, data) < 0)
		goto error;
	data->must = 0;
	if (isl_union_map_foreach_map(data->may_source,
					&collect_matching_array, data) < 0)
		goto error;

	flow = isl_access_info_compute_flow(data->accesses);
	data->accesses = NULL;

	if (!flow)
		goto error;

	data->must_no_source = isl_union_map_union(data->must_no_source,
		    isl_union_map_from_map(isl_flow_get_no_source(flow, 1)));
	data->may_no_source = isl_union_map_union(data->may_no_source,
		    isl_union_map_from_map(isl_flow_get_no_source(flow, 0)));

	for (i = 0; i < flow->n_source; ++i) {
		isl_union_map *dep;
		dep = isl_union_map_from_map(isl_map_copy(flow->dep[i].map));
		if (flow->dep[i].must)
			data->must_dep = isl_union_map_union(data->must_dep, dep);
		else
			data->may_dep = isl_union_map_union(data->may_dep, dep);
	}

	isl_flow_free(flow);

	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return 0;
error:
	isl_access_info_free(data->accesses);
	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return -1;
}
/* Given a collection of "sink" and "source" accesses,
 * compute for each iteration of a sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a relation between source and sink
 * iterations and a subset of the domain of the sink accesses,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * We first prepend the schedule dimensions to the domain
 * of the accesses so that we can easily compare their relative order.
 * Then we consider each sink access individually in compute_flow.
 */
int isl_union_map_compute_flow(__isl_take isl_union_map *sink,
	__isl_take isl_union_map *must_source,
	__isl_take isl_union_map *may_source,
	__isl_take isl_union_map *schedule,
	__isl_give isl_union_map **must_dep, __isl_give isl_union_map **may_dep,
	__isl_give isl_union_map **must_no_source,
	__isl_give isl_union_map **may_no_source)
{
	isl_space *dim;
	isl_union_map *range_map = NULL;
	struct isl_compute_flow_data data;

	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_space(must_source));
	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_space(may_source));
	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_space(schedule));
	dim = isl_union_map_get_space(sink);
	must_source = isl_union_map_align_params(must_source, isl_space_copy(dim));
	may_source = isl_union_map_align_params(may_source, isl_space_copy(dim));
	schedule = isl_union_map_align_params(schedule, isl_space_copy(dim));

	schedule = isl_union_map_reverse(schedule);
	range_map = isl_union_map_range_map(schedule);
	schedule = isl_union_map_reverse(isl_union_map_copy(range_map));
	sink = isl_union_map_apply_domain(sink, isl_union_map_copy(schedule));
	must_source = isl_union_map_apply_domain(must_source,
						isl_union_map_copy(schedule));
	may_source = isl_union_map_apply_domain(may_source, schedule);

	data.must_source = must_source;
	data.may_source = may_source;
	data.must_dep = must_dep ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_dep = may_dep ? isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.must_no_source = must_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_no_source = may_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;

	isl_space_free(dim);

	if (isl_union_map_foreach_map(sink, &compute_flow, &data) < 0)
		goto error;

	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);

	if (must_dep) {
		data.must_dep = isl_union_map_apply_domain(data.must_dep,
					isl_union_map_copy(range_map));
		data.must_dep = isl_union_map_apply_range(data.must_dep,
					isl_union_map_copy(range_map));
		*must_dep = data.must_dep;
	}
	if (may_dep) {
		data.may_dep = isl_union_map_apply_domain(data.may_dep,
					isl_union_map_copy(range_map));
		data.may_dep = isl_union_map_apply_range(data.may_dep,
					isl_union_map_copy(range_map));
		*may_dep = data.may_dep;
	}
	if (must_no_source) {
		data.must_no_source = isl_union_map_apply_domain(
			data.must_no_source, isl_union_map_copy(range_map));
		*must_no_source = data.must_no_source;
	}
	if (may_no_source) {
		data.may_no_source = isl_union_map_apply_domain(
			data.may_no_source, isl_union_map_copy(range_map));
		*may_no_source = data.may_no_source;
	}

	isl_union_map_free(range_map);

	return 0;
error:
	isl_union_map_free(range_map);
	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);
	isl_union_map_free(data.must_dep);
	isl_union_map_free(data.may_dep);
	isl_union_map_free(data.must_no_source);
	isl_union_map_free(data.may_no_source);

	if (must_dep)
		*must_dep = NULL;
	if (may_dep)
		*may_dep = NULL;
	if (must_no_source)
		*must_no_source = NULL;
	if (may_no_source)
		*may_no_source = NULL;
	return -1;
}
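
/* Illustrative sketch (not part of isl): calling the union map based
 * entry point.  The access relations and the schedule, written here as
 * strings, describe a hypothetical loop
 *	for (i = 0; i < 100; ++i) { S1: A[i] = ...; S2: ... = A[i]; }
 * and are assumptions of the example.
 */
static int example_union_map_flow(isl_ctx *ctx)
{
	isl_union_map *sink, *must_source, *may_source, *schedule;
	isl_union_map *must_dep, *must_no_source;

	sink = isl_union_map_read_from_str(ctx,
		"{ S2[i] -> A[i] : 0 <= i < 100 }");
	must_source = isl_union_map_read_from_str(ctx,
		"{ S1[i] -> A[i] : 0 <= i < 100 }");
	may_source = isl_union_map_read_from_str(ctx, "{ }");
	schedule = isl_union_map_read_from_str(ctx,
		"{ S1[i] -> [i, 0]; S2[i] -> [i, 1] }");

	if (isl_union_map_compute_flow(sink, must_source, may_source,
					schedule, &must_dep, NULL,
					&must_no_source, NULL) < 0)
		return -1;

	/* must_dep now relates S1[i] to S2[i]; must_no_source is empty. */
	isl_union_map_free(must_dep);
	isl_union_map_free(must_no_source);
	return 0;
}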