/*
 * Copyright 2005-2007 Universiteit Leiden
 * Copyright 2008-2009 Katholieke Universiteit Leuven
 * Copyright 2010      INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, Leiden Institute of Advanced Computer Science,
 * Universiteit Leiden, Niels Bohrweg 1, 2333 CA Leiden, The Netherlands
 * and K.U.Leuven, Departement Computerwetenschappen, Celestijnenlaan 200A,
 * B-3001 Leuven, Belgium
 * and INRIA Saclay - Ile-de-France, Parc Club Orsay Universite,
 * ZAC des vignes, 4 rue Jacques Monod, 91893 Orsay, France
 */
#include <isl/set.h>
#include <isl/map.h>
#include <isl/flow.h>
#include <isl_qsort.h>
/* A private structure to keep track of a mapping together with
 * a user-specified identifier and a boolean indicating whether
 * the map represents a must or may access/dependence.
 */
struct isl_labeled_map {
	struct isl_map *map;
	int must;
	void *data;
};
/* A structure containing the input for dependence analysis:
 * - a sink
 * - n_must + n_may (<= max_source) sources
 * - a function for determining the relative order of sources and sink
 * The must sources are placed before the may sources.
 *
 * domain_map is an auxiliary map that maps the sink access relation
 * to the domain of this access relation.
 *
 * restrict_sources is a callback that (if not NULL) will be called
 * right before any lexicographical maximization.
 */
struct isl_access_info {
	isl_map *domain_map;
	struct isl_labeled_map sink;
	isl_access_level_before level_before;
	isl_access_restrict_sources restrict_sources;
	int max_source;
	int n_must;
	int n_may;
	struct isl_labeled_map source[1];
};
/* A structure containing the output of dependence analysis:
 * - n_source dependences
 * - a wrapped subset of the sink for which definitely no source could be found
 * - a wrapped subset of the sink for which possibly no source could be found
 */
struct isl_flow {
	isl_set *must_no_source;
	isl_set *may_no_source;
	int n_source;
	struct isl_labeled_map *dep;
};
/* Construct an isl_access_info structure and fill it up with
 * the given data.  The number of sources is set to 0.
 */
__isl_give isl_access_info *isl_access_info_alloc(__isl_take isl_map *sink,
	void *sink_user, isl_access_level_before fn, int max_source)
{
	isl_ctx *ctx;
	struct isl_access_info *acc;

	if (!sink)
		return NULL;

	ctx = isl_map_get_ctx(sink);
	isl_assert(ctx, max_source >= 0, goto error);

	acc = isl_calloc(ctx, struct isl_access_info,
			sizeof(struct isl_access_info) +
			(max_source - 1) * sizeof(struct isl_labeled_map));
	if (!acc)
		goto error;

	acc->sink.map = sink;
	acc->sink.data = sink_user;
	acc->level_before = fn;
	acc->max_source = max_source;

	return acc;
error:
	isl_map_free(sink);
	return NULL;
}
/* Free the given isl_access_info structure.
 */
void isl_access_info_free(__isl_take isl_access_info *acc)
{
	int i;

	if (!acc)
		return;
	isl_map_free(acc->domain_map);
	isl_map_free(acc->sink.map);
	for (i = 0; i < acc->n_must + acc->n_may; ++i)
		isl_map_free(acc->source[i].map);
	free(acc);
}
isl_ctx *isl_access_info_get_ctx(__isl_keep isl_access_info *acc)
{
	return acc ? isl_map_get_ctx(acc->sink.map) : NULL;
}
__isl_give isl_access_info *isl_access_info_set_restrict_sources(
	__isl_take isl_access_info *acc, isl_access_restrict_sources fn)
{
	if (!acc)
		return NULL;
	acc->restrict_sources = fn;
	return acc;
}
/* Add another source to an isl_access_info structure, making
 * sure the "must" sources are placed before the "may" sources.
 * This function may be called at most max_source times on a
 * given isl_access_info structure, with max_source as specified
 * in the call to isl_access_info_alloc that constructed the structure.
 */
__isl_give isl_access_info *isl_access_info_add_source(
	__isl_take isl_access_info *acc, __isl_take isl_map *source,
	int must, void *source_user)
{
	isl_ctx *ctx;

	if (!acc)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);
	isl_assert(ctx, acc->n_must + acc->n_may < acc->max_source, goto error);

	if (must) {
		acc->source[acc->n_must + acc->n_may] =
			acc->source[acc->n_must];
		acc->source[acc->n_must].map = source;
		acc->source[acc->n_must].data = source_user;
		acc->source[acc->n_must].must = 1;
		acc->n_must++;
	} else {
		acc->source[acc->n_must + acc->n_may].map = source;
		acc->source[acc->n_must + acc->n_may].data = source_user;
		acc->source[acc->n_must + acc->n_may].must = 0;
		acc->n_may++;
	}

	return acc;
error:
	isl_map_free(source);
	isl_access_info_free(acc);
	return NULL;
}
/* Return -n, 0 or n (with n a positive value), depending on whether
 * the source access identified by p1 should be sorted before, together
 * or after that identified by p2.
 *
 * If p1 and p2 share a different number of levels with the sink,
 * then the one with the lowest number of shared levels should be
 * sorted first.
 * If they both share no levels, then the order is irrelevant.
 * Otherwise, if p1 appears before p2, then it should be sorted first.
 * For more generic initial schedules, it is possible that neither
 * p1 nor p2 appears before the other, or at least not in any obvious way.
 * We therefore also check if p2 appears before p1, in which case p2
 * should be sorted first.
 * If not, we try to order the two statements based on the description
 * of the iteration domains.  This results in an arbitrary, but fairly
 * stable ordering.
 */
static int access_sort_cmp(const void *p1, const void *p2, void *user)
{
	isl_access_info *acc = user;
	const struct isl_labeled_map *i1, *i2;
	int level1, level2;
	uint32_t h1, h2;

	i1 = (const struct isl_labeled_map *) p1;
	i2 = (const struct isl_labeled_map *) p2;

	level1 = acc->level_before(i1->data, acc->sink.data);
	level2 = acc->level_before(i2->data, acc->sink.data);

	if (level1 != level2 || !level1)
		return level1 - level2;

	level1 = acc->level_before(i1->data, i2->data);
	if (level1 % 2)
		return -1;

	level2 = acc->level_before(i2->data, i1->data);
	if (level2 % 2)
		return 1;

	h1 = isl_map_get_hash(i1->map);
	h2 = isl_map_get_hash(i2->map);
	return h1 > h2 ? 1 : h1 < h2 ? -1 : 0;
}
/* Sort the must source accesses in order of increasing number of shared
 * levels with the sink access.
 * Source accesses with the same number of shared levels are sorted
 * in their textual order.
 */
static __isl_give isl_access_info *isl_access_info_sort_sources(
	__isl_take isl_access_info *acc)
{
	if (!acc)
		return NULL;
	if (acc->n_must <= 1)
		return acc;

	isl_quicksort(acc->source, acc->n_must, sizeof(struct isl_labeled_map),
		access_sort_cmp, acc);

	return acc;
}
/* Align the parameters of the two spaces if needed and then call
 * isl_space_join.
 */
static __isl_give isl_space *space_align_and_join(__isl_take isl_space *left,
	__isl_take isl_space *right)
{
	if (isl_space_match(left, isl_dim_param, right, isl_dim_param))
		return isl_space_join(left, right);

	left = isl_space_align_params(left, isl_space_copy(right));
	right = isl_space_align_params(right, isl_space_copy(left));
	return isl_space_join(left, right);
}
/* Initialize an empty isl_flow structure corresponding to a given
 * isl_access_info structure.
 * For each must access, two dependences are created (initialized
 * to the empty relation), one for the resulting must dependences
 * and one for the resulting may dependences.  May accesses can
 * only lead to may dependences, so only one dependence is created
 * in this case.
 * This function is private as isl_flow structures are only supposed
 * to be created by isl_access_info_compute_flow.
 */
static __isl_give isl_flow *isl_flow_alloc(__isl_keep isl_access_info *acc)
{
	int i;
	isl_ctx *ctx;
	struct isl_flow *dep;

	if (!acc)
		return NULL;

	ctx = isl_map_get_ctx(acc->sink.map);
	dep = isl_calloc_type(ctx, struct isl_flow);
	if (!dep)
		return NULL;

	dep->dep = isl_calloc_array(ctx, struct isl_labeled_map,
					2 * acc->n_must + acc->n_may);
	if (!dep->dep)
		goto error;

	dep->n_source = 2 * acc->n_must + acc->n_may;
	for (i = 0; i < acc->n_must; ++i) {
		isl_space *dim;
		dim = space_align_and_join(
			isl_map_get_space(acc->source[i].map),
			isl_space_reverse(isl_map_get_space(acc->sink.map)));
		dep->dep[2 * i].map = isl_map_empty(dim);
		dep->dep[2 * i + 1].map = isl_map_copy(dep->dep[2 * i].map);
		dep->dep[2 * i].data = acc->source[i].data;
		dep->dep[2 * i + 1].data = acc->source[i].data;
		dep->dep[2 * i].must = 1;
		dep->dep[2 * i + 1].must = 0;
		if (!dep->dep[2 * i].map || !dep->dep[2 * i + 1].map)
			goto error;
	}
	for (i = acc->n_must; i < acc->n_must + acc->n_may; ++i) {
		isl_space *dim;
		dim = space_align_and_join(
			isl_map_get_space(acc->source[i].map),
			isl_space_reverse(isl_map_get_space(acc->sink.map)));
		dep->dep[acc->n_must + i].map = isl_map_empty(dim);
		dep->dep[acc->n_must + i].data = acc->source[i].data;
		dep->dep[acc->n_must + i].must = 0;
		if (!dep->dep[acc->n_must + i].map)
			goto error;
	}

	return dep;
error:
	isl_flow_free(dep);
	return NULL;
}
/* Iterate over all sources and for each resulting flow dependence
 * that is not empty, call the user specified function.
 * The second argument in this function call identifies the source,
 * while the third argument corresponds to the final argument of
 * the isl_flow_foreach call.
 */
int isl_flow_foreach(__isl_keep isl_flow *deps,
	int (*fn)(__isl_take isl_map *dep, int must, void *dep_user, void *user),
	void *user)
{
	int i;

	if (!deps)
		return -1;

	for (i = 0; i < deps->n_source; ++i) {
		if (isl_map_plain_is_empty(deps->dep[i].map))
			continue;
		if (fn(isl_map_copy(deps->dep[i].map), deps->dep[i].must,
				deps->dep[i].data, user) < 0)
			return -1;
	}

	return 0;
}
/* Return a copy of the subset of the sink for which no source could be found.
 */
__isl_give isl_map *isl_flow_get_no_source(__isl_keep isl_flow *deps, int must)
{
	if (!deps)
		return NULL;

	if (must)
		return isl_set_unwrap(isl_set_copy(deps->must_no_source));
	else
		return isl_set_unwrap(isl_set_copy(deps->may_no_source));
}
void isl_flow_free(__isl_take isl_flow *deps)
{
	int i;

	if (!deps)
		return;
	isl_set_free(deps->must_no_source);
	isl_set_free(deps->may_no_source);
	if (deps->dep) {
		for (i = 0; i < deps->n_source; ++i)
			isl_map_free(deps->dep[i].map);
		free(deps->dep);
	}
	free(deps);
}
isl_ctx *isl_flow_get_ctx(__isl_keep isl_flow *deps)
{
	return deps ? isl_set_get_ctx(deps->must_no_source) : NULL;
}
/* Return a map that enforces that the domain iteration occurs after
 * the range iteration at the given level.
 * If level is odd, then the domain iteration should occur after
 * the target iteration in their shared level/2 outermost loops.
 * In this case we simply need to enforce that these outermost
 * loop iterations are the same.
 * If level is even, then the loop iterator of the domain should
 * be greater than the loop iterator of the range at the last
 * of the level/2 shared loops, i.e., loop level/2 - 1.
 */
static __isl_give isl_map *after_at_level(__isl_take isl_space *dim, int level)
{
	struct isl_basic_map *bmap;

	if (level % 2)
		bmap = isl_basic_map_equal(dim, level/2);
	else
		bmap = isl_basic_map_more_at(dim, level/2 - 1);

	return isl_map_from_basic_map(bmap);
}
/* Check if the user has set acc->restrict_sources and if so
 * intersect the range of "dep" with the result of a call to this function.
 *
 * Since the user expects a mapping from sink iterations to source iterations,
 * whereas the domain of "dep" is a wrapped map, mapping sink iterations
 * to accessed array elements, we first need to project out the accessed
 * sink array elements by applying acc->domain_map.
 */
static __isl_give isl_map *restrict_sources(__isl_take isl_map *dep,
	struct isl_access_info *acc, int source)
{
	isl_map *source_map;
	isl_set *param;

	if (!acc->restrict_sources)
		return dep;

	source_map = isl_map_copy(dep);
	source_map = isl_map_apply_domain(source_map,
					isl_map_copy(acc->domain_map));
	param = acc->restrict_sources(source_map, acc->sink.data,
					acc->source[source].data);
	dep = isl_map_intersect_range(dep, param);

	return dep;
}
/* Compute the last iteration of must source j that precedes the sink
 * at the given level for sink iterations in set_C.
 * The subset of set_C for which no such iteration can be found is returned
 * in *empty.
 */
static struct isl_map *last_source(struct isl_access_info *acc,
	struct isl_set *set_C,
	int j, int level, struct isl_set **empty)
{
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after;
	struct isl_map *result;

	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_space(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);
	dep_map = restrict_sources(dep_map, acc, j);
	result = isl_map_partial_lexmax(dep_map, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* For a given mapping between iterations of must source j and iterations
 * of the sink, compute the last iteration of must source k preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source j at level
 * after_level.
 */
static struct isl_map *last_later_source(struct isl_access_info *acc,
	struct isl_map *old_map,
	int j, int before_level,
	int k, int after_level,
	struct isl_set **empty)
{
	isl_space *dim;
	struct isl_set *set_C;
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after_write;
	struct isl_map *before_read;
	struct isl_map *result;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[k].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = space_align_and_join(isl_map_get_space(acc->source[k].map),
		    isl_space_reverse(isl_map_get_space(acc->source[j].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_space(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	dep_map = restrict_sources(dep_map, acc, k);
	result = isl_map_partial_lexmax(dep_map, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* Given a shared_level between two accesses, return 1 if
 * the first can precede the second at the requested target_level.
 * If the target level is odd, i.e., refers to a statement level
 * dimension, then first needs to precede second at the requested
 * level, i.e., shared_level must be equal to target_level.
 * If the target level is even, then the two loops should share
 * at least the requested number of outer loops.
 */
static int can_precede_at_level(int shared_level, int target_level)
{
	if (shared_level < target_level)
		return 0;
	if ((target_level % 2) && shared_level > target_level)
		return 0;
	return 1;
}
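/* For example, a source that shares one loop with the sink and appears
 * textually before it (shared_level 3) can precede the sink both inside
 * the shared loop and across its iterations:
 *
 *	can_precede_at_level(3, 3) = 1
 *	can_precede_at_level(3, 2) = 1
 *	can_precede_at_level(3, 5) = 0
 *
 * whereas a source that only shares the loop (shared_level 2) cannot
 * precede the sink at the statement level 3:
 *
 *	can_precede_at_level(2, 3) = 0
 */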
/* Given a possible flow dependence temp_rel[j] between source j and the sink
 * at level sink_level, remove those elements for which
 * there is an iteration of another source k < j that is closer to the sink.
 * The flow dependences temp_rel[k] are updated with the improved sources.
 * Any improved source needs to precede the sink at the same level
 * and needs to follow source j at the same or a deeper level.
 * The lower this level, the later the execution date of source k.
 * We therefore consider lower levels first.
 *
 * If temp_rel[j] is empty, then there can be no improvement and
 * we return immediately.
 */
static int intermediate_sources(__isl_keep isl_access_info *acc,
	struct isl_map **temp_rel, int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[j].map, isl_dim_in) + 1;

	if (isl_map_plain_is_empty(temp_rel[j]))
		return 0;

	for (k = j - 1; k >= 0; --k) {
		int plevel, plevel2;
		plevel = acc->level_before(acc->source[k].data, acc->sink.data);
		if (!can_precede_at_level(plevel, sink_level))
			continue;

		plevel2 = acc->level_before(acc->source[j].data,
						acc->source[k].data);

		for (level = sink_level; level <= depth; ++level) {
			struct isl_map *T;
			struct isl_set *trest;
			struct isl_map *copy;

			if (!can_precede_at_level(plevel2, level))
				continue;

			copy = isl_map_copy(temp_rel[j]);
			T = last_later_source(acc, copy, j, sink_level, k,
						level, &trest);
			if (isl_map_plain_is_empty(T)) {
				isl_set_free(trest);
				isl_map_free(T);
				continue;
			}
			temp_rel[j] = isl_map_intersect_range(temp_rel[j], trest);
			temp_rel[k] = isl_map_union_disjoint(temp_rel[k], T);
		}
	}

	return 0;
}
/* Compute all iterations of may source j that precede the sink at the given
 * level for sink iterations in set_C.
 */
static __isl_give isl_map *all_sources(__isl_keep isl_access_info *acc,
	__isl_take isl_set *set_C, int j, int level)
{
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after;

	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_space(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);

	return isl_map_reverse(dep_map);
}
/* For a given mapping between iterations of must source k and iterations
 * of the sink, compute all iterations of may source j preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source k at level
 * after_level.
 */
static __isl_give isl_map *all_later_sources(__isl_keep isl_access_info *acc,
	__isl_keep isl_map *old_map,
	int j, int before_level, int k, int after_level)
{
	isl_space *dim;
	isl_set *set_C;
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after_write;
	isl_map *before_read;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = isl_space_join(isl_map_get_space(acc->source[acc->n_must + j].map),
		    isl_space_reverse(isl_map_get_space(acc->source[k].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_space(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	return isl_map_reverse(dep_map);
}
/* Given the must and may dependence relations for the must accesses
 * for level sink_level, check if there are any accesses of may access j
 * that occur in between and return their union.
 * If some of these accesses are intermediate with respect to
 * (previously thought to be) must dependences, then these
 * must dependences are turned into may dependences.
 */
static __isl_give isl_map *all_intermediate_sources(
	__isl_keep isl_access_info *acc, __isl_take isl_map *map,
	struct isl_map **must_rel, struct isl_map **may_rel,
	int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[acc->n_must + j].map,
					isl_dim_in) + 1;

	for (k = 0; k < acc->n_must; ++k) {
		int plevel;

		if (isl_map_plain_is_empty(may_rel[k]) &&
		    isl_map_plain_is_empty(must_rel[k]))
			continue;

		plevel = acc->level_before(acc->source[k].data,
					acc->source[acc->n_must + j].data);

		for (level = sink_level; level <= depth; ++level) {
			isl_map *T;
			isl_map *copy;
			isl_set *ran;

			if (!can_precede_at_level(plevel, level))
				continue;

			copy = isl_map_copy(may_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			map = isl_map_union(map, T);

			copy = isl_map_copy(must_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			ran = isl_map_range(isl_map_copy(T));
			map = isl_map_union(map, T);
			may_rel[k] = isl_map_union_disjoint(may_rel[k],
			    isl_map_intersect_range(isl_map_copy(must_rel[k]),
						    isl_set_copy(ran)));
			T = isl_map_from_domain_and_range(
			    isl_set_universe(
				isl_space_domain(isl_map_get_space(must_rel[k]))),
			    ran);
			must_rel[k] = isl_map_subtract(must_rel[k], T);
		}
	}

	return map;
}
/* Compute dependences for the case where all accesses are "may"
 * accesses, which boils down to computing memory based dependences.
 * The generic algorithm would also work in this case, but it would
 * be overkill to use it.
 */
static __isl_give isl_flow *compute_mem_based_dependences(
	__isl_keep isl_access_info *acc)
{
	int i;
	isl_set *mustdo;
	isl_set *maydo;
	isl_flow *res;

	res = isl_flow_alloc(acc);
	if (!res)
		return NULL;

	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_copy(mustdo);

	for (i = 0; i < acc->n_may; ++i) {
		int plevel;
		int is_before;
		isl_space *dim;
		isl_map *before;
		isl_map *dep;

		plevel = acc->level_before(acc->source[i].data, acc->sink.data);
		is_before = plevel & 1;
		plevel >>= 1;

		dim = isl_map_get_space(res->dep[i].map);
		if (is_before)
			before = isl_map_lex_le_first(dim, plevel);
		else
			before = isl_map_lex_lt_first(dim, plevel);
		dep = isl_map_apply_range(isl_map_copy(acc->source[i].map),
			isl_map_reverse(isl_map_copy(acc->sink.map)));
		dep = isl_map_intersect(dep, before);
		mustdo = isl_set_subtract(mustdo,
					isl_map_range(isl_map_copy(dep)));
		res->dep[i].map = isl_map_union(res->dep[i].map, dep);
	}

	res->may_no_source = isl_set_subtract(maydo, isl_set_copy(mustdo));
	res->must_no_source = mustdo;

	return res;
}
/* Compute dependences for the case where there is at least one
 * must access.
 *
 * The core algorithm considers all levels in which a source may precede
 * the sink, where a level may either be a statement level or a loop level.
 * The outermost statement level is 1, the first loop level is 2, etc...
 * The algorithm basically does the following:
 * for all levels l of the read access from innermost to outermost
 *	for all sources w that may precede the sink access at that level
 *	    compute the last iteration of the source that precedes the sink access
 *	    add result to possible last accesses at level l of source w
 *	    for all sources w2 that we haven't considered yet at this level that may
 *					also precede the sink access
 *		for all levels l2 of w from l to innermost
 *		    for all possible last accesses dep of w at l
 *			compute last iteration of w2 between the source and sink
 *			add result to possible last accesses at level l of write w2
 *			and replace possible last accesses dep by the remainder
 *
 * The above algorithm is applied to the must access.  During the course
 * of the algorithm, we keep track of sink iterations that still
 * need to be considered.  These iterations are split into those that
 * haven't been matched to any source access (mustdo) and those that have only
 * been matched to may accesses (maydo).
 * At the end of each level, we also consider the may accesses.
 * In particular, we consider may accesses that precede the remaining
 * sink iterations, moving elements from mustdo to maydo when appropriate,
 * and may accesses that occur between a must source and a sink of any
 * dependences found at the current level, turning must dependences into
 * may dependences when appropriate.
 */
static __isl_give isl_flow *compute_val_based_dependences(
	__isl_keep isl_access_info *acc)
{
	isl_ctx *ctx;
	isl_flow *res;
	isl_set *mustdo = NULL;
	isl_set *maydo = NULL;
	int level, j;
	int depth;
	isl_map **must_rel = NULL;
	isl_map **may_rel = NULL;

	if (!acc)
		return NULL;

	res = isl_flow_alloc(acc);
	if (!res)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);

	depth = 2 * isl_map_dim(acc->sink.map, isl_dim_in) + 1;
	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_empty_like(mustdo);
	if (!mustdo || !maydo)
		goto error;
	if (isl_set_plain_is_empty(mustdo))
		goto done;

	must_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	may_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	if (!must_rel || !may_rel)
		goto error;

	for (level = depth; level >= 1; --level) {
		for (j = acc->n_must - 1; j >= 0; --j) {
			must_rel[j] = isl_map_empty_like(res->dep[j].map);
			may_rel[j] = isl_map_copy(must_rel[j]);
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			struct isl_map *T;
			struct isl_set *rest;
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = last_source(acc, mustdo, j, level, &rest);
			must_rel[j] = isl_map_union_disjoint(must_rel[j], T);
			mustdo = rest;

			intermediate_sources(acc, must_rel, j, level);

			T = last_source(acc, maydo, j, level, &rest);
			may_rel[j] = isl_map_union_disjoint(may_rel[j], T);
			maydo = rest;

			intermediate_sources(acc, may_rel, j, level);

			if (isl_set_plain_is_empty(mustdo) &&
			    isl_set_plain_is_empty(maydo))
				break;
		}
		for (j = j - 1; j >= 0; --j) {
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			intermediate_sources(acc, must_rel, j, level);
			intermediate_sources(acc, may_rel, j, level);
		}

		for (j = 0; j < acc->n_may; ++j) {
			int plevel;
			isl_map *T;
			isl_set *ran;

			plevel = acc->level_before(acc->source[acc->n_must + j].data,
						acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = all_sources(acc, isl_set_copy(maydo), j, level);
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			T = all_sources(acc, isl_set_copy(mustdo), j, level);
			ran = isl_map_range(isl_map_copy(T));
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			mustdo = isl_set_subtract(mustdo, isl_set_copy(ran));
			maydo = isl_set_union_disjoint(maydo, ran);

			T = res->dep[2 * acc->n_must + j].map;
			T = all_intermediate_sources(acc, T, must_rel, may_rel,
							j, level);
			res->dep[2 * acc->n_must + j].map = T;
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			res->dep[2 * j].map =
				isl_map_union_disjoint(res->dep[2 * j].map,
							must_rel[j]);
			res->dep[2 * j + 1].map =
				isl_map_union_disjoint(res->dep[2 * j + 1].map,
							may_rel[j]);
		}

		if (isl_set_plain_is_empty(mustdo) &&
		    isl_set_plain_is_empty(maydo))
			break;
	}

	free(must_rel);
	free(may_rel);
done:
	res->must_no_source = mustdo;
	res->may_no_source = maydo;
	return res;
error:
	isl_flow_free(res);
	isl_set_free(mustdo);
	isl_set_free(maydo);
	free(must_rel);
	free(may_rel);
	return NULL;
}
/* Given a "sink" access, a list of n "source" accesses,
 * compute for each iteration of the sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a list of n relations between source and sink
 * iterations and a subset of the domain of the sink access,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * To deal with multi-valued sink access relations, the sink iteration
 * domain is first extended with dimensions that correspond to the data
 * space.  After the computation is finished, these extra dimensions are
 * projected out again.
 */
__isl_give isl_flow *isl_access_info_compute_flow(__isl_take isl_access_info *acc)
{
	int j;
	struct isl_flow *res = NULL;

	if (!acc)
		return NULL;

	acc->domain_map = isl_map_domain_map(isl_map_copy(acc->sink.map));
	acc->sink.map = isl_map_range_map(acc->sink.map);
	if (!acc->sink.map)
		goto error;

	if (acc->n_must == 0)
		res = compute_mem_based_dependences(acc);
	else {
		acc = isl_access_info_sort_sources(acc);
		res = compute_val_based_dependences(acc);
	}
	if (!res)
		goto error;

	for (j = 0; j < res->n_source; ++j) {
		res->dep[j].map = isl_map_apply_range(res->dep[j].map,
					isl_map_copy(acc->domain_map));
		if (!res->dep[j].map)
			goto error;
	}
	if (!res->must_no_source || !res->may_no_source)
		goto error;

	isl_access_info_free(acc);
	return res;
error:
	isl_access_info_free(acc);
	isl_flow_free(res);
	return NULL;
}
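/* Illustrative sketch, not part of the original code: driving the
 * pointwise interface above for a single must source.  The access
 * relations, the order() callback and print_dep() are made-up
 * placeholders for the example; only the isl_access_info_* and
 * isl_flow_* calls below belong to the interface implemented in this
 * file.  The sketch also assumes the two-argument form of
 * isl_map_read_from_str.
 */
#if 0
/* S1 writes A[i] just before S2 reads it in the same iteration of their
 * single shared loop, so S1 precedes S2 at level 2 * 1 + 1 = 3 and
 * S2 precedes S1 only at loop level 2 * 1 = 2.
 */
static int order(void *first, void *second)
{
	return *(int *) first < *(int *) second ? 3 : 2;
}

static int print_dep(__isl_take isl_map *dep, int must, void *dep_user,
	void *user)
{
	/* "must" is 1 here; the dependence should be { S1[i] -> S2[i] }. */
	isl_map_dump(dep);
	isl_map_free(dep);
	return 0;
}

static void example(isl_ctx *ctx)
{
	static int s1 = 1, s2 = 2;
	isl_map *sink, *source;
	isl_access_info *ai;
	isl_flow *flow;

	sink = isl_map_read_from_str(ctx,
		"{ S2[i] -> A[i] : 0 <= i < 100 }");
	source = isl_map_read_from_str(ctx,
		"{ S1[i] -> A[i] : 0 <= i < 100 }");

	ai = isl_access_info_alloc(sink, &s2, &order, 1);
	ai = isl_access_info_add_source(ai, source, 1, &s1);
	flow = isl_access_info_compute_flow(ai);

	isl_flow_foreach(flow, &print_dep, NULL);
	isl_map_free(isl_flow_get_no_source(flow, 1));
	isl_flow_free(flow);
}
#endif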
/* Keep track of some information about a schedule for a given
 * access.  In particular, keep track of which dimensions
 * have a constant value and of the actual constant values.
 */
struct isl_sched_info {
	int *is_cst;
	isl_vec *cst;
};

static void sched_info_free(__isl_take struct isl_sched_info *info)
{
	if (!info)
		return;
	isl_vec_free(info->cst);
	free(info->is_cst);
	free(info);
}
/* Extract information on the constant dimensions of the schedule
 * for a given access.  The "map" is of the form
 *
 *	[S -> D] -> A
 *
 * with S the schedule domain, D the iteration domain and A the data domain.
 */
static __isl_give struct isl_sched_info *sched_info_alloc(
	__isl_keep isl_map *map)
{
	isl_ctx *ctx;
	isl_space *dim;
	struct isl_sched_info *info;
	int i, n;

	if (!map)
		return NULL;

	dim = isl_space_unwrap(isl_space_domain(isl_map_get_space(map)));
	if (!dim)
		return NULL;
	n = isl_space_dim(dim, isl_dim_in);
	isl_space_free(dim);

	ctx = isl_map_get_ctx(map);
	info = isl_alloc_type(ctx, struct isl_sched_info);
	if (!info)
		return NULL;
	info->is_cst = isl_alloc_array(ctx, int, n);
	info->cst = isl_vec_alloc(ctx, n);
	if (!info->is_cst || !info->cst)
		goto error;

	for (i = 0; i < n; ++i)
		info->is_cst[i] = isl_map_plain_is_fixed(map, isl_dim_in, i,
							&info->cst->el[i]);

	return info;
error:
	sched_info_free(info);
	return NULL;
}
struct isl_compute_flow_data {
	isl_union_map *must_source;
	isl_union_map *may_source;
	isl_union_map *must_dep;
	isl_union_map *may_dep;
	isl_union_map *must_no_source;
	isl_union_map *may_no_source;

	int count;
	int must;
	isl_space *dim;
	struct isl_sched_info *sink_info;
	struct isl_sched_info **source_info;
	isl_access_info *accesses;
};
static int count_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);
	isl_map_free(map);

	if (eq < 0)
		return -1;
	if (eq)
		data->count++;

	return 0;
}
static int collect_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_sched_info *info;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);

	if (eq < 0)
		goto error;
	if (!eq) {
		isl_map_free(map);
		return 0;
	}

	info = sched_info_alloc(map);
	data->source_info[data->count] = info;

	data->accesses = isl_access_info_add_source(data->accesses,
						    map, data->must, info);

	data->count++;

	return 0;
error:
	isl_map_free(map);
	return -1;
}
/* Determine the shared nesting level and the "textual order" of
 * the given accesses.
 *
 * We first determine the minimal schedule dimension for both accesses.
 *
 * If among those dimensions, we can find one where both have a fixed
 * value and if moreover those values are different, then the previous
 * dimension is the last shared nesting level and the textual order
 * is determined based on the order of the fixed values.
 * If no such fixed values can be found, then we set the shared
 * nesting level to the minimal schedule dimension, with no textual ordering.
 */
static int before(void *first, void *second)
{
	struct isl_sched_info *info1 = first;
	struct isl_sched_info *info2 = second;
	int n1, n2;
	int i;

	n1 = info1->cst->size;
	n2 = info2->cst->size;

	if (n2 < n1)
		n1 = n2;

	for (i = 0; i < n1; ++i) {
		if (!info1->is_cst[i])
			continue;
		if (!info2->is_cst[i])
			continue;
		if (isl_int_eq(info1->cst->el[i], info2->cst->el[i]))
			continue;
		return 2 * i + isl_int_lt(info1->cst->el[i], info2->cst->el[i]);
	}

	return 2 * n1;
}
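/* For example, with schedules that interleave constant "textual" positions
 * and loop iterators, as used by isl_union_map_compute_flow below, two
 * statements nested in the same loop could have (made-up) schedules
 *
 *	S1: [0, i, 0]		is_cst = {1, 0, 1}, cst = {0, -, 0}
 *	S2: [0, i, 1]		is_cst = {1, 0, 1}, cst = {0, -, 1}
 *
 * The first differing constant dimension is dimension 2, with the value
 * for S1 smaller than that for S2, so before(S1, S2) = 2 * 2 + 1 = 5
 * and before(S2, S1) = 2 * 2 = 4.
 */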
/* Given a sink access, look for all the source accesses that access
 * the same array and perform dataflow analysis on them using
 * isl_access_info_compute_flow.
 */
static int compute_flow(__isl_take isl_map *map, void *user)
{
	int i;
	isl_ctx *ctx;
	struct isl_compute_flow_data *data;
	isl_flow *flow;

	data = (struct isl_compute_flow_data *)user;

	ctx = isl_map_get_ctx(map);

	data->accesses = NULL;
	data->sink_info = NULL;
	data->source_info = NULL;
	data->count = 0;
	data->dim = isl_space_range(isl_map_get_space(map));

	if (isl_union_map_foreach_map(data->must_source,
					&count_matching_array, data) < 0)
		goto error;
	if (isl_union_map_foreach_map(data->may_source,
					&count_matching_array, data) < 0)
		goto error;

	data->sink_info = sched_info_alloc(map);
	data->source_info = isl_calloc_array(ctx, struct isl_sched_info *,
					data->count);

	data->accesses = isl_access_info_alloc(isl_map_copy(map),
				data->sink_info, &before, data->count);
	if (!data->sink_info || !data->source_info || !data->accesses)
		goto error;
	data->count = 0;
	data->must = 1;
	if (isl_union_map_foreach_map(data->must_source,
					&collect_matching_array, data) < 0)
		goto error;
	data->must = 0;
	if (isl_union_map_foreach_map(data->may_source,
					&collect_matching_array, data) < 0)
		goto error;

	flow = isl_access_info_compute_flow(data->accesses);
	data->accesses = NULL;

	if (!flow)
		goto error;

	data->must_no_source = isl_union_map_union(data->must_no_source,
		    isl_union_map_from_map(isl_flow_get_no_source(flow, 1)));
	data->may_no_source = isl_union_map_union(data->may_no_source,
		    isl_union_map_from_map(isl_flow_get_no_source(flow, 0)));

	for (i = 0; i < flow->n_source; ++i) {
		isl_union_map *dep;
		dep = isl_union_map_from_map(isl_map_copy(flow->dep[i].map));
		if (flow->dep[i].must)
			data->must_dep = isl_union_map_union(data->must_dep, dep);
		else
			data->may_dep = isl_union_map_union(data->may_dep, dep);
	}

	isl_flow_free(flow);

	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return 0;
error:
	isl_access_info_free(data->accesses);
	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return -1;
}
/* Given a collection of "sink" and "source" accesses,
 * compute for each iteration of a sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a relation between source and sink
 * iterations and a subset of the domain of the sink accesses,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * We first prepend the schedule dimensions to the domain
 * of the accesses so that we can easily compare their relative order.
 * Then we consider each sink access individually in compute_flow.
 */
int isl_union_map_compute_flow(__isl_take isl_union_map *sink,
	__isl_take isl_union_map *must_source,
	__isl_take isl_union_map *may_source,
	__isl_take isl_union_map *schedule,
	__isl_give isl_union_map **must_dep, __isl_give isl_union_map **may_dep,
	__isl_give isl_union_map **must_no_source,
	__isl_give isl_union_map **may_no_source)
{
	isl_space *dim;
	isl_union_map *range_map = NULL;
	struct isl_compute_flow_data data;

	sink = isl_union_map_align_params(sink,
					isl_union_map_get_space(must_source));
	sink = isl_union_map_align_params(sink,
					isl_union_map_get_space(may_source));
	sink = isl_union_map_align_params(sink,
					isl_union_map_get_space(schedule));
	dim = isl_union_map_get_space(sink);
	must_source = isl_union_map_align_params(must_source, isl_space_copy(dim));
	may_source = isl_union_map_align_params(may_source, isl_space_copy(dim));
	schedule = isl_union_map_align_params(schedule, isl_space_copy(dim));

	schedule = isl_union_map_reverse(schedule);
	range_map = isl_union_map_range_map(schedule);
	schedule = isl_union_map_reverse(isl_union_map_copy(range_map));
	sink = isl_union_map_apply_domain(sink, isl_union_map_copy(schedule));
	must_source = isl_union_map_apply_domain(must_source,
						isl_union_map_copy(schedule));
	may_source = isl_union_map_apply_domain(may_source, schedule);

	data.must_source = must_source;
	data.may_source = may_source;
	data.must_dep = must_dep ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_dep = may_dep ? isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.must_no_source = must_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_no_source = may_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;

	isl_space_free(dim);

	if (isl_union_map_foreach_map(sink, &compute_flow, &data) < 0)
		goto error;

	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);

	if (must_dep) {
		data.must_dep = isl_union_map_apply_domain(data.must_dep,
					isl_union_map_copy(range_map));
		data.must_dep = isl_union_map_apply_range(data.must_dep,
					isl_union_map_copy(range_map));
		*must_dep = data.must_dep;
	}
	if (may_dep) {
		data.may_dep = isl_union_map_apply_domain(data.may_dep,
					isl_union_map_copy(range_map));
		data.may_dep = isl_union_map_apply_range(data.may_dep,
					isl_union_map_copy(range_map));
		*may_dep = data.may_dep;
	}
	if (must_no_source) {
		data.must_no_source = isl_union_map_apply_domain(
			data.must_no_source, isl_union_map_copy(range_map));
		*must_no_source = data.must_no_source;
	}
	if (may_no_source) {
		data.may_no_source = isl_union_map_apply_domain(
			data.may_no_source, isl_union_map_copy(range_map));
		*may_no_source = data.may_no_source;
	}

	isl_union_map_free(range_map);

	return 0;
error:
	isl_union_map_free(range_map);
	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);
	isl_union_map_free(data.must_dep);
	isl_union_map_free(data.may_dep);
	isl_union_map_free(data.must_no_source);
	isl_union_map_free(data.may_no_source);

	if (must_dep)
		*must_dep = NULL;
	if (may_dep)
		*may_dep = NULL;
	if (must_no_source)
		*must_no_source = NULL;
	if (may_no_source)
		*may_no_source = NULL;

	return -1;
}
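/* Illustrative sketch, not part of the original code: a possible call to
 * isl_union_map_compute_flow for the loop nest
 *
 *	for (i = 0; i < N; ++i) {
 *S1:		A[i] = i;
 *S2:		B[i] = A[i];
 *	}
 *
 * The access relations, the schedule and the parameter name N are made up
 * for the example; the sketch assumes the two-argument form of
 * isl_union_map_read_from_str.
 */
#if 0
static void example(isl_ctx *ctx)
{
	isl_union_map *sink, *must_source, *may_source, *schedule;
	isl_union_map *must_dep, *may_dep, *must_no_source, *may_no_source;

	sink = isl_union_map_read_from_str(ctx,
		"[N] -> { S2[i] -> A[i] : 0 <= i < N }");
	must_source = isl_union_map_read_from_str(ctx,
		"[N] -> { S1[i] -> A[i] : 0 <= i < N }");
	may_source = isl_union_map_read_from_str(ctx, "{ }");
	schedule = isl_union_map_read_from_str(ctx,
		"{ S1[i] -> [0, i, 0]; S2[i] -> [0, i, 1] }");

	if (isl_union_map_compute_flow(sink, must_source, may_source,
			schedule, &must_dep, &may_dep,
			&must_no_source, &may_no_source) < 0)
		return;

	/* must_dep should relate each S1[i] to the S2[i] that reads A[i]. */
	isl_union_map_dump(must_dep);

	isl_union_map_free(must_dep);
	isl_union_map_free(may_dep);
	isl_union_map_free(must_no_source);
	isl_union_map_free(may_no_source);
}
#endif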