/*
 * Copyright 2005-2007 Universiteit Leiden
 * Copyright 2008-2009 Katholieke Universiteit Leuven
 * Copyright 2010      INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, Leiden Institute of Advanced Computer Science,
 * Universiteit Leiden, Niels Bohrweg 1, 2333 CA Leiden, The Netherlands
 * and K.U.Leuven, Departement Computerwetenschappen, Celestijnenlaan 200A,
 * B-3001 Leuven, Belgium
 * and INRIA Saclay - Ile-de-France, Parc Club Orsay Universite,
 * ZAC des vignes, 4 rue Jacques Monod, 91893 Orsay, France
 */
/* A private structure to keep track of a mapping together with
 * a user-specified identifier and a boolean indicating whether
 * the map represents a must or may access/dependence.
 */
struct isl_labeled_map {
	struct isl_map	*map;
	void		*data;
	int		must;
};
/* A structure containing the input for dependence analysis:
 * - a sink
 * - n_must + n_may (<= max_source) sources
 * - a function for determining the relative order of sources and sink
 * The must sources are placed before the may sources.
 */
struct isl_access_info {
	struct isl_labeled_map		sink;
	isl_access_level_before		level_before;
	int				max_source;
	int				n_must;
	int				n_may;
	struct isl_labeled_map		source[1];
};
/* A structure containing the output of dependence analysis:
 * - n_source dependences
 * - a wrapped subset of the sink for which definitely no source could be found
 * - a wrapped subset of the sink for which possibly no source could be found
 */
struct isl_flow {
	isl_set			*must_no_source;
	isl_set			*may_no_source;
	int			n_source;
	struct isl_labeled_map	*dep;
};
/* Construct an isl_access_info structure and fill it up with
 * the given data.  The number of sources is set to 0.
 */
__isl_give isl_access_info *isl_access_info_alloc(__isl_take isl_map *sink,
	void *sink_user, isl_access_level_before fn, int max_source)
{
	isl_ctx *ctx;
	struct isl_access_info *acc;

	if (!sink)
		return NULL;

	ctx = isl_map_get_ctx(sink);
	isl_assert(ctx, max_source >= 0, goto error);

	acc = isl_alloc(ctx, struct isl_access_info,
			sizeof(struct isl_access_info) +
			(max_source - 1) * sizeof(struct isl_labeled_map));
	if (!acc)
		goto error;

	acc->sink.map = sink;
	acc->sink.data = sink_user;
	acc->level_before = fn;
	acc->max_source = max_source;
	acc->n_must = 0;
	acc->n_may = 0;

	return acc;
error:
	isl_map_free(sink);
	return NULL;
}
/* Free the given isl_access_info structure.
 */
void isl_access_info_free(__isl_take isl_access_info *acc)
{
	int i;

	if (!acc)
		return;

	isl_map_free(acc->sink.map);
	for (i = 0; i < acc->n_must + acc->n_may; ++i)
		isl_map_free(acc->source[i].map);
	free(acc);
}
isl_ctx *isl_access_info_get_ctx(__isl_keep isl_access_info *acc)
{
	return acc ? isl_map_get_ctx(acc->sink.map) : NULL;
}
/* Add another source to an isl_access_info structure, making
 * sure the "must" sources are placed before the "may" sources.
 * This function may be called at most max_source times on a
 * given isl_access_info structure, with max_source as specified
 * in the call to isl_access_info_alloc that constructed the structure.
 */
__isl_give isl_access_info *isl_access_info_add_source(
	__isl_take isl_access_info *acc, __isl_take isl_map *source,
	int must, void *source_user)
{
	isl_ctx *ctx;

	if (!acc)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);
	isl_assert(ctx, acc->n_must + acc->n_may < acc->max_source, goto error);

	if (must) {
		acc->source[acc->n_must + acc->n_may] =
			acc->source[acc->n_must];
		acc->source[acc->n_must].map = source;
		acc->source[acc->n_must].data = source_user;
		acc->source[acc->n_must].must = 1;
		acc->n_must++;
	} else {
		acc->source[acc->n_must + acc->n_may].map = source;
		acc->source[acc->n_must + acc->n_may].data = source_user;
		acc->source[acc->n_must + acc->n_may].must = 0;
		acc->n_may++;
	}

	return acc;
error:
	isl_map_free(source);
	isl_access_info_free(acc);
	return NULL;
}
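
/* Illustrative sketch (not part of the original source): how a caller might
 * use the two functions above.  The maps, user pointers and the ordering
 * callback "stmt_level_before" are hypothetical placeholders.  Based on the
 * level encoding used in the rest of this file, the callback is expected to
 * return 2 * l + 1 if the first access textually precedes the second within
 * l shared loops, and 2 * l otherwise.
 *
 *	static int stmt_level_before(void *first, void *second)
 *	{
 *		struct my_stmt *s1 = first;
 *		struct my_stmt *s2 = second;
 *		return 2 * shared_loops(s1, s2) + (s1->pos < s2->pos);
 *	}
 *
 *	acc = isl_access_info_alloc(isl_map_copy(read), read_stmt,
 *					&stmt_level_before, 2);
 *	acc = isl_access_info_add_source(acc, isl_map_copy(write1),
 *					1, write1_stmt);
 *	acc = isl_access_info_add_source(acc, isl_map_copy(write2),
 *					0, write2_stmt);
 */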
/* A temporary structure used while sorting the accesses in an isl_access_info.
 */
struct isl_access_sort_info {
	struct isl_map		*source_map;
	void			*source_data;
	struct isl_access_info	*acc;
};
/* Return -n, 0 or n (with n a positive value), depending on whether
 * the source access identified by p1 should be sorted before, together
 * or after that identified by p2.
 *
 * If p1 and p2 share a different number of levels with the sink,
 * then the one with the lowest number of shared levels should be
 * sorted first.
 * If they both share no levels, then the order is irrelevant.
 * Otherwise, if p1 appears before p2, then it should be sorted first.
 * For more generic initial schedules, it is possible that neither
 * p1 nor p2 appears before the other, or at least not in any obvious way.
 * We therefore also check if p2 appears before p1, in which case p2
 * should be sorted first.
 * If not, we try to order the two statements based on the description
 * of the iteration domains.  This results in an arbitrary, but fairly
 * stable ordering.
 */
static int access_sort_cmp(const void *p1, const void *p2)
{
	const struct isl_access_sort_info *i1, *i2;
	int level1, level2;
	uint32_t h1, h2;

	i1 = (const struct isl_access_sort_info *) p1;
	i2 = (const struct isl_access_sort_info *) p2;

	level1 = i1->acc->level_before(i1->source_data, i1->acc->sink.data);
	level2 = i2->acc->level_before(i2->source_data, i2->acc->sink.data);

	if (level1 != level2 || !level1)
		return level1 - level2;

	level1 = i1->acc->level_before(i1->source_data, i2->source_data);
	if (level1 % 2)
		return -1;

	level2 = i1->acc->level_before(i2->source_data, i1->source_data);
	if (level2 % 2)
		return 1;

	h1 = isl_map_get_hash(i1->source_map);
	h2 = isl_map_get_hash(i2->source_map);
	return h1 > h2 ? 1 : h1 < h2 ? -1 : 0;
}
/* Sort the must source accesses in order of increasing number of shared
 * levels with the sink access.
 * Source accesses with the same number of shared levels are sorted
 * in their textual order.
 */
static __isl_give isl_access_info *isl_access_info_sort_sources(
	__isl_take isl_access_info *acc)
{
	int i;
	isl_ctx *ctx;
	struct isl_access_sort_info *array;

	if (!acc)
		return NULL;
	if (acc->n_must <= 1)
		return acc;

	ctx = isl_map_get_ctx(acc->sink.map);
	array = isl_alloc_array(ctx, struct isl_access_sort_info, acc->n_must);
	if (!array)
		goto error;

	for (i = 0; i < acc->n_must; ++i) {
		array[i].source_map = acc->source[i].map;
		array[i].source_data = acc->source[i].data;
		array[i].acc = acc;
	}

	qsort(array, acc->n_must, sizeof(struct isl_access_sort_info),
		access_sort_cmp);

	for (i = 0; i < acc->n_must; ++i) {
		acc->source[i].map = array[i].source_map;
		acc->source[i].data = array[i].source_data;
	}

	free(array);

	return acc;
error:
	isl_access_info_free(acc);
	return NULL;
}
/* Align the parameters of the two spaces if needed and then call
 * isl_space_join.
 */
static __isl_give isl_space *space_align_and_join(__isl_take isl_space *left,
	__isl_take isl_space *right)
{
	if (isl_space_match(left, isl_dim_param, right, isl_dim_param))
		return isl_space_join(left, right);

	left = isl_space_align_params(left, isl_space_copy(right));
	right = isl_space_align_params(right, isl_space_copy(left));
	return isl_space_join(left, right);
}
/* Initialize an empty isl_flow structure corresponding to a given
 * isl_access_info structure.
 * For each must access, two dependences are created (initialized
 * to the empty relation), one for the resulting must dependences
 * and one for the resulting may dependences.  May accesses can
 * only lead to may dependences, so only one dependence is created
 * for such accesses.
 * This function is private as isl_flow structures are only supposed
 * to be created by isl_access_info_compute_flow.
 */
static __isl_give isl_flow *isl_flow_alloc(__isl_keep isl_access_info *acc)
{
	int i;
	isl_ctx *ctx;
	struct isl_flow *dep;

	ctx = isl_map_get_ctx(acc->sink.map);
	dep = isl_calloc_type(ctx, struct isl_flow);
	if (!dep)
		return NULL;

	dep->dep = isl_calloc_array(ctx, struct isl_labeled_map,
					2 * acc->n_must + acc->n_may);
	if (!dep->dep)
		goto error;

	dep->n_source = 2 * acc->n_must + acc->n_may;
	for (i = 0; i < acc->n_must; ++i) {
		isl_space *dim;
		dim = space_align_and_join(
			isl_map_get_space(acc->source[i].map),
			isl_space_reverse(isl_map_get_space(acc->sink.map)));
		dep->dep[2 * i].map = isl_map_empty(dim);
		dep->dep[2 * i + 1].map = isl_map_copy(dep->dep[2 * i].map);
		dep->dep[2 * i].data = acc->source[i].data;
		dep->dep[2 * i + 1].data = acc->source[i].data;
		dep->dep[2 * i].must = 1;
		dep->dep[2 * i + 1].must = 0;
		if (!dep->dep[2 * i].map || !dep->dep[2 * i + 1].map)
			goto error;
	}
	for (i = acc->n_must; i < acc->n_must + acc->n_may; ++i) {
		isl_space *dim;
		dim = space_align_and_join(
			isl_map_get_space(acc->source[i].map),
			isl_space_reverse(isl_map_get_space(acc->sink.map)));
		dep->dep[acc->n_must + i].map = isl_map_empty(dim);
		dep->dep[acc->n_must + i].data = acc->source[i].data;
		dep->dep[acc->n_must + i].must = 0;
		if (!dep->dep[acc->n_must + i].map)
			goto error;
	}

	return dep;
error:
	isl_flow_free(dep);
	return NULL;
}
/* Iterate over all sources and for each resulting flow dependence
 * that is not empty, call the user specified function.
 * The second argument in this function call identifies the source,
 * while the third argument corresponds to the final argument of
 * the isl_flow_foreach call.
 */
int isl_flow_foreach(__isl_keep isl_flow *deps,
	int (*fn)(__isl_take isl_map *dep, int must, void *dep_user, void *user),
	void *user)
{
	int i;

	if (!deps)
		return -1;

	for (i = 0; i < deps->n_source; ++i) {
		if (isl_map_plain_is_empty(deps->dep[i].map))
			continue;
		if (fn(isl_map_copy(deps->dep[i].map), deps->dep[i].must,
				deps->dep[i].data, user) < 0)
			return -1;
	}

	return 0;
}
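
/* Illustrative sketch (not part of the original source): a callback that
 * could be passed to isl_flow_foreach.  It simply dumps each non-empty
 * dependence; since the callback takes ownership of "dep", it has to free it.
 *
 *	static int print_dep(__isl_take isl_map *dep, int must,
 *		void *dep_user, void *user)
 *	{
 *		isl_map_dump(dep);
 *		isl_map_free(dep);
 *		return 0;
 *	}
 *
 *	isl_flow_foreach(deps, &print_dep, NULL);
 */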
/* Return a copy of the subset of the sink for which no source could be found.
 */
__isl_give isl_map *isl_flow_get_no_source(__isl_keep isl_flow *deps, int must)
{
	if (!deps)
		return NULL;

	if (must)
		return isl_set_unwrap(isl_set_copy(deps->must_no_source));
	else
		return isl_set_unwrap(isl_set_copy(deps->may_no_source));
}
void isl_flow_free(__isl_take isl_flow *deps)
{
	int i;

	if (!deps)
		return;
	isl_set_free(deps->must_no_source);
	isl_set_free(deps->may_no_source);
	if (deps->dep) {
		for (i = 0; i < deps->n_source; ++i)
			isl_map_free(deps->dep[i].map);
		free(deps->dep);
	}
	free(deps);
}
isl_ctx *isl_flow_get_ctx(__isl_keep isl_flow *deps)
{
	return deps ? isl_set_get_ctx(deps->must_no_source) : NULL;
}
/* Return a map that enforces that the domain iteration occurs after
 * the range iteration at the given level.
 * If level is odd, then the domain iteration should occur after
 * the target iteration in their shared level/2 outermost loops.
 * In this case we simply need to enforce that these outermost
 * loop iterations are the same.
 * If level is even, then the loop iterator of the domain should
 * be greater than the loop iterator of the range at the last
 * of the level/2 shared loops, i.e., loop level/2 - 1.
 */
static __isl_give isl_map *after_at_level(__isl_take isl_space *dim, int level)
{
	struct isl_basic_map *bmap;

	if (level % 2)
		bmap = isl_basic_map_equal(dim, level/2);
	else
		bmap = isl_basic_map_more_at(dim, level/2 - 1);

	return isl_map_from_basic_map(bmap);
}
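
/* For example (illustration, not in the original source): level 3 is the
 * statement level inside one shared loop, so after_at_level(dim, 3) simply
 * equates the 3/2 = 1 outermost loop iterator of domain and range.
 * Level 4 refers to the second shared loop, so after_at_level(dim, 4)
 * requires the domain iterator to be strictly greater than the range
 * iterator at loop 4/2 - 1 = 1, i.e., the second loop counting from 0.
 */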
/* Compute the last iteration of must source j that precedes the sink
 * at the given level for sink iterations in set_C.
 * The subset of set_C for which no such iteration can be found is returned
 * in *empty.
 */
static struct isl_map *last_source(struct isl_access_info *acc,
				    struct isl_set *set_C,
				    int j, int level, struct isl_set **empty)
{
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after;
	struct isl_map *result;

	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_space(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);
	result = isl_map_partial_lexmax(dep_map, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* For a given mapping between iterations of must source j and iterations
 * of the sink, compute the last iteration of must source k preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source j at level
 * after_level.
 */
static struct isl_map *last_later_source(struct isl_access_info *acc,
					 struct isl_map *old_map,
					 int j, int before_level,
					 int k, int after_level,
					 struct isl_set **empty)
{
	isl_space *dim;
	struct isl_set *set_C;
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after_write;
	struct isl_map *before_read;
	struct isl_map *result;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[k].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = space_align_and_join(isl_map_get_space(acc->source[k].map),
		    isl_space_reverse(isl_map_get_space(acc->source[j].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_space(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	result = isl_map_partial_lexmax(dep_map, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* Given a shared_level between two accesses, return 1 if
 * the first can precede the second at the requested target_level.
 * If the target level is odd, i.e., refers to a statement level
 * dimension, then first needs to precede second at the requested
 * level, i.e., shared_level must be equal to target_level.
 * If the target level is even, then the two loops should share
 * at least the requested number of outer loops.
 */
static int can_precede_at_level(int shared_level, int target_level)
{
	if (shared_level < target_level)
		return 0;
	if ((target_level % 2) && shared_level > target_level)
		return 0;
	return 1;
}
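
/* A small worked example (not part of the original source): if two accesses
 * share two loops and the first textually precedes the second, then
 * shared_level = 2 * 2 + 1 = 5.  can_precede_at_level(5, 5) and
 * can_precede_at_level(5, 2) both return 1, but can_precede_at_level(5, 7)
 * returns 0 because a third shared loop would be required, and
 * can_precede_at_level(4, 5) returns 0 because sharing only two loops
 * without textual precedence is not enough for statement level 5.
 */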
/* Given a possible flow dependence temp_rel[j] between source j and the sink
 * at level sink_level, remove those elements for which
 * there is an iteration of another source k < j that is closer to the sink.
 * The flow dependences temp_rel[k] are updated with the improved sources.
 * Any improved source needs to precede the sink at the same level
 * and needs to follow source j at the same or a deeper level.
 * The lower this level, the later the execution date of source k.
 * We therefore consider lower levels first.
 *
 * If temp_rel[j] is empty, then there can be no improvement and
 * we return immediately.
 */
static int intermediate_sources(__isl_keep isl_access_info *acc,
	struct isl_map **temp_rel, int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[j].map, isl_dim_in) + 1;

	if (isl_map_plain_is_empty(temp_rel[j]))
		return 0;

	for (k = j - 1; k >= 0; --k) {
		int plevel, plevel2;
		plevel = acc->level_before(acc->source[k].data, acc->sink.data);
		if (!can_precede_at_level(plevel, sink_level))
			continue;

		plevel2 = acc->level_before(acc->source[j].data,
						acc->source[k].data);

		for (level = sink_level; level <= depth; ++level) {
			struct isl_map *T;
			struct isl_set *trest;
			struct isl_map *copy;

			if (!can_precede_at_level(plevel2, level))
				continue;

			copy = isl_map_copy(temp_rel[j]);
			T = last_later_source(acc, copy, j, sink_level, k,
					      level, &trest);
			if (isl_map_plain_is_empty(T)) {
				isl_set_free(trest);
				isl_map_free(T);
				continue;
			}
			temp_rel[j] = isl_map_intersect_range(temp_rel[j], trest);
			temp_rel[k] = isl_map_union_disjoint(temp_rel[k], T);
		}
	}

	return 0;
}
/* Compute all iterations of may source j that precede the sink at the given
 * level for sink iterations in set_C.
 */
static __isl_give isl_map *all_sources(__isl_keep isl_access_info *acc,
	__isl_take isl_set *set_C, int j, int level)
{
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after;

	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_space(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);

	return isl_map_reverse(dep_map);
}
/* For a given mapping between iterations of must source k and iterations
 * of the sink, compute all iterations of may source j preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source k at level
 * after_level.
 */
static __isl_give isl_map *all_later_sources(__isl_keep isl_access_info *acc,
	__isl_keep isl_map *old_map,
	int j, int before_level, int k, int after_level)
{
	isl_space *dim;
	isl_set *set_C;
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after_write;
	isl_map *before_read;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = isl_space_join(isl_map_get_space(acc->source[acc->n_must + j].map),
		    isl_space_reverse(isl_map_get_space(acc->source[k].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_space(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	return isl_map_reverse(dep_map);
}
/* Given the must and may dependence relations for the must accesses
 * for level sink_level, check if there are any accesses of may access j
 * that occur in between and return their union.
 * If some of these accesses are intermediate with respect to
 * (previously thought to be) must dependences, then these
 * must dependences are turned into may dependences.
 */
static __isl_give isl_map *all_intermediate_sources(
	__isl_keep isl_access_info *acc, __isl_take isl_map *map,
	struct isl_map **must_rel, struct isl_map **may_rel,
	int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[acc->n_must + j].map,
					isl_dim_in) + 1;

	for (k = 0; k < acc->n_must; ++k) {
		int plevel;

		if (isl_map_plain_is_empty(may_rel[k]) &&
		    isl_map_plain_is_empty(must_rel[k]))
			continue;

		plevel = acc->level_before(acc->source[k].data,
					acc->source[acc->n_must + j].data);

		for (level = sink_level; level <= depth; ++level) {
			isl_map *T;
			isl_map *copy;
			isl_set *ran;

			if (!can_precede_at_level(plevel, level))
				continue;

			copy = isl_map_copy(may_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			map = isl_map_union(map, T);

			copy = isl_map_copy(must_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			ran = isl_map_range(isl_map_copy(T));
			map = isl_map_union(map, T);
			may_rel[k] = isl_map_union_disjoint(may_rel[k],
			    isl_map_intersect_range(isl_map_copy(must_rel[k]),
						    isl_set_copy(ran)));
			T = isl_map_from_domain_and_range(
			    isl_set_universe(
				isl_space_domain(isl_map_get_space(must_rel[k]))),
			    ran);
			must_rel[k] = isl_map_subtract(must_rel[k], T);
		}
	}

	return map;
}
/* Compute dependences for the case where all accesses are "may"
 * accesses, which boils down to computing memory based dependences.
 * The generic algorithm would also work in this case, but it would
 * be overkill to use it.
 */
static __isl_give isl_flow *compute_mem_based_dependences(
	__isl_take isl_access_info *acc)
{
	int i;
	isl_set *mustdo;
	isl_set *maydo;
	isl_flow *res;

	res = isl_flow_alloc(acc);
	if (!res)
		goto error;

	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_copy(mustdo);

	for (i = 0; i < acc->n_may; ++i) {
		int plevel;
		int is_before;
		isl_space *dim;
		isl_map *before;
		isl_map *dep;

		plevel = acc->level_before(acc->source[i].data, acc->sink.data);
		is_before = plevel & 1;
		plevel >>= 1;

		dim = isl_map_get_space(res->dep[i].map);
		if (is_before)
			before = isl_map_lex_le_first(dim, plevel);
		else
			before = isl_map_lex_lt_first(dim, plevel);
		dep = isl_map_apply_range(isl_map_copy(acc->source[i].map),
			isl_map_reverse(isl_map_copy(acc->sink.map)));
		dep = isl_map_intersect(dep, before);
		mustdo = isl_set_subtract(mustdo,
					    isl_map_range(isl_map_copy(dep)));
		res->dep[i].map = isl_map_union(res->dep[i].map, dep);
	}

	res->may_no_source = isl_set_subtract(maydo, isl_set_copy(mustdo));
	res->must_no_source = mustdo;

	isl_access_info_free(acc);

	return res;
error:
	isl_access_info_free(acc);
	return NULL;
}
/* Compute dependences for the case where there is at least one
 * must access.
 *
 * The core algorithm considers all levels in which a source may precede
 * the sink, where a level may either be a statement level or a loop level.
 * The outermost statement level is 1, the first loop level is 2, etc...
 * The algorithm basically does the following:
 * for all levels l of the read access from innermost to outermost
 *	for all sources w that may precede the sink access at that level
 *	    compute the last iteration of the source that precedes the sink
 *					    access at that level
 *	    add result to possible last accesses at level l of source w
 *	    for all sources w2 that we haven't considered yet at this level
 *					    that may also precede the sink access
 *		for all levels l2 of w from l to innermost
 *		    for all possible last accesses dep of w at l
 *			compute last iteration of w2 between the source and
 *								sink of dep
 *			add result to possible last accesses at level l of
 *								write w2
 *			and replace possible last accesses dep by the remainder
 *
 * The above algorithm is applied to the must accesses.  During the course
 * of the algorithm, we keep track of sink iterations that still
 * need to be considered.  These iterations are split into those that
 * haven't been matched to any source access (mustdo) and those that have only
 * been matched to may accesses (maydo).
 * At the end of each level, we also consider the may accesses.
 * In particular, we consider may accesses that precede the remaining
 * sink iterations, moving elements from mustdo to maydo when appropriate,
 * and may accesses that occur between a must source and a sink of any
 * dependences found at the current level, turning must dependences into
 * may dependences when appropriate.
 */
static __isl_give isl_flow *compute_val_based_dependences(
	__isl_take isl_access_info *acc)
{
	isl_ctx *ctx;
	isl_flow *res;
	isl_set *mustdo = NULL;
	isl_set *maydo = NULL;
	int level, j;
	int depth;
	isl_map **must_rel = NULL;
	isl_map **may_rel = NULL;

	acc = isl_access_info_sort_sources(acc);
	if (!acc)
		return NULL;

	res = isl_flow_alloc(acc);
	if (!res)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);

	depth = 2 * isl_map_dim(acc->sink.map, isl_dim_in) + 1;
	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_empty_like(mustdo);
	if (!mustdo || !maydo)
		goto error;
	if (isl_set_plain_is_empty(mustdo))
		goto done;

	must_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	may_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	if (!must_rel || !may_rel)
		goto error;

	for (level = depth; level >= 1; --level) {
		for (j = acc->n_must-1; j >=0; --j) {
			must_rel[j] = isl_map_empty_like(res->dep[j].map);
			may_rel[j] = isl_map_copy(must_rel[j]);
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			struct isl_map *T;
			struct isl_set *rest;
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = last_source(acc, mustdo, j, level, &rest);
			must_rel[j] = isl_map_union_disjoint(must_rel[j], T);
			mustdo = rest;

			intermediate_sources(acc, must_rel, j, level);

			T = last_source(acc, maydo, j, level, &rest);
			may_rel[j] = isl_map_union_disjoint(may_rel[j], T);
			maydo = rest;

			intermediate_sources(acc, may_rel, j, level);

			if (isl_set_plain_is_empty(mustdo) &&
			    isl_set_plain_is_empty(maydo))
				break;
		}
		for (j = j - 1; j >= 0; --j) {
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			intermediate_sources(acc, must_rel, j, level);
			intermediate_sources(acc, may_rel, j, level);
		}

		for (j = 0; j < acc->n_may; ++j) {
			int plevel;
			isl_map *T;
			isl_set *ran;

			plevel = acc->level_before(acc->source[acc->n_must + j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = all_sources(acc, isl_set_copy(maydo), j, level);
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			T = all_sources(acc, isl_set_copy(mustdo), j, level);
			ran = isl_map_range(isl_map_copy(T));
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			mustdo = isl_set_subtract(mustdo, isl_set_copy(ran));
			maydo = isl_set_union_disjoint(maydo, ran);

			T = res->dep[2 * acc->n_must + j].map;
			T = all_intermediate_sources(acc, T, must_rel, may_rel,
							j, level);
			res->dep[2 * acc->n_must + j].map = T;
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			res->dep[2 * j].map =
				isl_map_union_disjoint(res->dep[2 * j].map,
							     must_rel[j]);
			res->dep[2 * j + 1].map =
				isl_map_union_disjoint(res->dep[2 * j + 1].map,
							     may_rel[j]);
		}

		if (isl_set_plain_is_empty(mustdo) &&
		    isl_set_plain_is_empty(maydo))
			break;
	}

	free(must_rel);
	free(may_rel);
done:
	res->must_no_source = mustdo;
	res->may_no_source = maydo;
	isl_access_info_free(acc);

	return res;
error:
	isl_access_info_free(acc);
	isl_flow_free(res);
	isl_set_free(mustdo);
	isl_set_free(maydo);
	free(must_rel);
	free(may_rel);
	return NULL;
}
/* Given a "sink" access, a list of n "source" accesses,
 * compute for each iteration of the sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a list of n relations between source and sink
 * iterations and a subset of the domain of the sink access,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * To deal with multi-valued sink access relations, the sink iteration
 * domain is first extended with dimensions that correspond to the data
 * space.  After the computation is finished, these extra dimensions are
 * projected out again.
 */
__isl_give isl_flow *isl_access_info_compute_flow(__isl_take isl_access_info *acc)
{
	int j;
	struct isl_flow *res;
	isl_map *domain_map = NULL;

	if (!acc)
		return NULL;

	domain_map = isl_map_domain_map(isl_map_copy(acc->sink.map));
	acc->sink.map = isl_map_range_map(acc->sink.map);
	if (!acc->sink.map)
		goto error;

	if (acc->n_must == 0)
		res = compute_mem_based_dependences(acc);
	else
		res = compute_val_based_dependences(acc);
	if (!res)
		goto error2;

	for (j = 0; j < res->n_source; ++j) {
		res->dep[j].map = isl_map_apply_range(res->dep[j].map,
					isl_map_copy(domain_map));
		if (!res->dep[j].map)
			goto error2;
	}
	if (!res->must_no_source || !res->may_no_source)
		goto error2;

	isl_map_free(domain_map);
	return res;
error:
	isl_map_free(domain_map);
	isl_access_info_free(acc);
	return NULL;
error2:
	isl_map_free(domain_map);
	isl_flow_free(res);
	return NULL;
}
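
/* Illustrative sketch (not part of the original source) of how the low-level
 * interface implemented above fits together; the maps, user pointers and
 * the "stmt_level_before" and "print_dep" callbacks are the hypothetical
 * placeholders from the earlier sketches.
 *
 *	isl_access_info *acc;
 *	isl_flow *deps;
 *	isl_map *undefined;
 *
 *	acc = isl_access_info_alloc(isl_map_copy(sink_access), sink_stmt,
 *					&stmt_level_before, 1);
 *	acc = isl_access_info_add_source(acc, isl_map_copy(write_access),
 *					1, write_stmt);
 *	deps = isl_access_info_compute_flow(acc);
 *	isl_flow_foreach(deps, &print_dep, NULL);
 *	undefined = isl_flow_get_no_source(deps, 1);
 *	isl_map_free(undefined);
 *	isl_flow_free(deps);
 */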
/* Keep track of some information about a schedule for a given
 * access.  In particular, keep track of which dimensions
 * have a constant value and of the actual constant values.
 */
struct isl_sched_info {
	int *is_cst;
	isl_vec *cst;
};

static void sched_info_free(__isl_take struct isl_sched_info *info)
{
	if (!info)
		return;
	free(info->is_cst);
	isl_vec_free(info->cst);
	free(info);
}
/* Extract information on the constant dimensions of the schedule
 * for a given access.  The "map" is of the form
 *
 *	[S -> D] -> A
 *
 * with S the schedule domain, D the iteration domain and A the data domain.
 */
static __isl_give struct isl_sched_info *sched_info_alloc(
	__isl_keep isl_map *map)
{
	isl_ctx *ctx;
	isl_space *dim;
	struct isl_sched_info *info;
	int i, n;

	if (!map)
		return NULL;

	dim = isl_space_unwrap(isl_space_domain(isl_map_get_space(map)));
	if (!dim)
		return NULL;
	n = isl_space_dim(dim, isl_dim_in);
	isl_space_free(dim);

	ctx = isl_map_get_ctx(map);
	info = isl_alloc_type(ctx, struct isl_sched_info);
	if (!info)
		return NULL;
	info->is_cst = isl_alloc_array(ctx, int, n);
	info->cst = isl_vec_alloc(ctx, n);
	if (!info->is_cst || !info->cst)
		goto error;

	for (i = 0; i < n; ++i)
		info->is_cst[i] = isl_map_plain_is_fixed(map, isl_dim_in, i,
							&info->cst->el[i]);

	return info;
error:
	sched_info_free(info);
	return NULL;
}
struct isl_compute_flow_data {
	isl_union_map *must_source;
	isl_union_map *may_source;
	isl_union_map *must_dep;
	isl_union_map *may_dep;
	isl_union_map *must_no_source;
	isl_union_map *may_no_source;

	int count;
	int must;
	isl_space *dim;
	struct isl_sched_info *sink_info;
	struct isl_sched_info **source_info;
	isl_access_info *accesses;
};
static int count_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);
	isl_map_free(map);

	if (eq < 0)
		return -1;
	if (eq)
		data->count++;

	return 0;
}
static int collect_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_sched_info *info;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);

	if (eq < 0)
		goto error;
	if (!eq) {
		isl_map_free(map);
		return 0;
	}

	info = sched_info_alloc(map);
	data->source_info[data->count] = info;

	data->accesses = isl_access_info_add_source(data->accesses,
						    map, data->must, info);

	data->count++;

	return 0;
error:
	isl_map_free(map);
	return -1;
}
/* Determine the shared nesting level and the "textual order" of
 * the given accesses.
 *
 * We first determine the minimal schedule dimension for both accesses.
 *
 * If among those dimensions, we can find one where both have a fixed
 * value and if moreover those values are different, then the previous
 * dimension is the last shared nesting level and the textual order
 * is determined based on the order of the fixed values.
 * If no such fixed values can be found, then we set the shared
 * nesting level to the minimal schedule dimension, with no textual ordering.
 */
static int before(void *first, void *second)
{
	struct isl_sched_info *info1 = first;
	struct isl_sched_info *info2 = second;
	int n1, n2;
	int i;

	n1 = info1->cst->size;
	n2 = info2->cst->size;

	if (n2 < n1)
		n1 = n2;

	for (i = 0; i < n1; ++i) {
		if (!info1->is_cst[i])
			continue;
		if (!info2->is_cst[i])
			continue;
		if (isl_int_eq(info1->cst->el[i], info2->cst->el[i]))
			continue;
		return 2 * i + isl_int_lt(info1->cst->el[i], info2->cst->el[i]);
	}

	return 2 * n1;
}
/* Given a sink access, look for all the source accesses that access
 * the same array and perform dataflow analysis on them using
 * isl_access_info_compute_flow.
 */
static int compute_flow(__isl_take isl_map *map, void *user)
{
	int i;
	isl_ctx *ctx;
	struct isl_compute_flow_data *data;
	isl_flow *flow;

	data = (struct isl_compute_flow_data *)user;

	ctx = isl_map_get_ctx(map);

	data->accesses = NULL;
	data->sink_info = NULL;
	data->source_info = NULL;
	data->count = 0;
	data->dim = isl_space_range(isl_map_get_space(map));

	if (isl_union_map_foreach_map(data->must_source,
					&count_matching_array, data) < 0)
		goto error;
	if (isl_union_map_foreach_map(data->may_source,
					&count_matching_array, data) < 0)
		goto error;

	data->sink_info = sched_info_alloc(map);
	data->source_info = isl_calloc_array(ctx, struct isl_sched_info *,
					     data->count);

	data->accesses = isl_access_info_alloc(isl_map_copy(map),
				data->sink_info, &before, data->count);
	if (!data->sink_info || !data->source_info || !data->accesses)
		goto error;
	data->count = 0;
	data->must = 1;
	if (isl_union_map_foreach_map(data->must_source,
					&collect_matching_array, data) < 0)
		goto error;
	data->must = 0;
	if (isl_union_map_foreach_map(data->may_source,
					&collect_matching_array, data) < 0)
		goto error;

	flow = isl_access_info_compute_flow(data->accesses);
	data->accesses = NULL;

	if (!flow)
		goto error;

	data->must_no_source = isl_union_map_union(data->must_no_source,
		    isl_union_map_from_map(isl_flow_get_no_source(flow, 1)));
	data->may_no_source = isl_union_map_union(data->may_no_source,
		    isl_union_map_from_map(isl_flow_get_no_source(flow, 0)));

	for (i = 0; i < flow->n_source; ++i) {
		isl_union_map *dep;
		dep = isl_union_map_from_map(isl_map_copy(flow->dep[i].map));
		if (flow->dep[i].must)
			data->must_dep = isl_union_map_union(data->must_dep, dep);
		else
			data->may_dep = isl_union_map_union(data->may_dep, dep);
	}

	isl_flow_free(flow);

	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return 0;
error:
	isl_access_info_free(data->accesses);
	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return -1;
}
/* Given a collection of "sink" and "source" accesses,
 * compute for each iteration of a sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result consists of relations between source and sink
 * iterations and a subset of the domain of the sink accesses,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * We first prepend the schedule dimensions to the domain
 * of the accesses so that we can easily compare their relative order.
 * Then we consider each sink access individually in compute_flow.
 */
int isl_union_map_compute_flow(__isl_take isl_union_map *sink,
	__isl_take isl_union_map *must_source,
	__isl_take isl_union_map *may_source,
	__isl_take isl_union_map *schedule,
	__isl_give isl_union_map **must_dep, __isl_give isl_union_map **may_dep,
	__isl_give isl_union_map **must_no_source,
	__isl_give isl_union_map **may_no_source)
{
	isl_space *dim;
	isl_union_map *range_map = NULL;
	struct isl_compute_flow_data data;

	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_space(must_source));
	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_space(may_source));
	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_space(schedule));
	dim = isl_union_map_get_space(sink);
	must_source = isl_union_map_align_params(must_source, isl_space_copy(dim));
	may_source = isl_union_map_align_params(may_source, isl_space_copy(dim));
	schedule = isl_union_map_align_params(schedule, isl_space_copy(dim));

	schedule = isl_union_map_reverse(schedule);
	range_map = isl_union_map_range_map(schedule);
	schedule = isl_union_map_reverse(isl_union_map_copy(range_map));
	sink = isl_union_map_apply_domain(sink, isl_union_map_copy(schedule));
	must_source = isl_union_map_apply_domain(must_source,
						isl_union_map_copy(schedule));
	may_source = isl_union_map_apply_domain(may_source, schedule);

	data.must_source = must_source;
	data.may_source = may_source;
	data.must_dep = must_dep ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_dep = may_dep ? isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.must_no_source = must_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_no_source = may_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;

	isl_space_free(dim);

	if (isl_union_map_foreach_map(sink, &compute_flow, &data) < 0)
		goto error;

	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);

	if (must_dep) {
		data.must_dep = isl_union_map_apply_domain(data.must_dep,
					isl_union_map_copy(range_map));
		data.must_dep = isl_union_map_apply_range(data.must_dep,
					isl_union_map_copy(range_map));
		*must_dep = data.must_dep;
	}
	if (may_dep) {
		data.may_dep = isl_union_map_apply_domain(data.may_dep,
					isl_union_map_copy(range_map));
		data.may_dep = isl_union_map_apply_range(data.may_dep,
					isl_union_map_copy(range_map));
		*may_dep = data.may_dep;
	}
	if (must_no_source) {
		data.must_no_source = isl_union_map_apply_domain(
			data.must_no_source, isl_union_map_copy(range_map));
		*must_no_source = data.must_no_source;
	}
	if (may_no_source) {
		data.may_no_source = isl_union_map_apply_domain(
			data.may_no_source, isl_union_map_copy(range_map));
		*may_no_source = data.may_no_source;
	}

	isl_union_map_free(range_map);

	return 0;
error:
	isl_union_map_free(range_map);
	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);
	isl_union_map_free(data.must_dep);
	isl_union_map_free(data.may_dep);
	isl_union_map_free(data.must_no_source);
	isl_union_map_free(data.may_no_source);

	if (must_dep)
		*must_dep = NULL;
	if (may_dep)
		*may_dep = NULL;
	if (must_no_source)
		*must_no_source = NULL;
	if (may_no_source)
		*may_no_source = NULL;
	return -1;
}
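
/* Illustrative sketch (not part of the original source): computing the flow
 * dependences of a simple write/read pair through the function above.
 * The access relations and the schedule are written as strings; the
 * program they model is made up for the example.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_map *sink = isl_union_map_read_from_str(ctx,
 *		"{ R[i] -> A[i] : 0 <= i < 100 }");
 *	isl_union_map *must_source = isl_union_map_read_from_str(ctx,
 *		"{ W[i] -> A[i] : 0 <= i < 100 }");
 *	isl_union_map *may_source = isl_union_map_read_from_str(ctx, "{ }");
 *	isl_union_map *schedule = isl_union_map_read_from_str(ctx,
 *		"{ W[i] -> [0, i]; R[i] -> [1, i] }");
 *	isl_union_map *must_dep, *may_dep, *must_no_source, *may_no_source;
 *
 *	isl_union_map_compute_flow(sink, must_source, may_source, schedule,
 *		&must_dep, &may_dep, &must_no_source, &may_no_source);
 *
 *	// Every read R[i] sees the value written by W[i], so must_dep is
 *	// { W[i] -> R[i] : 0 <= i < 100 } and must_no_source is empty.
 *	isl_union_map_dump(must_dep);
 *
 *	isl_union_map_free(must_dep);
 *	isl_union_map_free(may_dep);
 *	isl_union_map_free(must_no_source);
 *	isl_union_map_free(may_no_source);
 *	isl_ctx_free(ctx);
 */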