/*
 * Copyright 2005-2007 Universiteit Leiden
 * Copyright 2008-2009 Katholieke Universiteit Leuven
 * Copyright 2010      INRIA Saclay
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, Leiden Institute of Advanced Computer Science,
 * Universiteit Leiden, Niels Bohrweg 1, 2333 CA Leiden, The Netherlands
 * and K.U.Leuven, Departement Computerwetenschappen, Celestijnenlaan 200A,
 * B-3001 Leuven, Belgium
 * and INRIA Saclay - Ile-de-France, Parc Club Orsay Universite,
 * ZAC des vignes, 4 rue Jacques Monod, 91893 Orsay, France
 */
/* A private structure to keep track of a mapping together with
 * a user-specified identifier and a boolean indicating whether
 * the map represents a must or may access/dependence.
 */
struct isl_labeled_map {
	struct isl_map		*map;
	void			*data;
	int			must;
};
/* A structure containing the input for dependence analysis:
 * - a sink
 * - n_must + n_may (<= max_source) sources
 * - a function for determining the relative order of sources and sink
 * The must sources are placed before the may sources.
 */
struct isl_access_info {
	struct isl_labeled_map	sink;
	isl_access_level_before	level_before;
	int			max_source;
	int			n_must;
	int			n_may;
	struct isl_labeled_map	source[1];
};
/* A structure containing the output of dependence analysis:
 * - n_source dependences
 * - a subset of the sink for which definitely no source could be found
 * - a subset of the sink for which possibly no source could be found
 */
struct isl_flow {
	isl_set			*must_no_source;
	isl_set			*may_no_source;

	int			n_source;
	struct isl_labeled_map	*dep;
};
/* Construct an isl_access_info structure and fill it up with
 * the given data.  The number of sources is set to 0.
 */
__isl_give isl_access_info *isl_access_info_alloc(__isl_take isl_map *sink,
	void *sink_user, isl_access_level_before fn, int max_source)
{
	isl_ctx *ctx;
	struct isl_access_info *acc;

	if (!sink)
		return NULL;

	ctx = isl_map_get_ctx(sink);
	isl_assert(ctx, max_source >= 0, goto error);

	acc = isl_alloc(ctx, struct isl_access_info,
			sizeof(struct isl_access_info) +
			(max_source - 1) * sizeof(struct isl_labeled_map));
	if (!acc)
		goto error;

	acc->sink.map = sink;
	acc->sink.data = sink_user;
	acc->level_before = fn;
	acc->max_source = max_source;
	acc->n_must = 0;
	acc->n_may = 0;

	return acc;
error:
	isl_map_free(sink);
	return NULL;
}
/* Free the given isl_access_info structure.
 */
void isl_access_info_free(__isl_take isl_access_info *acc)
{
	int i;

	if (!acc)
		return;
	isl_map_free(acc->sink.map);
	for (i = 0; i < acc->n_must + acc->n_may; ++i)
		isl_map_free(acc->source[i].map);
	free(acc);
}
/* Add another source to an isl_access_info structure, making
 * sure the "must" sources are placed before the "may" sources.
 * This function may be called at most max_source times on a
 * given isl_access_info structure, with max_source as specified
 * in the call to isl_access_info_alloc that constructed the structure.
 */
__isl_give isl_access_info *isl_access_info_add_source(
	__isl_take isl_access_info *acc, __isl_take isl_map *source,
	int must, void *source_user)
{
	isl_ctx *ctx;

	if (!acc)
		return NULL;
	ctx = isl_map_get_ctx(acc->sink.map);
	isl_assert(ctx, acc->n_must + acc->n_may < acc->max_source, goto error);

	if (must) {
		acc->source[acc->n_must + acc->n_may] =
			acc->source[acc->n_must];
		acc->source[acc->n_must].map = source;
		acc->source[acc->n_must].data = source_user;
		acc->source[acc->n_must].must = 1;
		acc->n_must++;
	} else {
		acc->source[acc->n_must + acc->n_may].map = source;
		acc->source[acc->n_must + acc->n_may].data = source_user;
		acc->source[acc->n_must + acc->n_may].must = 0;
		acc->n_may++;
	}

	return acc;
error:
	isl_map_free(source);
	isl_access_info_free(acc);
	return NULL;
}
/* A temporary structure used while sorting the accesses in an isl_access_info.
 */
struct isl_access_sort_info {
	struct isl_map		*source_map;
	void			*source_data;
	struct isl_access_info	*acc;
};
/* Return -n, 0 or n (with n a positive value), depending on whether
 * the source access identified by p1 should be sorted before, together
 * or after that identified by p2.
 * If p1 and p2 share a different number of levels with the sink,
 * then the one with the lowest number of shared levels should be
 * sorted first.
 * If they both share no levels, then the order is irrelevant.
 * Otherwise, if p1 appears before p2, then it should be sorted first.
 * For more generic initial schedules, it is possible that neither
 * p1 nor p2 appears before the other, or at least not in any obvious way.
 * We therefore also check if p2 appears before p1, in which case p2
 * should be sorted first.
 * If not, we try to order the two statements based on the description
 * of the iteration domains.  This results in an arbitrary, but fairly
 * stable ordering.
 */
static int access_sort_cmp(const void *p1, const void *p2)
{
	const struct isl_access_sort_info *i1, *i2;
	int level1, level2;
	uint32_t h1, h2;

	i1 = (const struct isl_access_sort_info *) p1;
	i2 = (const struct isl_access_sort_info *) p2;

	level1 = i1->acc->level_before(i1->source_data, i1->acc->sink.data);
	level2 = i2->acc->level_before(i2->source_data, i2->acc->sink.data);

	if (level1 != level2 || !level1)
		return level1 - level2;

	level1 = i1->acc->level_before(i1->source_data, i2->source_data);
	if (level1 % 2)
		return -1;

	level2 = i1->acc->level_before(i2->source_data, i1->source_data);
	if (level2 % 2)
		return 1;

	h1 = isl_map_get_hash(i1->source_map);
	h2 = isl_map_get_hash(i2->source_map);
	return h1 > h2 ? 1 : h1 < h2 ? -1 : 0;
}
/* Sort the must source accesses in order of increasing number of shared
 * levels with the sink access.
 * Source accesses with the same number of shared levels are sorted
 * in their textual order.
 */
static __isl_give isl_access_info *isl_access_info_sort_sources(
	__isl_take isl_access_info *acc)
{
	int i;
	isl_ctx *ctx;
	struct isl_access_sort_info *array;

	if (!acc)
		return NULL;
	if (acc->n_must <= 1)
		return acc;

	ctx = isl_map_get_ctx(acc->sink.map);
	array = isl_alloc_array(ctx, struct isl_access_sort_info, acc->n_must);
	if (!array)
		goto error;

	for (i = 0; i < acc->n_must; ++i) {
		array[i].source_map = acc->source[i].map;
		array[i].source_data = acc->source[i].data;
		array[i].acc = acc;
	}

	qsort(array, acc->n_must, sizeof(struct isl_access_sort_info),
		access_sort_cmp);

	for (i = 0; i < acc->n_must; ++i) {
		acc->source[i].map = array[i].source_map;
		acc->source[i].data = array[i].source_data;
	}

	free(array);

	return acc;
error:
	isl_access_info_free(acc);
	return NULL;
}
/* Initialize an empty isl_flow structure corresponding to a given
 * isl_access_info structure.
 * For each must access, two dependences are created (initialized
 * to the empty relation), one for the resulting must dependences
 * and one for the resulting may dependences.  May accesses can
 * only lead to may dependences, so only one dependence is created
 * for each of them.
 * This function is private as isl_flow structures are only supposed
 * to be created by isl_access_info_compute_flow.
 */
static __isl_give isl_flow *isl_flow_alloc(__isl_keep isl_access_info *acc)
{
	int i;
	isl_ctx *ctx;
	struct isl_flow *dep;

	ctx = isl_map_get_ctx(acc->sink.map);
	dep = isl_calloc_type(ctx, struct isl_flow);
	if (!dep)
		return NULL;

	dep->dep = isl_calloc_array(ctx, struct isl_labeled_map,
					2 * acc->n_must + acc->n_may);
	if (!dep->dep)
		goto error;

	dep->n_source = 2 * acc->n_must + acc->n_may;
	for (i = 0; i < acc->n_must; ++i) {
		struct isl_dim *dim;
		dim = isl_dim_join(isl_map_get_dim(acc->source[i].map),
			    isl_dim_reverse(isl_map_get_dim(acc->sink.map)));
		dep->dep[2 * i].map = isl_map_empty(dim);
		dep->dep[2 * i + 1].map = isl_map_copy(dep->dep[2 * i].map);
		dep->dep[2 * i].data = acc->source[i].data;
		dep->dep[2 * i + 1].data = acc->source[i].data;
		dep->dep[2 * i].must = 1;
		dep->dep[2 * i + 1].must = 0;
		if (!dep->dep[2 * i].map || !dep->dep[2 * i + 1].map)
			goto error;
	}
	for (i = acc->n_must; i < acc->n_must + acc->n_may; ++i) {
		struct isl_dim *dim;
		dim = isl_dim_join(isl_map_get_dim(acc->source[i].map),
			    isl_dim_reverse(isl_map_get_dim(acc->sink.map)));
		dep->dep[acc->n_must + i].map = isl_map_empty(dim);
		dep->dep[acc->n_must + i].data = acc->source[i].data;
		dep->dep[acc->n_must + i].must = 0;
		if (!dep->dep[acc->n_must + i].map)
			goto error;
	}

	return dep;
error:
	isl_flow_free(dep);
	return NULL;
}
/* Iterate over all sources and for each resulting flow dependence
 * that is not empty, call the user specified function.
 * The second argument in this function call identifies the source,
 * while the third argument corresponds to the final argument of
 * the isl_flow_foreach call.
 */
int isl_flow_foreach(__isl_keep isl_flow *deps,
	int (*fn)(__isl_take isl_map *dep, int must, void *dep_user, void *user),
	void *user)
{
	int i;

	if (!deps)
		return -1;

	for (i = 0; i < deps->n_source; ++i) {
		if (isl_map_fast_is_empty(deps->dep[i].map))
			continue;
		if (fn(isl_map_copy(deps->dep[i].map), deps->dep[i].must,
				deps->dep[i].data, user) < 0)
			return -1;
	}

	return 0;
}
/* Return a copy of the subset of the sink for which no source could be found.
 */
__isl_give isl_set *isl_flow_get_no_source(__isl_keep isl_flow *deps, int must)
{
	if (!deps)
		return NULL;

	if (must)
		return isl_set_copy(deps->must_no_source);
	else
		return isl_set_copy(deps->may_no_source);
}
void isl_flow_free(__isl_take isl_flow *deps)
{
	int i;

	if (!deps)
		return;
	isl_set_free(deps->must_no_source);
	isl_set_free(deps->may_no_source);
	if (deps->dep) {
		for (i = 0; i < deps->n_source; ++i)
			isl_map_free(deps->dep[i].map);
		free(deps->dep);
	}
	free(deps);
}
/* Return a map that enforces that the domain iteration occurs after
 * the range iteration at the given level.
 * If level is odd, then the domain iteration should occur after
 * the target iteration in their shared level/2 outermost loops.
 * In this case we simply need to enforce that these outermost
 * loop iterations are the same.
 * If level is even, then the loop iterator of the domain should
 * be greater than the loop iterator of the range at the last
 * of the level/2 shared loops, i.e., loop level/2 - 1.
 */
static __isl_give isl_map *after_at_level(struct isl_dim *dim, int level)
{
	struct isl_basic_map *bmap;

	if (level % 2)
		bmap = isl_basic_map_equal(dim, level/2);
	else
		bmap = isl_basic_map_more_at(dim, level/2 - 1);

	return isl_map_from_basic_map(bmap);
}
/* Compute the last iteration of must source j that precedes the sink
 * at the given level for sink iterations in set_C.
 * The subset of set_C for which no such iteration can be found is returned
 * in *empty.
 */
static struct isl_map *last_source(struct isl_access_info *acc,
	struct isl_set *set_C,
	int j, int level, struct isl_set **empty)
{
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after;
	struct isl_map *result;

	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_dim(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);
	result = isl_map_partial_lexmax(dep_map, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* For a given mapping between iterations of must source j and iterations
 * of the sink, compute the last iteration of must source k preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source j at level
 * after_level.
 */
static struct isl_map *last_later_source(struct isl_access_info *acc,
	struct isl_map *old_map,
	int j, int before_level,
	int k, int after_level,
	struct isl_set **empty)
{
	struct isl_dim *dim;
	struct isl_set *set_C;
	struct isl_map *read_map;
	struct isl_map *write_map;
	struct isl_map *dep_map;
	struct isl_map *after_write;
	struct isl_map *before_read;
	struct isl_map *result;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	write_map = isl_map_copy(acc->source[k].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = isl_dim_join(isl_map_get_dim(acc->source[k].map),
		    isl_dim_reverse(isl_map_get_dim(acc->source[j].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_dim(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	result = isl_map_partial_lexmax(dep_map, set_C, empty);
	result = isl_map_reverse(result);

	return result;
}
/* Given a shared_level between two accesses, return 1 if
 * the first can precede the second at the requested target_level.
 * If the target level is odd, i.e., refers to a statement level
 * dimension, then first needs to precede second at the requested
 * level, i.e., shared_level must be equal to target_level.
 * If the target level is even, then the two loops should share
 * at least the requested number of outer loops.
 */
static int can_precede_at_level(int shared_level, int target_level)
{
	if (shared_level < target_level)
		return 0;
	if ((target_level % 2) && shared_level > target_level)
		return 0;
	return 1;
}
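
/* Illustrative note added for clarity (not part of the original isl source):
 * levels are numbered as described before compute_val_based_dependences()
 * below: the outermost statement level is 1, the first loop level is 2,
 * the statement level inside the first loop is 3, and so on.
 */
#if 0
	/* Hypothetical shared levels, for illustration only:
	 * 3 = one shared loop, source textually before the sink inside it;
	 * 2 = one shared loop, source textually after the sink. */
	can_precede_at_level(3, 2);	/* 1: can precede at the shared loop level */
	can_precede_at_level(3, 3);	/* 1: and at the statement level inside it */
	can_precede_at_level(3, 5);	/* 0: that would require two shared loops */
	can_precede_at_level(2, 3);	/* 0: a textually later source can only
					 * precede through an earlier iteration
					 * of the shared loop (level 2) */
#endif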
/* Given a possible flow dependence temp_rel[j] between source j and the sink
 * at level sink_level, remove those elements for which
 * there is an iteration of another source k < j that is closer to the sink.
 * The flow dependences temp_rel[k] are updated with the improved sources.
 * Any improved source needs to precede the sink at the same level
 * and needs to follow source j at the same or a deeper level.
 * The lower this level, the later the execution date of source k.
 * We therefore consider lower levels first.
 *
 * If temp_rel[j] is empty, then there can be no improvement and
 * we return immediately.
 */
static int intermediate_sources(__isl_keep isl_access_info *acc,
	struct isl_map **temp_rel, int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[j].map, isl_dim_in) + 1;

	if (isl_map_fast_is_empty(temp_rel[j]))
		return 0;

	for (k = j - 1; k >= 0; --k) {
		int plevel, plevel2;

		plevel = acc->level_before(acc->source[k].data, acc->sink.data);
		if (!can_precede_at_level(plevel, sink_level))
			continue;

		plevel2 = acc->level_before(acc->source[j].data,
						acc->source[k].data);

		for (level = sink_level; level <= depth; ++level) {
			struct isl_map *T;
			struct isl_set *trest;
			struct isl_map *copy;

			if (!can_precede_at_level(plevel2, level))
				continue;

			copy = isl_map_copy(temp_rel[j]);
			T = last_later_source(acc, copy, j, sink_level, k,
					      level, &trest);
			if (isl_map_fast_is_empty(T)) {
				isl_set_free(trest);
				isl_map_free(T);
				continue;
			}
			temp_rel[j] = isl_map_intersect_range(temp_rel[j], trest);
			temp_rel[k] = isl_map_union_disjoint(temp_rel[k], T);
		}
	}

	return 0;
}
/* Compute all iterations of may source j that precede the sink at the given
 * level for sink iterations in set_C.
 */
static __isl_give isl_map *all_sources(__isl_keep isl_access_info *acc,
	__isl_take isl_set *set_C, int j, int level)
{
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after;

	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);
	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	after = after_at_level(isl_map_get_dim(dep_map), level);
	dep_map = isl_map_intersect(dep_map, after);

	return isl_map_reverse(dep_map);
}
/* For a given mapping between iterations of must source k and iterations
 * of the sink, compute all iterations of may source j preceding
 * the sink at level before_level for any of the sink iterations,
 * but following the corresponding iteration of must source k at level
 * after_level.
 */
static __isl_give isl_map *all_later_sources(__isl_keep isl_access_info *acc,
	__isl_keep isl_map *old_map,
	int j, int before_level, int k, int after_level)
{
	isl_dim *dim;
	isl_set *set_C;
	isl_map *read_map;
	isl_map *write_map;
	isl_map *dep_map;
	isl_map *after_write;
	isl_map *before_read;

	set_C = isl_map_range(isl_map_copy(old_map));
	read_map = isl_map_copy(acc->sink.map);
	read_map = isl_map_intersect_domain(read_map, set_C);
	write_map = isl_map_copy(acc->source[acc->n_must + j].map);

	write_map = isl_map_reverse(write_map);
	dep_map = isl_map_apply_range(read_map, write_map);
	dim = isl_dim_join(isl_map_get_dim(acc->source[acc->n_must + j].map),
		    isl_dim_reverse(isl_map_get_dim(acc->source[k].map)));
	after_write = after_at_level(dim, after_level);
	after_write = isl_map_apply_range(after_write, old_map);
	after_write = isl_map_reverse(after_write);
	dep_map = isl_map_intersect(dep_map, after_write);
	before_read = after_at_level(isl_map_get_dim(dep_map), before_level);
	dep_map = isl_map_intersect(dep_map, before_read);
	return isl_map_reverse(dep_map);
}
/* Given the must and may dependence relations for the must accesses
 * for level sink_level, check if there are any accesses of may access j
 * that occur in between and return their union.
 * If some of these accesses are intermediate with respect to
 * (previously thought to be) must dependences, then these
 * must dependences are turned into may dependences.
 */
static __isl_give isl_map *all_intermediate_sources(
	__isl_keep isl_access_info *acc, __isl_take isl_map *map,
	struct isl_map **must_rel, struct isl_map **may_rel,
	int j, int sink_level)
{
	int k, level;
	int depth = 2 * isl_map_dim(acc->source[acc->n_must + j].map,
					isl_dim_in) + 1;

	for (k = 0; k < acc->n_must; ++k) {
		int plevel;

		if (isl_map_fast_is_empty(may_rel[k]) &&
		    isl_map_fast_is_empty(must_rel[k]))
			continue;

		plevel = acc->level_before(acc->source[k].data,
					acc->source[acc->n_must + j].data);

		for (level = sink_level; level <= depth; ++level) {
			isl_map *T;
			isl_map *copy;
			isl_set *ran;

			if (!can_precede_at_level(plevel, level))
				continue;

			copy = isl_map_copy(may_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			map = isl_map_union(map, T);

			copy = isl_map_copy(must_rel[k]);
			T = all_later_sources(acc, copy, j, sink_level, k, level);
			ran = isl_map_range(isl_map_copy(T));
			map = isl_map_union(map, T);
			may_rel[k] = isl_map_union_disjoint(may_rel[k],
			    isl_map_intersect_range(isl_map_copy(must_rel[k]),
						    isl_set_copy(ran)));
			T = isl_map_from_domain_and_range(
			    isl_set_universe(
				isl_dim_domain(isl_map_get_dim(must_rel[k]))),
			    ran);
			must_rel[k] = isl_map_subtract(must_rel[k], T);
		}
	}

	return map;
}
/* Compute dependences for the case where all accesses are "may"
 * accesses, which boils down to computing memory based dependences.
 * The generic algorithm would also work in this case, but it would
 * be overkill to use it.
 */
static __isl_give isl_flow *compute_mem_based_dependences(
	__isl_take isl_access_info *acc)
{
	int i;
	isl_set *mustdo;
	isl_set *maydo;
	isl_flow *res;

	res = isl_flow_alloc(acc);
	if (!res)
		goto error;

	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_copy(mustdo);

	for (i = 0; i < acc->n_may; ++i) {
		int plevel;
		int is_before;
		isl_dim *dim;
		isl_map *before;
		isl_map *dep;

		plevel = acc->level_before(acc->source[i].data, acc->sink.data);
		is_before = plevel & 1;
		plevel >>= 1;

		dim = isl_map_get_dim(res->dep[i].map);
		if (is_before)
			before = isl_map_lex_le_first(dim, plevel);
		else
			before = isl_map_lex_lt_first(dim, plevel);
		dep = isl_map_apply_range(isl_map_copy(acc->source[i].map),
			isl_map_reverse(isl_map_copy(acc->sink.map)));
		dep = isl_map_intersect(dep, before);
		mustdo = isl_set_subtract(mustdo,
					    isl_map_range(isl_map_copy(dep)));
		res->dep[i].map = isl_map_union(res->dep[i].map, dep);
	}

	res->may_no_source = isl_set_subtract(maydo, isl_set_copy(mustdo));
	res->must_no_source = mustdo;

	isl_access_info_free(acc);

	return res;
error:
	isl_access_info_free(acc);
	return NULL;
}
/* Compute dependences for the case where there is at least one
 * must access.
 *
 * The core algorithm considers all levels in which a source may precede
 * the sink, where a level may either be a statement level or a loop level.
 * The outermost statement level is 1, the first loop level is 2, etc...
 * The algorithm basically does the following:
 * for all levels l of the read access from innermost to outermost
 *	for all sources w that may precede the sink access at that level
 *	    compute the last iteration of the source that precedes the sink
 *					    access at that level
 *	    add result to possible last accesses at level l of source w
 *	    for all sources w2 that we haven't considered yet at this level
 *					    that may also precede the sink access
 *		for all levels l2 of w from l to innermost
 *		    for all possible last accesses dep of w at l
 *			compute last iteration of w2 between the source and
 *							sink of dep
 *			add result to possible last accesses at level l of write w2
 *			and replace possible last accesses dep by the remainder
 *
 * The above algorithm is applied to the must accesses.  During the course
 * of the algorithm, we keep track of sink iterations that still
 * need to be considered.  These iterations are split into those that
 * haven't been matched to any source access (mustdo) and those that have only
 * been matched to may accesses (maydo).
 * At the end of each level, we also consider the may accesses.
 * In particular, we consider may accesses that precede the remaining
 * sink iterations, moving elements from mustdo to maydo when appropriate,
 * and may accesses that occur between a must source and a sink of any
 * dependences found at the current level, turning must dependences into
 * may dependences when appropriate.
 */
static __isl_give isl_flow *compute_val_based_dependences(
	__isl_take isl_access_info *acc)
{
	isl_ctx *ctx;
	isl_flow *res;
	isl_set *mustdo = NULL;
	isl_set *maydo = NULL;
	int level, j;
	int depth;
	isl_map **must_rel = NULL;
	isl_map **may_rel = NULL;

	acc = isl_access_info_sort_sources(acc);
	if (!acc)
		return NULL;

	res = isl_flow_alloc(acc);
	if (!res)
		goto error;
	ctx = isl_map_get_ctx(acc->sink.map);

	depth = 2 * isl_map_dim(acc->sink.map, isl_dim_in) + 1;
	mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
	maydo = isl_set_empty_like(mustdo);
	if (!mustdo || !maydo)
		goto error;
	if (isl_set_fast_is_empty(mustdo))
		goto done;

	must_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	may_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
	if (!must_rel || !may_rel)
		goto error;

	for (level = depth; level >= 1; --level) {
		for (j = acc->n_must - 1; j >= 0; --j) {
			must_rel[j] = isl_map_empty_like(res->dep[j].map);
			may_rel[j] = isl_map_copy(must_rel[j]);
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			struct isl_map *T;
			struct isl_set *rest;
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = last_source(acc, mustdo, j, level, &rest);
			must_rel[j] = isl_map_union_disjoint(must_rel[j], T);
			mustdo = rest;

			intermediate_sources(acc, must_rel, j, level);

			T = last_source(acc, maydo, j, level, &rest);
			may_rel[j] = isl_map_union_disjoint(may_rel[j], T);
			maydo = rest;

			intermediate_sources(acc, may_rel, j, level);

			if (isl_set_fast_is_empty(mustdo) &&
			    isl_set_fast_is_empty(maydo))
				break;
		}
		for (j = j - 1; j >= 0; --j) {
			int plevel;

			plevel = acc->level_before(acc->source[j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			intermediate_sources(acc, must_rel, j, level);
			intermediate_sources(acc, may_rel, j, level);
		}

		for (j = 0; j < acc->n_may; ++j) {
			int plevel;
			isl_map *T;
			isl_set *ran;

			plevel = acc->level_before(acc->source[acc->n_must + j].data,
						     acc->sink.data);
			if (!can_precede_at_level(plevel, level))
				continue;

			T = all_sources(acc, isl_set_copy(maydo), j, level);
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			T = all_sources(acc, isl_set_copy(mustdo), j, level);
			ran = isl_map_range(isl_map_copy(T));
			res->dep[2 * acc->n_must + j].map =
			    isl_map_union(res->dep[2 * acc->n_must + j].map, T);
			mustdo = isl_set_subtract(mustdo, isl_set_copy(ran));
			maydo = isl_set_union_disjoint(maydo, ran);

			T = res->dep[2 * acc->n_must + j].map;
			T = all_intermediate_sources(acc, T, must_rel, may_rel,
							j, level);
			res->dep[2 * acc->n_must + j].map = T;
		}

		for (j = acc->n_must - 1; j >= 0; --j) {
			res->dep[2 * j].map =
				isl_map_union_disjoint(res->dep[2 * j].map,
							     must_rel[j]);
			res->dep[2 * j + 1].map =
				isl_map_union_disjoint(res->dep[2 * j + 1].map,
							     may_rel[j]);
		}

		if (isl_set_fast_is_empty(mustdo) &&
		    isl_set_fast_is_empty(maydo))
			break;
	}

	free(must_rel);
	free(may_rel);
done:
	res->must_no_source = mustdo;
	res->may_no_source = maydo;
	isl_access_info_free(acc);
	return res;
error:
	isl_access_info_free(acc);
	isl_flow_free(res);
	isl_set_free(mustdo);
	isl_set_free(maydo);
	free(must_rel);
	free(may_rel);
	return NULL;
}
/* Given a "sink" access, a list of n "source" accesses,
 * compute for each iteration of the sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a list of n relations between source and sink
 * iterations and a subset of the domain of the sink access,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * To deal with multi-valued sink access relations, the sink iteration
 * domain is first extended with dimensions that correspond to the data
 * space.  After the computation is finished, these extra dimensions are
 * projected out again.
 */
__isl_give isl_flow *isl_access_info_compute_flow(__isl_take isl_access_info *acc)
{
	int j;
	struct isl_flow *res;
	isl_map *domain_map = NULL;

	if (!acc)
		return NULL;

	domain_map = isl_map_domain_map(isl_map_copy(acc->sink.map));
	acc->sink.map = isl_map_range_map(acc->sink.map);
	if (!acc->sink.map)
		goto error;

	if (acc->n_must == 0)
		res = compute_mem_based_dependences(acc);
	else
		res = compute_val_based_dependences(acc);
	if (!res)
		goto error2;

	for (j = 0; j < res->n_source; ++j) {
		res->dep[j].map = isl_map_apply_range(res->dep[j].map,
					isl_map_copy(domain_map));
		if (!res->dep[j].map)
			goto error2;
	}
	res->must_no_source = isl_set_apply(res->must_no_source,
					isl_map_copy(domain_map));
	res->may_no_source = isl_set_apply(res->may_no_source,
					isl_map_copy(domain_map));
	if (!res->must_no_source || !res->may_no_source)
		goto error2;

	isl_map_free(domain_map);
	return res;
error:
	isl_map_free(domain_map);
	isl_access_info_free(acc);
	return NULL;
error2:
	isl_map_free(domain_map);
	isl_flow_free(res);
	return NULL;
}
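
/* Added illustrative sketch (not part of the original isl source) of how the
 * interface above is typically used.  The maps "sink" and "write", the user
 * pointers and the "order" callback (of type isl_access_level_before) are
 * assumed to be provided by the caller; the callback receives the user
 * pointers passed below and encodes the relative schedule order of the
 * corresponding accesses.
 */
#if 0
static int collect_dep(__isl_take isl_map *dep, int must, void *dep_user,
	void *user)
{
	/* "dep" maps source iterations to sink iterations; "must" is 1 for
	 * definite dependences and 0 for potential ones.  Inspect or store
	 * the relation here; it has to be freed by this callback. */
	isl_map_free(dep);
	return 0;
}

static void flow_example(__isl_take isl_map *sink, void *sink_user,
	__isl_take isl_map *write, void *write_user,
	isl_access_level_before order)
{
	isl_access_info *acc;
	isl_flow *flow;
	isl_set *no_source;

	acc = isl_access_info_alloc(sink, sink_user, order, 1);
	acc = isl_access_info_add_source(acc, write, 1, write_user);

	flow = isl_access_info_compute_flow(acc);

	isl_flow_foreach(flow, &collect_dep, NULL);
	no_source = isl_flow_get_no_source(flow, 1);
	/* "no_source" holds the sink iterations with no preceding write. */
	isl_set_free(no_source);
	isl_flow_free(flow);
}
#endif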
/* Keep track of some information about a schedule for a given
 * access.  In particular, keep track of which dimensions
 * have a constant value and of the actual constant values.
 */
struct isl_sched_info {
	int *is_cst;
	isl_vec *cst;
};

static void sched_info_free(__isl_take struct isl_sched_info *info)
{
	if (!info)
		return;
	isl_vec_free(info->cst);
	free(info->is_cst);
	free(info);
}
/* Extract information on the constant dimensions of the schedule
 * for a given access.  The "map" is of the form
 *
 *	[S -> D] -> A
 *
 * with S the schedule domain, D the iteration domain and A the data domain.
 */
static __isl_give struct isl_sched_info *sched_info_alloc(
	__isl_keep isl_map *map)
{
	isl_ctx *ctx;
	isl_dim *dim;
	struct isl_sched_info *info;
	int i, n;

	if (!map)
		return NULL;

	dim = isl_dim_unwrap(isl_dim_domain(isl_map_get_dim(map)));
	if (!dim)
		return NULL;
	n = isl_dim_size(dim, isl_dim_in);
	isl_dim_free(dim);

	ctx = isl_map_get_ctx(map);
	info = isl_alloc_type(ctx, struct isl_sched_info);
	if (!info)
		return NULL;
	info->is_cst = isl_alloc_array(ctx, int, n);
	info->cst = isl_vec_alloc(ctx, n);
	if (!info->is_cst || !info->cst)
		goto error;

	for (i = 0; i < n; ++i)
		info->is_cst[i] = isl_map_fast_is_fixed(map, isl_dim_in, i,
							&info->cst->el[i]);

	return info;
error:
	sched_info_free(info);
	return NULL;
}
struct isl_compute_flow_data {
	isl_union_map *must_source;
	isl_union_map *may_source;
	isl_union_map *must_dep;
	isl_union_map *may_dep;
	isl_union_set *must_no_source;
	isl_union_set *may_no_source;

	int count;
	int must;
	isl_dim *dim;
	struct isl_sched_info *sink_info;
	struct isl_sched_info **source_info;
	isl_access_info *accesses;
};

static int count_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_dim *dim;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_dim_range(isl_map_get_dim(map));

	eq = isl_dim_equal(dim, data->dim);

	isl_dim_free(dim);
	isl_map_free(map);

	if (eq < 0)
		return -1;
	if (eq)
		data->count++;

	return 0;
}

static int collect_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_dim *dim;
	struct isl_sched_info *info;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_dim_range(isl_map_get_dim(map));

	eq = isl_dim_equal(dim, data->dim);

	isl_dim_free(dim);

	if (eq < 0)
		goto error;
	if (!eq) {
		isl_map_free(map);
		return 0;
	}

	info = sched_info_alloc(map);
	data->source_info[data->count] = info;

	data->accesses = isl_access_info_add_source(data->accesses,
						    map, data->must, info);

	data->count++;

	return 0;
error:
	isl_map_free(map);
	return -1;
}
/* Determine the shared nesting level and the "textual order" of
 * the given accesses.
 *
 * We first determine the minimal schedule dimension for both accesses.
 *
 * If among those dimensions, we can find one where both have a fixed
 * value and if moreover those values are different, then the previous
 * dimension is the last shared nesting level and the textual order
 * is determined based on the order of the fixed values.
 * If no such fixed values can be found, then we set the shared
 * nesting level to the minimal schedule dimension, with no textual ordering.
 */
static int before(void *first, void *second)
{
	struct isl_sched_info *info1 = first;
	struct isl_sched_info *info2 = second;
	int n1, n2;
	int i;

	n1 = info1->cst->size;
	n2 = info2->cst->size;

	if (n2 < n1)
		n1 = n2;

	for (i = 0; i < n1; ++i) {
		if (!info1->is_cst[i])
			continue;
		if (!info2->is_cst[i])
			continue;
		if (isl_int_eq(info1->cst->el[i], info2->cst->el[i]))
			continue;
		return 2 * i + isl_int_lt(info1->cst->el[i], info2->cst->el[i]);
	}

	return 2 * n1;
}
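
/* Added note (not part of the original isl source): the value computed above
 * follows the isl_access_level_before convention used throughout this file.
 * If, for example, the two schedules first differ in a fixed value at
 * schedule dimension 2, with the first access having the smaller value,
 * before() returns 2 * 2 + 1 = 5: two shared outer schedule dimensions,
 * with the first access textually preceding the second.
 */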
/* Given a sink access, look for all the source accesses that access
 * the same array and perform dataflow analysis on them using
 * isl_access_info_compute_flow.
 */
static int compute_flow(__isl_take isl_map *map, void *user)
{
	int i;
	isl_ctx *ctx;
	struct isl_compute_flow_data *data;
	isl_flow *flow;

	data = (struct isl_compute_flow_data *)user;

	ctx = isl_map_get_ctx(map);

	data->accesses = NULL;
	data->sink_info = NULL;
	data->source_info = NULL;
	data->count = 0;
	data->dim = isl_dim_range(isl_map_get_dim(map));

	if (isl_union_map_foreach_map(data->must_source,
					&count_matching_array, data) < 0)
		goto error;
	if (isl_union_map_foreach_map(data->may_source,
					&count_matching_array, data) < 0)
		goto error;

	data->sink_info = sched_info_alloc(map);
	data->source_info = isl_calloc_array(ctx, struct isl_sched_info *,
					     data->count);

	data->accesses = isl_access_info_alloc(isl_map_copy(map),
				data->sink_info, &before, data->count);
	if (!data->sink_info || !data->source_info || !data->accesses)
		goto error;
	data->count = 0;
	data->must = 1;
	if (isl_union_map_foreach_map(data->must_source,
					&collect_matching_array, data) < 0)
		goto error;
	data->must = 0;
	if (isl_union_map_foreach_map(data->may_source,
					&collect_matching_array, data) < 0)
		goto error;

	flow = isl_access_info_compute_flow(data->accesses);
	data->accesses = NULL;

	if (!flow)
		goto error;

	data->must_no_source = isl_union_set_union(data->must_no_source,
		    isl_union_set_from_set(isl_set_copy(flow->must_no_source)));
	data->may_no_source = isl_union_set_union(data->may_no_source,
		    isl_union_set_from_set(isl_set_copy(flow->may_no_source)));

	for (i = 0; i < flow->n_source; ++i) {
		isl_union_map *dep;
		dep = isl_union_map_from_map(isl_map_copy(flow->dep[i].map));
		if (flow->dep[i].must)
			data->must_dep = isl_union_map_union(data->must_dep, dep);
		else
			data->may_dep = isl_union_map_union(data->may_dep, dep);
	}

	isl_flow_free(flow);

	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_dim_free(data->dim);
	isl_map_free(map);

	return 0;
error:
	isl_access_info_free(data->accesses);
	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_dim_free(data->dim);
	isl_map_free(map);

	return -1;
}
/* Given a collection of "sink" and "source" accesses,
 * compute for each iteration of a sink access
 * and for each element accessed by that iteration,
 * the source access in the list that last accessed the
 * element accessed by the sink access before this sink access.
 * Each access is given as a map from the loop iterators
 * to the array indices.
 * The result is a relation between source and sink
 * iterations and a subset of the domain of the sink accesses,
 * corresponding to those iterations that access an element
 * not previously accessed.
 *
 * We first prepend the schedule dimensions to the domain
 * of the accesses so that we can easily compare their relative order.
 * Then we consider each sink access individually in compute_flow.
 */
int isl_union_map_compute_flow(__isl_take isl_union_map *sink,
	__isl_take isl_union_map *must_source,
	__isl_take isl_union_map *may_source,
	__isl_take isl_union_map *schedule,
	__isl_give isl_union_map **must_dep, __isl_give isl_union_map **may_dep,
	__isl_give isl_union_set **must_no_source,
	__isl_give isl_union_set **may_no_source)
{
	isl_dim *dim;
	isl_union_map *range_map = NULL;
	struct isl_compute_flow_data data;

	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_dim(must_source));
	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_dim(may_source));
	sink = isl_union_map_align_params(sink,
					    isl_union_map_get_dim(schedule));
	dim = isl_union_map_get_dim(sink);
	must_source = isl_union_map_align_params(must_source, isl_dim_copy(dim));
	may_source = isl_union_map_align_params(may_source, isl_dim_copy(dim));
	schedule = isl_union_map_align_params(schedule, isl_dim_copy(dim));

	schedule = isl_union_map_reverse(schedule);
	range_map = isl_union_map_range_map(schedule);
	schedule = isl_union_map_reverse(isl_union_map_copy(range_map));
	sink = isl_union_map_apply_domain(sink, isl_union_map_copy(schedule));
	must_source = isl_union_map_apply_domain(must_source,
						isl_union_map_copy(schedule));
	may_source = isl_union_map_apply_domain(may_source, schedule);

	data.must_source = must_source;
	data.may_source = may_source;
	data.must_dep = must_dep ?
		isl_union_map_empty(isl_dim_copy(dim)) : NULL;
	data.may_dep = may_dep ? isl_union_map_empty(isl_dim_copy(dim)) : NULL;
	data.must_no_source = must_no_source ?
		isl_union_set_empty(isl_dim_copy(dim)) : NULL;
	data.may_no_source = may_no_source ?
		isl_union_set_empty(isl_dim_copy(dim)) : NULL;

	isl_dim_free(dim);

	if (isl_union_map_foreach_map(sink, &compute_flow, &data) < 0)
		goto error;

	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);

	if (must_dep) {
		data.must_dep = isl_union_map_apply_domain(data.must_dep,
					isl_union_map_copy(range_map));
		data.must_dep = isl_union_map_apply_range(data.must_dep,
					isl_union_map_copy(range_map));
		*must_dep = data.must_dep;
	}
	if (may_dep) {
		data.may_dep = isl_union_map_apply_domain(data.may_dep,
					isl_union_map_copy(range_map));
		data.may_dep = isl_union_map_apply_range(data.may_dep,
					isl_union_map_copy(range_map));
		*may_dep = data.may_dep;
	}
	if (must_no_source) {
		data.must_no_source = isl_union_set_apply(data.must_no_source,
					isl_union_map_copy(range_map));
		*must_no_source = data.must_no_source;
	}
	if (may_no_source) {
		data.may_no_source = isl_union_set_apply(data.may_no_source,
					isl_union_map_copy(range_map));
		*may_no_source = data.may_no_source;
	}

	isl_union_map_free(range_map);

	return 0;
error:
	isl_union_map_free(range_map);
	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);
	isl_union_map_free(data.must_dep);
	isl_union_map_free(data.may_dep);
	isl_union_set_free(data.must_no_source);
	isl_union_set_free(data.may_no_source);

	if (must_dep)
		*must_dep = NULL;
	if (may_dep)
		*may_dep = NULL;
	if (must_no_source)
		*must_no_source = NULL;
	if (may_no_source)
		*may_no_source = NULL;
	return -1;
}
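
/* Added illustrative sketch (not part of the original isl source) of how a
 * caller might invoke isl_union_map_compute_flow.  The union maps "sink",
 * "must_write", "may_write" and "schedule" are assumed to have been built
 * elsewhere (e.g., extracted from a program representation); all four are
 * consumed by the call.
 */
#if 0
static void union_flow_example(__isl_take isl_union_map *sink,
	__isl_take isl_union_map *must_write,
	__isl_take isl_union_map *may_write,
	__isl_take isl_union_map *schedule)
{
	isl_union_map *must_dep;
	isl_union_map *may_dep;
	isl_union_set *must_no_source;
	isl_union_set *may_no_source;

	if (isl_union_map_compute_flow(sink, must_write, may_write, schedule,
				&must_dep, &may_dep,
				&must_no_source, &may_no_source) < 0)
		return;

	/* must_dep/may_dep map source iterations to sink iterations;
	 * must_no_source/may_no_source are the sink iterations for which
	 * no (definite) source was found.  Inspect or store them here. */

	isl_union_map_free(must_dep);
	isl_union_map_free(may_dep);
	isl_union_set_free(must_no_source);
	isl_union_set_free(may_no_source);
}
#endif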