2 * Copyright 2005-2007 Universiteit Leiden
3 * Copyright 2008-2009 Katholieke Universiteit Leuven
4 * Copyright 2010 INRIA Saclay
6 * Use of this software is governed by the GNU LGPLv2.1 license
8 * Written by Sven Verdoolaege, Leiden Institute of Advanced Computer Science,
9 * Universiteit Leiden, Niels Bohrweg 1, 2333 CA Leiden, The Netherlands
10 * and K.U.Leuven, Departement Computerwetenschappen, Celestijnenlaan 200A,
11 * B-3001 Leuven, Belgium
12 * and INRIA Saclay - Ile-de-France, Parc Club Orsay Universite,
13 * ZAC des vignes, 4 rue Jacques Monod, 91893 Orsay, France
19 #include <isl_qsort.h>
/* A private structure to keep track of a mapping together with
 * a user-specified identifier and a boolean indicating whether
 * the map represents a must or may access/dependence.
 *
 * NOTE(review): field list reconstructed from uses elsewhere in this
 * file (.map, .data, .must) — confirm against upstream isl_flow.c.
 */
struct isl_labeled_map {
	struct isl_map	*map;	/* the access/dependence relation */
	void		*data;	/* user-specified identifier */
	int		must;	/* 1 for a must access/dependence, 0 for may */
};
31 /* A structure containing the input for dependence analysis:
33 * - n_must + n_may (<= max_source) sources
34 * - a function for determining the relative order of sources and sink
35 * The must sources are placed before the may sources.
37 * domain_map is an auxiliary map that maps the sink access relation
38 * to the domain of this access relation.
40 * restrict_sources is a callback that (if not NULL) will be called
41 * right before any lexicographical maximization.
43 struct isl_access_info
{
45 struct isl_labeled_map sink
;
46 isl_access_level_before level_before
;
47 isl_access_restrict_sources restrict_sources
;
51 struct isl_labeled_map source
[1];
54 /* A structure containing the output of dependence analysis:
55 * - n_source dependences
56 * - a wrapped subset of the sink for which definitely no source could be found
57 * - a wrapped subset of the sink for which possibly no source could be found
60 isl_set
*must_no_source
;
61 isl_set
*may_no_source
;
63 struct isl_labeled_map
*dep
;
66 /* Construct an isl_access_info structure and fill it up with
67 * the given data. The number of sources is set to 0.
69 __isl_give isl_access_info
*isl_access_info_alloc(__isl_take isl_map
*sink
,
70 void *sink_user
, isl_access_level_before fn
, int max_source
)
73 struct isl_access_info
*acc
;
78 ctx
= isl_map_get_ctx(sink
);
79 isl_assert(ctx
, max_source
>= 0, goto error
);
81 acc
= isl_calloc(ctx
, struct isl_access_info
,
82 sizeof(struct isl_access_info
) +
83 (max_source
- 1) * sizeof(struct isl_labeled_map
));
88 acc
->sink
.data
= sink_user
;
89 acc
->level_before
= fn
;
90 acc
->max_source
= max_source
;
100 /* Free the given isl_access_info structure.
102 void isl_access_info_free(__isl_take isl_access_info
*acc
)
108 isl_map_free(acc
->domain_map
);
109 isl_map_free(acc
->sink
.map
);
110 for (i
= 0; i
< acc
->n_must
+ acc
->n_may
; ++i
)
111 isl_map_free(acc
->source
[i
].map
);
115 isl_ctx
*isl_access_info_get_ctx(__isl_keep isl_access_info
*acc
)
117 return acc
? isl_map_get_ctx(acc
->sink
.map
) : NULL
;
120 __isl_give isl_access_info
*isl_access_info_set_restrict_sources(
121 __isl_take isl_access_info
*acc
, isl_access_restrict_sources fn
)
125 acc
->restrict_sources
= fn
;
129 /* Add another source to an isl_access_info structure, making
130 * sure the "must" sources are placed before the "may" sources.
131 * This function may be called at most max_source times on a
132 * given isl_access_info structure, with max_source as specified
133 * in the call to isl_access_info_alloc that constructed the structure.
135 __isl_give isl_access_info
*isl_access_info_add_source(
136 __isl_take isl_access_info
*acc
, __isl_take isl_map
*source
,
137 int must
, void *source_user
)
143 ctx
= isl_map_get_ctx(acc
->sink
.map
);
144 isl_assert(ctx
, acc
->n_must
+ acc
->n_may
< acc
->max_source
, goto error
);
148 acc
->source
[acc
->n_must
+ acc
->n_may
] =
149 acc
->source
[acc
->n_must
];
150 acc
->source
[acc
->n_must
].map
= source
;
151 acc
->source
[acc
->n_must
].data
= source_user
;
152 acc
->source
[acc
->n_must
].must
= 1;
155 acc
->source
[acc
->n_must
+ acc
->n_may
].map
= source
;
156 acc
->source
[acc
->n_must
+ acc
->n_may
].data
= source_user
;
157 acc
->source
[acc
->n_must
+ acc
->n_may
].must
= 0;
163 isl_map_free(source
);
164 isl_access_info_free(acc
);
168 /* Return -n, 0 or n (with n a positive value), depending on whether
169 * the source access identified by p1 should be sorted before, together
170 * or after that identified by p2.
172 * If p1 appears before p2, then it should be sorted first.
173 * For more generic initial schedules, it is possible that neither
174 * p1 nor p2 appears before the other, or at least not in any obvious way.
175 * We therefore also check if p2 appears before p1, in which case p2
176 * should be sorted first.
177 * If not, we try to order the two statements based on the description
178 * of the iteration domains. This results in an arbitrary, but fairly
181 static int access_sort_cmp(const void *p1
, const void *p2
, void *user
)
183 isl_access_info
*acc
= user
;
184 const struct isl_labeled_map
*i1
, *i2
;
187 i1
= (const struct isl_labeled_map
*) p1
;
188 i2
= (const struct isl_labeled_map
*) p2
;
190 level1
= acc
->level_before(i1
->data
, i2
->data
);
194 level2
= acc
->level_before(i2
->data
, i1
->data
);
198 h1
= isl_map_get_hash(i1
->map
);
199 h2
= isl_map_get_hash(i2
->map
);
200 return h1
> h2
? 1 : h1
< h2
? -1 : 0;
203 /* Sort the must source accesses in their textual order.
205 static __isl_give isl_access_info
*isl_access_info_sort_sources(
206 __isl_take isl_access_info
*acc
)
210 if (acc
->n_must
<= 1)
213 isl_quicksort(acc
->source
, acc
->n_must
, sizeof(struct isl_labeled_map
),
214 access_sort_cmp
, acc
);
219 /* Align the parameters of the two spaces if needed and then call
222 static __isl_give isl_space
*space_align_and_join(__isl_take isl_space
*left
,
223 __isl_take isl_space
*right
)
225 if (isl_space_match(left
, isl_dim_param
, right
, isl_dim_param
))
226 return isl_space_join(left
, right
);
228 left
= isl_space_align_params(left
, isl_space_copy(right
));
229 right
= isl_space_align_params(right
, isl_space_copy(left
));
230 return isl_space_join(left
, right
);
233 /* Initialize an empty isl_flow structure corresponding to a given
234 * isl_access_info structure.
235 * For each must access, two dependences are created (initialized
236 * to the empty relation), one for the resulting must dependences
237 * and one for the resulting may dependences. May accesses can
238 * only lead to may dependences, so only one dependence is created
240 * This function is private as isl_flow structures are only supposed
241 * to be created by isl_access_info_compute_flow.
243 static __isl_give isl_flow
*isl_flow_alloc(__isl_keep isl_access_info
*acc
)
247 struct isl_flow
*dep
;
252 ctx
= isl_map_get_ctx(acc
->sink
.map
);
253 dep
= isl_calloc_type(ctx
, struct isl_flow
);
257 dep
->dep
= isl_calloc_array(ctx
, struct isl_labeled_map
,
258 2 * acc
->n_must
+ acc
->n_may
);
262 dep
->n_source
= 2 * acc
->n_must
+ acc
->n_may
;
263 for (i
= 0; i
< acc
->n_must
; ++i
) {
265 dim
= space_align_and_join(
266 isl_map_get_space(acc
->source
[i
].map
),
267 isl_space_reverse(isl_map_get_space(acc
->sink
.map
)));
268 dep
->dep
[2 * i
].map
= isl_map_empty(dim
);
269 dep
->dep
[2 * i
+ 1].map
= isl_map_copy(dep
->dep
[2 * i
].map
);
270 dep
->dep
[2 * i
].data
= acc
->source
[i
].data
;
271 dep
->dep
[2 * i
+ 1].data
= acc
->source
[i
].data
;
272 dep
->dep
[2 * i
].must
= 1;
273 dep
->dep
[2 * i
+ 1].must
= 0;
274 if (!dep
->dep
[2 * i
].map
|| !dep
->dep
[2 * i
+ 1].map
)
277 for (i
= acc
->n_must
; i
< acc
->n_must
+ acc
->n_may
; ++i
) {
279 dim
= space_align_and_join(
280 isl_map_get_space(acc
->source
[i
].map
),
281 isl_space_reverse(isl_map_get_space(acc
->sink
.map
)));
282 dep
->dep
[acc
->n_must
+ i
].map
= isl_map_empty(dim
);
283 dep
->dep
[acc
->n_must
+ i
].data
= acc
->source
[i
].data
;
284 dep
->dep
[acc
->n_must
+ i
].must
= 0;
285 if (!dep
->dep
[acc
->n_must
+ i
].map
)
295 /* Iterate over all sources and for each resulting flow dependence
296 * that is not empty, call the user specfied function.
297 * The second argument in this function call identifies the source,
298 * while the third argument correspond to the final argument of
299 * the isl_flow_foreach call.
301 int isl_flow_foreach(__isl_keep isl_flow
*deps
,
302 int (*fn
)(__isl_take isl_map
*dep
, int must
, void *dep_user
, void *user
),
310 for (i
= 0; i
< deps
->n_source
; ++i
) {
311 if (isl_map_plain_is_empty(deps
->dep
[i
].map
))
313 if (fn(isl_map_copy(deps
->dep
[i
].map
), deps
->dep
[i
].must
,
314 deps
->dep
[i
].data
, user
) < 0)
321 /* Return a copy of the subset of the sink for which no source could be found.
323 __isl_give isl_map
*isl_flow_get_no_source(__isl_keep isl_flow
*deps
, int must
)
329 return isl_set_unwrap(isl_set_copy(deps
->must_no_source
));
331 return isl_set_unwrap(isl_set_copy(deps
->may_no_source
));
334 void isl_flow_free(__isl_take isl_flow
*deps
)
340 isl_set_free(deps
->must_no_source
);
341 isl_set_free(deps
->may_no_source
);
343 for (i
= 0; i
< deps
->n_source
; ++i
)
344 isl_map_free(deps
->dep
[i
].map
);
350 isl_ctx
*isl_flow_get_ctx(__isl_keep isl_flow
*deps
)
352 return deps
? isl_set_get_ctx(deps
->must_no_source
) : NULL
;
355 /* Return a map that enforces that the domain iteration occurs after
356 * the range iteration at the given level.
357 * If level is odd, then the domain iteration should occur after
358 * the target iteration in their shared level/2 outermost loops.
359 * In this case we simply need to enforce that these outermost
360 * loop iterations are the same.
361 * If level is even, then the loop iterator of the domain should
362 * be greater than the loop iterator of the range at the last
363 * of the level/2 shared loops, i.e., loop level/2 - 1.
365 static __isl_give isl_map
*after_at_level(__isl_take isl_space
*dim
, int level
)
367 struct isl_basic_map
*bmap
;
370 bmap
= isl_basic_map_equal(dim
, level
/2);
372 bmap
= isl_basic_map_more_at(dim
, level
/2 - 1);
374 return isl_map_from_basic_map(bmap
);
377 /* Check if the user has set acc->restrict_sources and if so
378 * intersect the range of "dep" with the result of a call to this function.
380 * Since the user expects a mapping from sink iterations to source iterations,
381 * whereas the domain of "dep" is a wrapped map, mapping sink iterations
382 * to accessed array elements, we first need to project out the accessed
383 * sink array elements by applying acc->domain_map.
385 static __isl_give isl_map
*restrict_sources(__isl_take isl_map
*dep
,
386 struct isl_access_info
*acc
, int source
)
391 if (!acc
->restrict_sources
)
394 source_map
= isl_map_copy(dep
);
395 source_map
= isl_map_apply_domain(source_map
,
396 isl_map_copy(acc
->domain_map
));
397 param
= acc
->restrict_sources(source_map
, acc
->sink
.data
,
398 acc
->source
[source
].data
);
399 dep
= isl_map_intersect_range(dep
, param
);
403 /* Compute the last iteration of must source j that precedes the sink
404 * at the given level for sink iterations in set_C.
405 * The subset of set_C for which no such iteration can be found is returned
408 static struct isl_map
*last_source(struct isl_access_info
*acc
,
409 struct isl_set
*set_C
,
410 int j
, int level
, struct isl_set
**empty
)
412 struct isl_map
*read_map
;
413 struct isl_map
*write_map
;
414 struct isl_map
*dep_map
;
415 struct isl_map
*after
;
416 struct isl_map
*result
;
418 read_map
= isl_map_copy(acc
->sink
.map
);
419 write_map
= isl_map_copy(acc
->source
[j
].map
);
420 write_map
= isl_map_reverse(write_map
);
421 dep_map
= isl_map_apply_range(read_map
, write_map
);
422 after
= after_at_level(isl_map_get_space(dep_map
), level
);
423 dep_map
= isl_map_intersect(dep_map
, after
);
424 dep_map
= restrict_sources(dep_map
, acc
, j
);
425 result
= isl_map_partial_lexmax(dep_map
, set_C
, empty
);
426 result
= isl_map_reverse(result
);
431 /* For a given mapping between iterations of must source j and iterations
432 * of the sink, compute the last iteration of must source k preceding
433 * the sink at level before_level for any of the sink iterations,
434 * but following the corresponding iteration of must source j at level
437 static struct isl_map
*last_later_source(struct isl_access_info
*acc
,
438 struct isl_map
*old_map
,
439 int j
, int before_level
,
440 int k
, int after_level
,
441 struct isl_set
**empty
)
444 struct isl_set
*set_C
;
445 struct isl_map
*read_map
;
446 struct isl_map
*write_map
;
447 struct isl_map
*dep_map
;
448 struct isl_map
*after_write
;
449 struct isl_map
*before_read
;
450 struct isl_map
*result
;
452 set_C
= isl_map_range(isl_map_copy(old_map
));
453 read_map
= isl_map_copy(acc
->sink
.map
);
454 write_map
= isl_map_copy(acc
->source
[k
].map
);
456 write_map
= isl_map_reverse(write_map
);
457 dep_map
= isl_map_apply_range(read_map
, write_map
);
458 dim
= space_align_and_join(isl_map_get_space(acc
->source
[k
].map
),
459 isl_space_reverse(isl_map_get_space(acc
->source
[j
].map
)));
460 after_write
= after_at_level(dim
, after_level
);
461 after_write
= isl_map_apply_range(after_write
, old_map
);
462 after_write
= isl_map_reverse(after_write
);
463 dep_map
= isl_map_intersect(dep_map
, after_write
);
464 before_read
= after_at_level(isl_map_get_space(dep_map
), before_level
);
465 dep_map
= isl_map_intersect(dep_map
, before_read
);
466 dep_map
= restrict_sources(dep_map
, acc
, k
);
467 result
= isl_map_partial_lexmax(dep_map
, set_C
, empty
);
468 result
= isl_map_reverse(result
);
/* Given a shared_level between two accesses, return 1 if the
 * first can precede the second at the requested target_level.
 * If the target level is odd, i.e., refers to a statement level
 * dimension, then first needs to precede second at the requested
 * level, i.e., shared_level must be equal to target_level.
 * If the target level is even, then the two loops should share
 * at least the requested number of outer loops.
 * (The original comment said "odd" in both cases; the second case
 * is the even one, as the code below shows.)
 */
static int can_precede_at_level(int shared_level, int target_level)
{
	if (shared_level < target_level)
		return 0;
	if ((target_level % 2) && shared_level > target_level)
		return 0;
	return 1;
}
490 /* Given a possible flow dependence temp_rel[j] between source j and the sink
491 * at level sink_level, remove those elements for which
492 * there is an iteration of another source k < j that is closer to the sink.
493 * The flow dependences temp_rel[k] are updated with the improved sources.
494 * Any improved source needs to precede the sink at the same level
495 * and needs to follow source j at the same or a deeper level.
496 * The lower this level, the later the execution date of source k.
497 * We therefore consider lower levels first.
499 * If temp_rel[j] is empty, then there can be no improvement and
500 * we return immediately.
502 static int intermediate_sources(__isl_keep isl_access_info
*acc
,
503 struct isl_map
**temp_rel
, int j
, int sink_level
)
506 int depth
= 2 * isl_map_dim(acc
->source
[j
].map
, isl_dim_in
) + 1;
508 if (isl_map_plain_is_empty(temp_rel
[j
]))
511 for (k
= j
- 1; k
>= 0; --k
) {
513 plevel
= acc
->level_before(acc
->source
[k
].data
, acc
->sink
.data
);
514 if (!can_precede_at_level(plevel
, sink_level
))
517 plevel2
= acc
->level_before(acc
->source
[j
].data
,
518 acc
->source
[k
].data
);
520 for (level
= sink_level
; level
<= depth
; ++level
) {
522 struct isl_set
*trest
;
523 struct isl_map
*copy
;
525 if (!can_precede_at_level(plevel2
, level
))
528 copy
= isl_map_copy(temp_rel
[j
]);
529 T
= last_later_source(acc
, copy
, j
, sink_level
, k
,
531 if (isl_map_plain_is_empty(T
)) {
536 temp_rel
[j
] = isl_map_intersect_range(temp_rel
[j
], trest
);
537 temp_rel
[k
] = isl_map_union_disjoint(temp_rel
[k
], T
);
544 /* Compute all iterations of may source j that precedes the sink at the given
545 * level for sink iterations in set_C.
547 static __isl_give isl_map
*all_sources(__isl_keep isl_access_info
*acc
,
548 __isl_take isl_set
*set_C
, int j
, int level
)
555 read_map
= isl_map_copy(acc
->sink
.map
);
556 read_map
= isl_map_intersect_domain(read_map
, set_C
);
557 write_map
= isl_map_copy(acc
->source
[acc
->n_must
+ j
].map
);
558 write_map
= isl_map_reverse(write_map
);
559 dep_map
= isl_map_apply_range(read_map
, write_map
);
560 after
= after_at_level(isl_map_get_space(dep_map
), level
);
561 dep_map
= isl_map_intersect(dep_map
, after
);
563 return isl_map_reverse(dep_map
);
566 /* For a given mapping between iterations of must source k and iterations
567 * of the sink, compute the all iteration of may source j preceding
568 * the sink at level before_level for any of the sink iterations,
569 * but following the corresponding iteration of must source k at level
572 static __isl_give isl_map
*all_later_sources(__isl_keep isl_access_info
*acc
,
573 __isl_keep isl_map
*old_map
,
574 int j
, int before_level
, int k
, int after_level
)
581 isl_map
*after_write
;
582 isl_map
*before_read
;
584 set_C
= isl_map_range(isl_map_copy(old_map
));
585 read_map
= isl_map_copy(acc
->sink
.map
);
586 read_map
= isl_map_intersect_domain(read_map
, set_C
);
587 write_map
= isl_map_copy(acc
->source
[acc
->n_must
+ j
].map
);
589 write_map
= isl_map_reverse(write_map
);
590 dep_map
= isl_map_apply_range(read_map
, write_map
);
591 dim
= isl_space_join(isl_map_get_space(acc
->source
[acc
->n_must
+ j
].map
),
592 isl_space_reverse(isl_map_get_space(acc
->source
[k
].map
)));
593 after_write
= after_at_level(dim
, after_level
);
594 after_write
= isl_map_apply_range(after_write
, old_map
);
595 after_write
= isl_map_reverse(after_write
);
596 dep_map
= isl_map_intersect(dep_map
, after_write
);
597 before_read
= after_at_level(isl_map_get_space(dep_map
), before_level
);
598 dep_map
= isl_map_intersect(dep_map
, before_read
);
599 return isl_map_reverse(dep_map
);
602 /* Given the must and may dependence relations for the must accesses
603 * for level sink_level, check if there are any accesses of may access j
604 * that occur in between and return their union.
605 * If some of these accesses are intermediate with respect to
606 * (previously thought to be) must dependences, then these
607 * must dependences are turned into may dependences.
609 static __isl_give isl_map
*all_intermediate_sources(
610 __isl_keep isl_access_info
*acc
, __isl_take isl_map
*map
,
611 struct isl_map
**must_rel
, struct isl_map
**may_rel
,
612 int j
, int sink_level
)
615 int depth
= 2 * isl_map_dim(acc
->source
[acc
->n_must
+ j
].map
,
618 for (k
= 0; k
< acc
->n_must
; ++k
) {
621 if (isl_map_plain_is_empty(may_rel
[k
]) &&
622 isl_map_plain_is_empty(must_rel
[k
]))
625 plevel
= acc
->level_before(acc
->source
[k
].data
,
626 acc
->source
[acc
->n_must
+ j
].data
);
628 for (level
= sink_level
; level
<= depth
; ++level
) {
633 if (!can_precede_at_level(plevel
, level
))
636 copy
= isl_map_copy(may_rel
[k
]);
637 T
= all_later_sources(acc
, copy
, j
, sink_level
, k
, level
);
638 map
= isl_map_union(map
, T
);
640 copy
= isl_map_copy(must_rel
[k
]);
641 T
= all_later_sources(acc
, copy
, j
, sink_level
, k
, level
);
642 ran
= isl_map_range(isl_map_copy(T
));
643 map
= isl_map_union(map
, T
);
644 may_rel
[k
] = isl_map_union_disjoint(may_rel
[k
],
645 isl_map_intersect_range(isl_map_copy(must_rel
[k
]),
647 T
= isl_map_from_domain_and_range(
649 isl_space_domain(isl_map_get_space(must_rel
[k
]))),
651 must_rel
[k
] = isl_map_subtract(must_rel
[k
], T
);
658 /* Compute dependences for the case where all accesses are "may"
659 * accesses, which boils down to computing memory based dependences.
660 * The generic algorithm would also work in this case, but it would
661 * be overkill to use it.
663 static __isl_give isl_flow
*compute_mem_based_dependences(
664 __isl_keep isl_access_info
*acc
)
671 res
= isl_flow_alloc(acc
);
675 mustdo
= isl_map_domain(isl_map_copy(acc
->sink
.map
));
676 maydo
= isl_set_copy(mustdo
);
678 for (i
= 0; i
< acc
->n_may
; ++i
) {
685 plevel
= acc
->level_before(acc
->source
[i
].data
, acc
->sink
.data
);
686 is_before
= plevel
& 1;
689 dim
= isl_map_get_space(res
->dep
[i
].map
);
691 before
= isl_map_lex_le_first(dim
, plevel
);
693 before
= isl_map_lex_lt_first(dim
, plevel
);
694 dep
= isl_map_apply_range(isl_map_copy(acc
->source
[i
].map
),
695 isl_map_reverse(isl_map_copy(acc
->sink
.map
)));
696 dep
= isl_map_intersect(dep
, before
);
697 mustdo
= isl_set_subtract(mustdo
,
698 isl_map_range(isl_map_copy(dep
)));
699 res
->dep
[i
].map
= isl_map_union(res
->dep
[i
].map
, dep
);
702 res
->may_no_source
= isl_set_subtract(maydo
, isl_set_copy(mustdo
));
703 res
->must_no_source
= mustdo
;
708 /* Compute dependences for the case where there is at least one
711 * The core algorithm considers all levels in which a source may precede
712 * the sink, where a level may either be a statement level or a loop level.
713 * The outermost statement level is 1, the first loop level is 2, etc...
714 * The algorithm basically does the following:
715 * for all levels l of the read access from innermost to outermost
716 * for all sources w that may precede the sink access at that level
717 * compute the last iteration of the source that precedes the sink access
719 * add result to possible last accesses at level l of source w
720 * for all sources w2 that we haven't considered yet at this level that may
721 * also precede the sink access
722 * for all levels l2 of w from l to innermost
723 * for all possible last accesses dep of w at l
724 * compute last iteration of w2 between the source and sink
726 * add result to possible last accesses at level l of write w2
727 * and replace possible last accesses dep by the remainder
730 * The above algorithm is applied to the must access. During the course
731 * of the algorithm, we keep track of sink iterations that still
732 * need to be considered. These iterations are split into those that
733 * haven't been matched to any source access (mustdo) and those that have only
734 * been matched to may accesses (maydo).
735 * At the end of each level, we also consider the may accesses.
736 * In particular, we consider may accesses that precede the remaining
737 * sink iterations, moving elements from mustdo to maydo when appropriate,
738 * and may accesses that occur between a must source and a sink of any
739 * dependences found at the current level, turning must dependences into
740 * may dependences when appropriate.
743 static __isl_give isl_flow
*compute_val_based_dependences(
744 __isl_keep isl_access_info
*acc
)
748 isl_set
*mustdo
= NULL
;
749 isl_set
*maydo
= NULL
;
752 isl_map
**must_rel
= NULL
;
753 isl_map
**may_rel
= NULL
;
758 res
= isl_flow_alloc(acc
);
761 ctx
= isl_map_get_ctx(acc
->sink
.map
);
763 depth
= 2 * isl_map_dim(acc
->sink
.map
, isl_dim_in
) + 1;
764 mustdo
= isl_map_domain(isl_map_copy(acc
->sink
.map
));
765 maydo
= isl_set_empty_like(mustdo
);
766 if (!mustdo
|| !maydo
)
768 if (isl_set_plain_is_empty(mustdo
))
771 must_rel
= isl_alloc_array(ctx
, struct isl_map
*, acc
->n_must
);
772 may_rel
= isl_alloc_array(ctx
, struct isl_map
*, acc
->n_must
);
773 if (!must_rel
|| !may_rel
)
776 for (level
= depth
; level
>= 1; --level
) {
777 for (j
= acc
->n_must
-1; j
>=0; --j
) {
778 must_rel
[j
] = isl_map_empty_like(res
->dep
[j
].map
);
779 may_rel
[j
] = isl_map_copy(must_rel
[j
]);
782 for (j
= acc
->n_must
- 1; j
>= 0; --j
) {
784 struct isl_set
*rest
;
787 plevel
= acc
->level_before(acc
->source
[j
].data
,
789 if (!can_precede_at_level(plevel
, level
))
792 T
= last_source(acc
, mustdo
, j
, level
, &rest
);
793 must_rel
[j
] = isl_map_union_disjoint(must_rel
[j
], T
);
796 intermediate_sources(acc
, must_rel
, j
, level
);
798 T
= last_source(acc
, maydo
, j
, level
, &rest
);
799 may_rel
[j
] = isl_map_union_disjoint(may_rel
[j
], T
);
802 intermediate_sources(acc
, may_rel
, j
, level
);
804 if (isl_set_plain_is_empty(mustdo
) &&
805 isl_set_plain_is_empty(maydo
))
808 for (j
= j
- 1; j
>= 0; --j
) {
811 plevel
= acc
->level_before(acc
->source
[j
].data
,
813 if (!can_precede_at_level(plevel
, level
))
816 intermediate_sources(acc
, must_rel
, j
, level
);
817 intermediate_sources(acc
, may_rel
, j
, level
);
820 for (j
= 0; j
< acc
->n_may
; ++j
) {
825 plevel
= acc
->level_before(acc
->source
[acc
->n_must
+ j
].data
,
827 if (!can_precede_at_level(plevel
, level
))
830 T
= all_sources(acc
, isl_set_copy(maydo
), j
, level
);
831 res
->dep
[2 * acc
->n_must
+ j
].map
=
832 isl_map_union(res
->dep
[2 * acc
->n_must
+ j
].map
, T
);
833 T
= all_sources(acc
, isl_set_copy(mustdo
), j
, level
);
834 ran
= isl_map_range(isl_map_copy(T
));
835 res
->dep
[2 * acc
->n_must
+ j
].map
=
836 isl_map_union(res
->dep
[2 * acc
->n_must
+ j
].map
, T
);
837 mustdo
= isl_set_subtract(mustdo
, isl_set_copy(ran
));
838 maydo
= isl_set_union_disjoint(maydo
, ran
);
840 T
= res
->dep
[2 * acc
->n_must
+ j
].map
;
841 T
= all_intermediate_sources(acc
, T
, must_rel
, may_rel
,
843 res
->dep
[2 * acc
->n_must
+ j
].map
= T
;
846 for (j
= acc
->n_must
- 1; j
>= 0; --j
) {
847 res
->dep
[2 * j
].map
=
848 isl_map_union_disjoint(res
->dep
[2 * j
].map
,
850 res
->dep
[2 * j
+ 1].map
=
851 isl_map_union_disjoint(res
->dep
[2 * j
+ 1].map
,
855 if (isl_set_plain_is_empty(mustdo
) &&
856 isl_set_plain_is_empty(maydo
))
863 res
->must_no_source
= mustdo
;
864 res
->may_no_source
= maydo
;
868 isl_set_free(mustdo
);
875 /* Given a "sink" access, a list of n "source" accesses,
876 * compute for each iteration of the sink access
877 * and for each element accessed by that iteration,
878 * the source access in the list that last accessed the
879 * element accessed by the sink access before this sink access.
880 * Each access is given as a map from the loop iterators
881 * to the array indices.
882 * The result is a list of n relations between source and sink
883 * iterations and a subset of the domain of the sink access,
884 * corresponding to those iterations that access an element
885 * not previously accessed.
887 * To deal with multi-valued sink access relations, the sink iteration
888 * domain is first extended with dimensions that correspond to the data
889 * space. After the computation is finished, these extra dimensions are
890 * projected out again.
892 __isl_give isl_flow
*isl_access_info_compute_flow(__isl_take isl_access_info
*acc
)
895 struct isl_flow
*res
= NULL
;
900 acc
->domain_map
= isl_map_domain_map(isl_map_copy(acc
->sink
.map
));
901 acc
->sink
.map
= isl_map_range_map(acc
->sink
.map
);
905 if (acc
->n_must
== 0)
906 res
= compute_mem_based_dependences(acc
);
908 acc
= isl_access_info_sort_sources(acc
);
909 res
= compute_val_based_dependences(acc
);
914 for (j
= 0; j
< res
->n_source
; ++j
) {
915 res
->dep
[j
].map
= isl_map_apply_range(res
->dep
[j
].map
,
916 isl_map_copy(acc
->domain_map
));
917 if (!res
->dep
[j
].map
)
920 if (!res
->must_no_source
|| !res
->may_no_source
)
923 isl_access_info_free(acc
);
926 isl_access_info_free(acc
);
932 /* Keep track of some information about a schedule for a given
933 * access. In particular, keep track of which dimensions
934 * have a constant value and of the actual constant values.
936 struct isl_sched_info
{
941 static void sched_info_free(__isl_take
struct isl_sched_info
*info
)
945 isl_vec_free(info
->cst
);
/* Extract information on the constant dimensions of the schedule
 * for a given access.  The "map" is of the form
 *
 *	[S -> D] -> A
 *
 * with S the schedule domain, D the iteration domain and A the data domain.
 *
 * Returns NULL on allocation failure or invalid input.
 */
static __isl_give struct isl_sched_info *sched_info_alloc(
	__isl_keep isl_map *map)
{
	isl_ctx *ctx;
	isl_space *dim;
	struct isl_sched_info *info;
	int i, n;

	if (!map)
		return NULL;

	/* The domain of "map" wraps [S -> D]; its input dimensions
	 * are the schedule dimensions S.
	 */
	dim = isl_space_unwrap(isl_space_domain(isl_map_get_space(map)));
	if (!dim)
		return NULL;
	n = isl_space_dim(dim, isl_dim_in);
	isl_space_free(dim);

	ctx = isl_map_get_ctx(map);
	info = isl_alloc_type(ctx, struct isl_sched_info);
	if (!info)
		return NULL;
	info->is_cst = isl_alloc_array(ctx, int, n);
	info->cst = isl_vec_alloc(ctx, n);
	if (!info->is_cst || !info->cst)
		goto error;

	/* Record, per schedule dimension, whether the value is fixed
	 * and, if so, what the fixed value is.
	 */
	for (i = 0; i < n; ++i)
		info->is_cst[i] = isl_map_plain_is_fixed(map, isl_dim_in, i,
							&info->cst->el[i]);

	return info;
error:
	sched_info_free(info);
	return NULL;
}
/* Internal data for isl_union_map_compute_flow:
 * the (schedule-adjusted) source access relations, the accumulated
 * results, and per-sink scratch state used while collecting the
 * sources that access the same array as the current sink.
 */
struct isl_compute_flow_data {
	isl_union_map *must_source;
	isl_union_map *may_source;
	isl_union_map *must_dep;
	isl_union_map *may_dep;
	isl_union_map *must_no_source;
	isl_union_map *may_no_source;

	int count;	/* number of source accesses to the current array */
	int must;	/* currently collecting must (1) or may (0) sources */
	isl_space *dim;	/* data space of the current sink access */
	struct isl_sched_info *sink_info;
	struct isl_sched_info **source_info;
	isl_access_info *accesses;
};
/* isl_union_map_foreach_map callback: increment data->count if "map"
 * accesses the same array (data space) as the current sink access.
 * Consumes "map".
 */
static int count_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);
	isl_map_free(map);

	if (eq < 0)
		return -1;
	if (eq)
		data->count++;

	return 0;
}
/* isl_union_map_foreach_map callback: if "map" accesses the same array
 * as the current sink access, extract its schedule information and add
 * it as a source (must or may, depending on data->must) to the
 * isl_access_info being constructed.  Consumes "map".
 */
static int collect_matching_array(__isl_take isl_map *map, void *user)
{
	int eq;
	isl_space *dim;
	struct isl_sched_info *info;
	struct isl_compute_flow_data *data;

	data = (struct isl_compute_flow_data *)user;

	dim = isl_space_range(isl_map_get_space(map));

	eq = isl_space_is_equal(dim, data->dim);

	isl_space_free(dim);

	if (eq < 0)
		goto error;
	if (!eq) {
		isl_map_free(map);
		return 0;
	}

	/* Keep the schedule info alive until the whole sink has been
	 * processed; it is freed by compute_flow.
	 */
	info = sched_info_alloc(map);
	data->source_info[data->count] = info;

	data->accesses = isl_access_info_add_source(data->accesses,
						    map, data->must, info);

	data->count++;

	return 0;
error:
	isl_map_free(map);
	return -1;
}
/* Determine the shared nesting level and the "textual order" of
 * the given accesses.
 *
 * We first determine the minimal schedule dimension for both accesses.
 *
 * If among those dimensions, we can find one where both have a fixed
 * value and if moreover those values are different, then the previous
 * dimension is the last shared nesting level and the textual order
 * is determined based on the order of the fixed values.
 * If no such fixed values can be found, then we set the shared
 * nesting level to the minimal schedule dimension, with no textual ordering.
 *
 * The return value encodes both pieces of information as
 * 2 * level + (first textually before second), the format expected
 * by isl_access_level_before.
 */
static int before(void *first, void *second)
{
	struct isl_sched_info *info1 = first;
	struct isl_sched_info *info2 = second;
	int n1, n2;
	int i;

	n1 = info1->cst->size;
	n2 = info2->cst->size;

	if (n2 < n1)
		n1 = n2;

	for (i = 0; i < n1; ++i) {
		/* Skip dimensions where either value is not fixed. */
		if (!info1->is_cst[i])
			continue;
		if (!info2->is_cst[i])
			continue;
		if (isl_int_eq(info1->cst->el[i], info2->cst->el[i]))
			continue;
		/* Different fixed values: i is the shared level and the
		 * comparison gives the textual order.
		 */
		return 2 * i + isl_int_lt(info1->cst->el[i],
					  info2->cst->el[i]);
	}

	return 2 * n1;
}
/* Given a sink access, look for all the source accesses that access
 * the same array and perform dataflow analysis on them using
 * isl_access_info_compute_flow.
 *
 * isl_union_map_foreach_map callback; consumes "map" (the sink).
 * The resulting dependences and no-source sets are accumulated
 * in the isl_compute_flow_data pointed to by "user".
 */
static int compute_flow(__isl_take isl_map *map, void *user)
{
	int i;
	isl_ctx *ctx;
	struct isl_compute_flow_data *data;
	isl_flow *flow;

	data = (struct isl_compute_flow_data *)user;

	ctx = isl_map_get_ctx(map);

	/* Reset per-sink scratch state. */
	data->accesses = NULL;
	data->sink_info = NULL;
	data->source_info = NULL;
	data->count = 0;
	data->dim = isl_space_range(isl_map_get_space(map));

	/* First count the sources accessing the same array so that the
	 * access info and the source_info array can be sized exactly.
	 */
	if (isl_union_map_foreach_map(data->must_source,
					&count_matching_array, data) < 0)
		goto error;
	if (isl_union_map_foreach_map(data->may_source,
					&count_matching_array, data) < 0)
		goto error;

	data->sink_info = sched_info_alloc(map);
	data->source_info = isl_calloc_array(ctx, struct isl_sched_info *,
					     data->count);

	data->accesses = isl_access_info_alloc(isl_map_copy(map),
				data->sink_info, &before, data->count);
	if (!data->sink_info || !data->source_info || !data->accesses)
		goto error;
	/* Now collect the matching sources: must sources first,
	 * then may sources (the order isl_access_info expects).
	 */
	data->count = 0;
	data->must = 1;
	if (isl_union_map_foreach_map(data->must_source,
					&collect_matching_array, data) < 0)
		goto error;
	data->must = 0;
	if (isl_union_map_foreach_map(data->may_source,
					&collect_matching_array, data) < 0)
		goto error;

	flow = isl_access_info_compute_flow(data->accesses);
	data->accesses = NULL;	/* consumed by compute_flow */

	if (!flow)
		goto error;

	data->must_no_source = isl_union_map_union(data->must_no_source,
		isl_union_map_from_map(isl_flow_get_no_source(flow, 1)));
	data->may_no_source = isl_union_map_union(data->may_no_source,
		isl_union_map_from_map(isl_flow_get_no_source(flow, 0)));

	for (i = 0; i < flow->n_source; ++i) {
		isl_union_map *dep;
		dep = isl_union_map_from_map(isl_map_copy(flow->dep[i].map));
		if (flow->dep[i].must)
			data->must_dep = isl_union_map_union(data->must_dep,
								dep);
		else
			data->may_dep = isl_union_map_union(data->may_dep,
								dep);
	}

	isl_flow_free(flow);

	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return 0;
error:
	isl_access_info_free(data->accesses);
	sched_info_free(data->sink_info);
	if (data->source_info) {
		for (i = 0; i < data->count; ++i)
			sched_info_free(data->source_info[i]);
		free(data->source_info);
	}
	isl_space_free(data->dim);
	isl_map_free(map);

	return -1;
}
1198 /* Given a collection of "sink" and "source" accesses,
1199 * compute for each iteration of a sink access
1200 * and for each element accessed by that iteration,
1201 * the source access in the list that last accessed the
1202 * element accessed by the sink access before this sink access.
1203 * Each access is given as a map from the loop iterators
1204 * to the array indices.
1205 * The result is a relation between source and sink
1206 * iterations and a subset of the domain of the sink accesses,
1207 * corresponding to those iterations that access an element
1208 * not previously accessed.
1210 * We first prepend the schedule dimensions to the domain
1211 * of the accesses so that we can easily compare their relative order.
1212 * Then we consider each sink access individually in compute_flow.
int isl_union_map_compute_flow(__isl_take isl_union_map *sink,
	__isl_take isl_union_map *must_source,
	__isl_take isl_union_map *may_source,
	__isl_take isl_union_map *schedule,
	__isl_give isl_union_map **must_dep, __isl_give isl_union_map **may_dep,
	__isl_give isl_union_map **must_no_source,
	__isl_give isl_union_map **may_no_source)
{
	isl_space *dim;
	isl_union_map *range_map = NULL;
	struct isl_compute_flow_data data;

	/* Align the parameters of all inputs on a common space. */
	sink = isl_union_map_align_params(sink,
					isl_union_map_get_space(must_source));
	sink = isl_union_map_align_params(sink,
					isl_union_map_get_space(may_source));
	sink = isl_union_map_align_params(sink,
					isl_union_map_get_space(schedule));
	dim = isl_union_map_get_space(sink);
	must_source = isl_union_map_align_params(must_source,
						isl_space_copy(dim));
	may_source = isl_union_map_align_params(may_source,
						isl_space_copy(dim));
	schedule = isl_union_map_align_params(schedule, isl_space_copy(dim));

	/* Prepend the schedule dimensions to the access domains:
	 * each access domain D becomes [S -> D], so that the relative
	 * order of accesses can be read off the schedule values.
	 * range_map (D -> [S -> D] reversed, i.e. [S -> D] -> D) is kept
	 * to strip the schedule dimensions off the results again.
	 */
	schedule = isl_union_map_reverse(schedule);
	range_map = isl_union_map_range_map(schedule);
	schedule = isl_union_map_reverse(isl_union_map_copy(range_map));
	sink = isl_union_map_apply_domain(sink, isl_union_map_copy(schedule));
	must_source = isl_union_map_apply_domain(must_source,
						isl_union_map_copy(schedule));
	may_source = isl_union_map_apply_domain(may_source, schedule);

	data.must_source = must_source;
	data.may_source = may_source;
	/* Only accumulate the outputs the caller asked for. */
	data.must_dep = must_dep ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_dep = may_dep ? isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.must_no_source = must_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;
	data.may_no_source = may_no_source ?
		isl_union_map_empty(isl_space_copy(dim)) : NULL;

	isl_space_free(dim);

	/* Perform dataflow analysis for each sink access separately. */
	if (isl_union_map_foreach_map(sink, &compute_flow, &data) < 0)
		goto error;

	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);

	/* Project the schedule dimensions back out of the results. */
	if (must_dep) {
		data.must_dep = isl_union_map_apply_domain(data.must_dep,
					isl_union_map_copy(range_map));
		data.must_dep = isl_union_map_apply_range(data.must_dep,
					isl_union_map_copy(range_map));
		*must_dep = data.must_dep;
	}
	if (may_dep) {
		data.may_dep = isl_union_map_apply_domain(data.may_dep,
					isl_union_map_copy(range_map));
		data.may_dep = isl_union_map_apply_range(data.may_dep,
					isl_union_map_copy(range_map));
		*may_dep = data.may_dep;
	}
	if (must_no_source) {
		data.must_no_source = isl_union_map_apply_domain(
			data.must_no_source, isl_union_map_copy(range_map));
		*must_no_source = data.must_no_source;
	}
	if (may_no_source) {
		data.may_no_source = isl_union_map_apply_domain(
			data.may_no_source, isl_union_map_copy(range_map));
		*may_no_source = data.may_no_source;
	}

	isl_union_map_free(range_map);

	return 0;
error:
	isl_union_map_free(range_map);
	isl_union_map_free(sink);
	isl_union_map_free(must_source);
	isl_union_map_free(may_source);
	isl_union_map_free(data.must_dep);
	isl_union_map_free(data.may_dep);
	isl_union_map_free(data.must_no_source);
	isl_union_map_free(data.may_no_source);
	/* Make sure the caller never sees dangling output pointers. */
	if (must_dep)
		*must_dep = NULL;
	if (may_dep)
		*may_dep = NULL;
	if (must_no_source)
		*must_no_source = NULL;
	if (may_no_source)
		*may_no_source = NULL;
	return -1;
}