/*
 * Copyright 2010-2011 INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <isl/constraint.h>
#include <isl/ilp.h>

#include "gpu_array_tile.h"
#include "gpu_group.h"
#include "gpu_tree.h"
#include "schedule.h"

/* Print the name of the local copy of a given group of array references.
 */
__isl_give isl_printer *gpu_array_ref_group_print_name(
	struct gpu_array_ref_group *group, __isl_take isl_printer *p)
{
	int global = 0;

	if (group->private_tile)
		p = isl_printer_print_str(p, "private_");
	else if (group->shared_tile)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->local_array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}

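/* For example, assuming an array called "A" that is split into more
 * than one group, the group with nr equal to 1 is printed as
 * "private_A_1" if it has a private tile and as "shared_A_1" if it
 * only has a shared tile.
 */
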
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *gpu_array_ref_group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		    (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					    isl_union_map_from_map(map_i));
	}

	return access;
}

/* Return the effective gpu_array_tile associated to "group" or
 * NULL if there is no such gpu_array_tile.
 * If we have computed both a private and a shared tile, then
 * the private tile is used.
 */
struct gpu_array_tile *gpu_array_ref_group_tile(
	struct gpu_array_ref_group *group)
{
	if (group->private_tile)
		return group->private_tile;
	if (group->shared_tile)
		return group->shared_tile;
	return NULL;
}

/* Does the tile associated to "group" require unrolling of the schedule
 * dimensions mapped to threads?
 * Note that this can only happen for private tiles.
 */
int gpu_array_ref_group_requires_unroll(struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;
	return tile->requires_unroll;
}

/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, __isl_keep isl_val *stride, int sign)
{
	int i;
	isl_val *v;
	isl_space *space;
	unsigned nparam;
	unsigned nvar;
	isl_aff *aff;

	isl_val_free(bound->stride);
	bound->stride = isl_val_copy(stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	v = isl_constraint_get_constant_val(c);
	if (sign < 0)
		v = isl_val_neg(v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant_val(aff, v);

	for (i = 0; i < nparam; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_param, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_param, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_in, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_in, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_in, i, v);
	}

	bound->shift = aff;
}

/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static isl_stat check_stride_constraint(__isl_take isl_constraint *c,
	void *user)
{
	int i;
	isl_ctx *ctx;
	isl_val *v;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	ctx = isl_constraint_get_ctx(c);
	n_div = isl_constraint_dim(c, isl_dim_div);
	v = isl_constraint_get_coefficient_val(c, isl_dim_out, 0);

	if (n_div && (isl_val_is_one(v) || isl_val_is_negone(v))) {
		int s = isl_val_sgn(v);
		isl_val *stride = isl_val_zero(ctx);

		isl_val_free(v);
		for (i = 0; i < n_div; ++i) {
			v = isl_constraint_get_coefficient_val(c,
								isl_dim_div, i);
			stride = isl_val_gcd(stride, v);
		}
		if (!isl_val_is_zero(stride) &&
		    isl_val_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

		isl_val_free(stride);
	} else
		isl_val_free(v);

	isl_constraint_free(c);

	return isl_stat_ok;
}

/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * Next, we construct a mapping of the form
 *
 *	[D -> i] -> [D -> (i + s(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	bound->stride = NULL;

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (!bound->stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bmap = isl_basic_map_apply_range(shift, scale);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}

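/* For example, assuming "bounds" is a subset of
 *
 *	{ D[d] -> [i] : exists e : d + i = 2 e }
 *
 * then a stride g = 2 with shift s(D[d]) = d is detected and the
 * index is transformed according to [D[d] -> i] -> [D[d] -> (i + d)/2].
 */
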
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.
 * This variable is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};

/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static isl_stat compute_size_in_direction(__isl_take isl_constraint *c,
	void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return isl_stat_ok;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return isl_stat_ok;
}

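/* For example, assuming the constraints n <= i <= n + 9 on the array
 * index i, the lower bound constraint i - n >= 0 has m = 1 and b(x) = n,
 * and the expression i - ceil(b(x)/m) + 1 = i - n + 1 has constant
 * upper bound 10, so the size is set to 10 and the lower bound to n.
 */
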
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}

/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile(__isl_keep isl_map *access, struct gpu_array_tile *tile)
{
	int i;

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
						1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return 0;
	}

	return 1;
}

/* Internal data structure for gpu_group_references.
 *
 * scop represents the input scop.
 * kernel_depth is the schedule depth where the kernel launch will
 * be introduced, i.e., it is the depth of the band that is mapped
 * to blocks.
 * thread_depth is the schedule depth where the thread mark is located,
 * i.e., it is the depth of the band that is mapped to threads and also
 * the schedule depth at which the copying to/from shared/private memory
 * is computed.  The copy operation may then later be hoisted to
 * a higher level.
 * n_thread is the number of schedule dimensions in the band that
 * is mapped to threads.
 * privatization lives in the range of thread_sched (i.e., it is
 * of dimension thread_depth + n_thread) and encodes the mapping
 * to thread identifiers (as parameters).
 * host_sched contains the kernel_depth dimensions of the host schedule.
 * shared_sched contains the first thread_depth dimensions of the
 * kernel schedule.
 * thread_sched contains the first (thread_depth + n_thread) dimensions
 * of the kernel schedule.
 * full_sched is a union_map representation of the entire kernel schedule.
 */
struct gpu_group_data {
	struct ppcg_scop *scop;
	int kernel_depth;
	int thread_depth;
	int n_thread;
	isl_set *privatization;
	isl_union_map *host_sched;
	isl_union_map *shared_sched;
	isl_union_map *thread_sched;
	isl_union_map *full_sched;
};

/* Construct a map from domain_space to domain_space that increments
 * the dimension at position "pos" and leaves all other dimensions
 * unchanged.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_space, int pos)
{
	isl_space *space;
	isl_aff *aff;
	isl_multi_aff *next;

	space = isl_space_map_from_set(domain_space);
	next = isl_multi_aff_identity(space);
	aff = isl_multi_aff_get_aff(next, pos);
	aff = isl_aff_add_constant_si(aff, 1);
	next = isl_multi_aff_set_aff(next, pos, aff);

	return isl_map_from_multi_aff(next);
}

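/* For example, applied to a two-dimensional domain_space with pos 1,
 * next constructs the map { [i0, i1] -> [i0, i1 + 1] }.
 */
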
/* Check if the given access is coalesced (or if there is no point
 * in trying to coalesce the access by mapping the array to shared memory).
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * If no two consecutive array elements are ever accessed by "access",
 * then mapping the corresponding array to shared memory will not
 * improve coalescing.  In fact, the copying will likely be performed
 * by a single thread.  Consider the access as coalesced such that
 * the caller will not try and map the array to shared memory just
 * to improve coalescing.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one thread identifier.
 */
static int access_is_coalesced(struct gpu_group_data *data,
	__isl_keep isl_union_map *access)
{
	isl_space *space;
	isl_set *accessed;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced, empty;
	int dim;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(data->full_sched));
	access_map = isl_map_from_union_map(access);

	space = isl_map_get_space(access_map);
	space = isl_space_range(space);
	dim = isl_space_dim(space, isl_dim_set);
	if (dim == 0)
		next_element = isl_map_empty(isl_space_map_from_set(space));
	else
		next_element = next(space, dim - 1);

	accessed = isl_map_range(isl_map_copy(access_map));
	map = isl_map_copy(next_element);
	map = isl_map_intersect_domain(map, isl_set_copy(accessed));
	map = isl_map_intersect_range(map, accessed);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	if (empty < 0 || empty) {
		isl_map_free(next_element);
		isl_map_free(access_map);
		return empty;
	}

	space = isl_map_get_space(access_map);
	space = isl_space_domain(space);
	next_thread_x = next(space, data->thread_depth + data->n_thread - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}

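/* For example, if the last array index is equal to the schedule
 * dimension that is wrapped over the last thread index, as in an
 * access A[t], then the access is coalesced, while an access of the
 * form A[2 * t] is not, since incrementing that schedule dimension
 * then skips every other array element.
 */
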
/* Replace the host schedule dimensions in the access relation "access"
 * by parameters, so that they are treated as fixed when checking for reuse
 * (within a kernel) or whether two consecutive elements are accessed
 * (within a kernel).
 */
static __isl_give isl_union_map *localize_access(struct gpu_group_data *data,
	__isl_take isl_union_map *access)
{
	int n;
	isl_space *space;
	isl_set *param;
	isl_union_map *umap;
	isl_id_list *ids;

	umap = isl_union_map_copy(data->host_sched);
	space = isl_union_map_get_space(umap);
	n = data->kernel_depth;
	ids = ppcg_scop_generate_names(data->scop, n, "__ppcg_host_");
	param = parametrization(space, n, 0, ids);
	isl_id_list_free(ids);
	umap = isl_union_map_intersect_range(umap,
					isl_union_set_from_set(param));
	access = isl_union_map_intersect_domain(access,
					isl_union_map_domain(umap));

	return access;
}

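/* For example, assuming data->kernel_depth is 2, the two outer host
 * schedule dimensions are equated to fresh parameters generated from
 * the "__ppcg_host_" prefix, say __ppcg_host_0 and __ppcg_host_1.
 */
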
/* Given an access relation in terms of at least data->thread_depth initial
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first data->thread_depth dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
static int access_is_bijective(struct gpu_group_data *data,
	__isl_keep isl_map *access)
{
	int res;
	int dim;
	isl_set *par;
	isl_space *space;
	isl_id_list *ids;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	ids = ppcg_scop_generate_names(data->scop, data->thread_depth, "s");
	dim = isl_map_dim(access, isl_dim_in);
	par = parametrization(space, dim, 0, ids);
	isl_id_list_free(ids);
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}

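/* For example, assuming data->thread_depth is 2, the first two input
 * dimensions are equated to fresh parameters s0 and s1, so that
 * bijectivity is checked separately for each fixed value of (s0, s1).
 */
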
/* Compute the number of outer schedule tile dimensions that affect
 * the offset of "tile".
 * If there is no such dimension, then return the index
 * of the first kernel dimension, i.e., data->kernel_depth.
 */
static int compute_tile_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = data->thread_depth - 1; j >= data->kernel_depth; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return ++j;
}

/* Adjust the fields of "tile" to reflect the new input dimension "new_dim",
 * where "old_dim" is the old dimension.
 * The dimensions beyond "new_dim" are assumed not to affect the tile,
 * so they can simply be dropped.
 */
static int tile_adjust_depth(struct gpu_array_tile *tile,
	int old_dim, int new_dim)
{
	int i;

	if (old_dim == new_dim)
		return 0;

	for (i = 0; i < tile->n; ++i) {
		tile->bound[i].lb = isl_aff_drop_dims(tile->bound[i].lb,
					isl_dim_in, new_dim, old_dim - new_dim);
		if (!tile->bound[i].lb)
			return -1;
		if (!tile->bound[i].shift)
			continue;
		tile->bound[i].shift = isl_aff_drop_dims(tile->bound[i].shift,
					isl_dim_in, new_dim, old_dim - new_dim);
		if (!tile->bound[i].shift)
			return -1;
	}

	return 0;
}

/* Determine the number of schedule dimensions that affect the offset of the
 * shared or private tile and store the result in group->depth, with
 * a lower bound of data->kernel_depth.
 * If there is no tile defined on the array reference group,
 * then set group->depth to data->thread_depth.
 * Also adjust the fields of the tile to only refer to the group->depth
 * outer schedule dimensions.
 */
static int set_depth(struct gpu_group_data *data,
	struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	group->depth = data->thread_depth;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;

	group->depth = compute_tile_depth(data, tile);
	if (tile_adjust_depth(tile, data->thread_depth, group->depth) < 0)
		return -1;

	return 0;
}

/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_local_array_info *local,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);

	n = 0;
	for (i = 0; i < local->array->n_ref; ++i) {
		isl_union_map *umap;
		isl_map *map;
		struct gpu_array_ref_group *group;
		struct gpu_stmt_access *access = local->array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(data->shared_sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);
		map = isl_map_detect_equalities(map);

		group = isl_calloc_type(ctx, struct gpu_array_ref_group);
		if (!group)
			return -1;
		group->local_array = local;
		group->array = local->array;
		group->access = map;
		group->write = access->write;
		group->exact_write = access->exact_write;
		group->slice = access->n_index < local->array->n_index;
		group->refs = &local->array->refs[i];
		group->n_ref = 1;

		groups[n++] = group;
	}

	return n;
}

/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
struct gpu_array_ref_group *gpu_array_ref_group_free(
	struct gpu_array_ref_group *group)
{
	if (!group)
		return NULL;
	gpu_array_tile_free(group->shared_tile);
	gpu_array_tile_free(group->private_tile);
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);
	return NULL;
}

/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int disjoint;

	disjoint = isl_map_is_disjoint(group1->access, group2->access);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}

/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int i;
	isl_ctx *ctx;
	struct gpu_array_ref_group *group;

	if (!group1 || !group2)
		return NULL;

	ctx = isl_map_get_ctx(group1->access);
	group = isl_calloc_type(ctx, struct gpu_array_ref_group);
	if (!group)
		return NULL;
	group->local_array = group1->local_array;
	group->array = group1->array;
	group->access = isl_map_union(isl_map_copy(group1->access),
					isl_map_copy(group2->access));
	group->write = group1->write || group2->write;
	group->exact_write = group1->exact_write && group2->exact_write;
	group->slice = group1->slice || group2->slice;
	group->n_ref = group1->n_ref + group2->n_ref;
	group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
					group->n_ref);
	if (!group->refs)
		return gpu_array_ref_group_free(group);
	for (i = 0; i < group1->n_ref; ++i)
		group->refs[i] = group1->refs[i];
	for (i = 0; i < group2->n_ref; ++i)
		group->refs[group1->n_ref + i] = group2->refs[i];

	return group;
}

/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	struct gpu_array_ref_group *group;

	group = join_groups(group1, group2);
	gpu_array_ref_group_free(group1);
	gpu_array_ref_group_free(group2);
	return group;
}

/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct ppcg_kernel *kernel,
	__isl_keep isl_union_map *access)
{
	isl_ctx *ctx;
	isl_printer *p;

	ctx = isl_union_map_get_ctx(access);
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_print_str(p, "Array reference group ");
	p = isl_printer_print_union_map(p, access);
	p = isl_printer_print_str(p,
	    " not considered for mapping to shared memory in kernel");
	p = isl_printer_print_int(p, kernel->id);
	p = isl_printer_print_str(p,
	    " because it exhibits no reuse and is considered to be coalesced");
	p = isl_printer_end_line(p);
	isl_printer_free(p);
}

/* Given an access relation in terms of the data->thread_depth initial
 * dimensions of the computed schedule and the thread identifiers
 * (as parameters), check if the use of the corresponding private tile
 * requires unrolling.
 *
 * If we are creating a private tile because we are forced to,
 * then no unrolling is required.
 * Otherwise we check if "access" is bijective and unrolling
 * is required if it is not.  Note that the access relation
 * has already been determined to be bijective before the introduction
 * of the thread identifiers and the removal of the schedule dimensions
 * that are mapped to these threads.  If the access relation is no longer
 * bijective, then this means that more than one value of one of those
 * schedule dimensions is mapped to the same thread and therefore
 * unrolling is required.
 */
static int check_requires_unroll(struct gpu_group_data *data,
	__isl_keep isl_map *access, int force_private)
{
	int bijective;

	if (force_private)
		return 0;
	bijective = access_is_bijective(data, access);
	if (bijective < 0)
		return -1;
	return !bijective;
}

/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return 0 on success and -1 on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory.  Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 * Reuse and coalescing are checked within the given kernel.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse.  Moreover, we require that the access is private
 * to the thread.  That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the outer
 * data->thread_depth + data->n_thread schedule dimensions.
 * The latter data->n_thread will be mapped to thread identifiers.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the use of the private tile requires unrolling, but some
 * of the other arrays are forcibly mapped to private memory,
 * then we do not allow the use of this private tile since
 * we cannot move the schedule dimensions that need to be unrolled down
 * without performing some kind of expansion on those arrays
 * that are forcibly mapped to private memory.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static int compute_group_bounds_core(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	isl_ctx *ctx = isl_space_get_ctx(group->array->space);
	isl_union_map *access, *local;
	int n_index = group->array->n_index;
	int no_reuse, coalesced;
	isl_map *acc;
	int force_private = group->local_array->force_private;
	int use_shared = kernel->options->use_shared_memory &&
				data->n_thread > 0;
	int use_private = force_private || kernel->options->use_private_memory;
	int r = 0;
	int requires_unroll;

	if (!use_shared && !use_private)
		return 0;
	if (gpu_array_is_read_only_scalar(group->array))
		return 0;
	if (!force_private && !group->exact_write)
		return 0;
	if (group->slice)
		return 0;

	access = gpu_array_ref_group_access_relation(group, 1, 1);
	local = localize_access(data, isl_union_map_copy(access));
	no_reuse = isl_union_map_is_injective(local);
	if (no_reuse < 0)
		r = -1;
	if (use_shared && no_reuse)
		coalesced = access_is_coalesced(data, local);
	isl_union_map_free(local);

	if (r >= 0 && kernel->options->debug->verbose &&
	    use_shared && no_reuse && coalesced)
		report_no_reuse_and_coalesced(kernel, access);

	if (use_shared && (!no_reuse || !coalesced)) {
		group->shared_tile = gpu_array_tile_create(ctx,
							group->array->n_index);
		if (!group->shared_tile)
			r = -1;
		else if (!can_tile(group->access, group->shared_tile))
			group->shared_tile =
					gpu_array_tile_free(group->shared_tile);
	}

	if (r < 0 || (!force_private && (!use_private || no_reuse))) {
		isl_union_map_free(access);
		return r;
	}

	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(data->thread_sched));

	acc = isl_map_from_union_map(access);

	if (!force_private && !access_is_bijective(data, acc)) {
		isl_map_free(acc);
		return 0;
	}

	acc = isl_map_intersect_domain(acc, isl_set_copy(data->privatization));
	acc = isl_map_project_out(acc, isl_dim_in, data->thread_depth,
								data->n_thread);
	requires_unroll = check_requires_unroll(data, acc, force_private);
	if (requires_unroll < 0 ||
	    (requires_unroll && kernel->any_force_private)) {
		isl_map_free(acc);
		return requires_unroll < 0 ? -1 : 0;
	}

	group->private_tile = gpu_array_tile_create(ctx, n_index);
	if (!group->private_tile) {
		isl_map_free(acc);
		return -1;
	}
	group->private_tile->requires_unroll = requires_unroll;
	if (!can_tile(acc, group->private_tile))
		group->private_tile = gpu_array_tile_free(group->private_tile);

	isl_map_free(acc);

	if (force_private && !group->private_tile)
		isl_die(ctx, isl_error_internal,
			"unable to map array reference group to registers",
			return -1);

	return 0;
}

/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set the tile depth.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	if (!group)
		return -1;
	if (compute_group_bounds_core(kernel, group, data) < 0)
		return -1;
	if (set_depth(data, group) < 0)
		return -1;

	return 0;
}

/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	int (*overlap)(struct gpu_array_ref_group *group1,
		struct gpu_array_ref_group *group2), int compute_bounds,
	struct gpu_group_data *data)
{
	int i, j;

	for (i = 0; i < n; ++i) {
		for (j = n - 1; j > i; --j) {
			if (!groups[i]->write && !groups[j]->write)
				continue;

			if (!overlap(groups[i], groups[j]))
				continue;

			groups[i] = join_groups_and_free(groups[i], groups[j]);
			if (j != n - 1)
				groups[j] = groups[n - 1];
			groups[n - 1] = NULL;
			n--;

			if (!groups[i])
				return -1;
			if (compute_bounds &&
			    compute_group_bounds(kernel, groups[i], data) < 0)
				return -1;
		}
	}

	return n;
}

/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 */
static int group_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &accesses_overlap, 0, data);
}

/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->depth, group2->depth) loops.
 */
static int depth_accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int depth;
	int dim;
	int empty;
	isl_map *map_i, *map_j, *map;

	depth = group1->depth;
	if (group2->depth < depth)
		depth = group2->depth;
	map_i = isl_map_copy(group1->access);
	dim = isl_map_dim(map_i, isl_dim_in);
	map_i = isl_map_eliminate(map_i, isl_dim_in, depth, dim - depth);
	map_j = isl_map_copy(group2->access);
	map_j = isl_map_eliminate(map_j, isl_dim_in, depth, dim - depth);
	map = isl_map_intersect(map_i, map_j);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	return !empty;
}

/* If two groups have overlapping access relations (within the outer
 * depth loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 */
static int group_depth_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &depth_accesses_overlap, 1,
				data);
}

/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
static int smaller_tile(struct gpu_array_tile *tile,
	struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
	int smaller;
	isl_val *size, *size1, *size2;

	size = gpu_array_tile_size(tile);
	size1 = gpu_array_tile_size(tile1);
	size2 = gpu_array_tile_size(tile2);

	size = isl_val_sub(size, size1);
	size = isl_val_sub(size, size2);
	smaller = isl_val_is_neg(size);

	isl_val_free(size);

	return smaller;
}

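/* For example, a merged tile of 64 elements is smaller than separate
 * tiles of 32 and 48 elements, which add up to 80 elements.
 */
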
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * If merging two groups decreases the depth of the tile of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct ppcg_kernel *kernel,
	struct gpu_array_info *array, int n,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i, j;
	int recompute_overlap = 0;
	isl_ctx *ctx = isl_space_get_ctx(array->space);

	for (i = 0; i < n; ++i) {
		if (!groups[i]->shared_tile)
			continue;
		for (j = n - 1; j > i; --j) {
			struct gpu_array_ref_group *group;

			if (!groups[j]->shared_tile)
				continue;

			if (!depth_accesses_overlap(groups[i], groups[j]))
				continue;

			group = join_groups(groups[i], groups[j]);
			if (compute_group_bounds(kernel, group, data) < 0) {
				gpu_array_ref_group_free(group);
				return -1;
			}
			if (!group->shared_tile ||
			    !smaller_tile(group->shared_tile,
					groups[i]->shared_tile,
					groups[j]->shared_tile)) {
				gpu_array_ref_group_free(group);
				continue;
			}

			if (group->depth < groups[i]->depth ||
			    group->depth < groups[j]->depth)
				recompute_overlap = 1;
			gpu_array_ref_group_free(groups[i]);
			gpu_array_ref_group_free(groups[j]);
			groups[i] = group;
			if (j != n - 1)
				groups[j] = groups[n - 1];
			n--;
		}
	}

	if (recompute_overlap)
		n = group_depth_overlapping_writes(kernel, n, groups, data);
	return n;
}

/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct gpu_local_array_info *array,
	int n, struct gpu_array_ref_group **groups)
{
	int i;

	array->n_group = n;
	array->groups = groups;

	for (i = 0; i < n; ++i)
		groups[i]->nr = i;
}

/* Combine all groups in "groups" into a single group and return
 * the new number of groups (1 or 0 if there were no groups to start with).
 */
static int join_all_groups(int n, struct gpu_array_ref_group **groups)
{
	int i;

	for (i = n - 1; i > 0; --i) {
		groups[0] = join_groups_and_free(groups[0], groups[i]);
		groups[i] = NULL;
		n--;
	}

	return n;
}

/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the depth of the effective tile.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then we compute a single
 * reference group without trying to find any tiles
 * since we do not map such arrays to private or shared
 * memory.
 */
static int group_array_references(struct ppcg_kernel *kernel,
	struct gpu_local_array_info *local, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);
	struct gpu_array_ref_group **groups;

	groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
					local->array->n_ref);
	if (!groups)
		return -1;

	n = populate_array_references(local, groups, data);

	if (local->array->has_compound_element) {
		n = join_all_groups(n, groups);
		set_array_groups(local, n, groups);
		return 0;
	}

	n = group_overlapping_writes(kernel, n, groups, data);

	for (i = 0; i < n; ++i)
		if (compute_group_bounds(kernel, groups[i], data) < 0)
			n = -1;

	n = group_depth_overlapping_writes(kernel, n, groups, data);

	n = group_common_shared_memory_tile(kernel, local->array,
					    n, groups, data);

	set_array_groups(local, n, groups);

	if (n >= 0)
		return 0;

	for (i = 0; i < local->array->n_ref; ++i)
		gpu_array_ref_group_free(groups[i]);
	return -1;
}

/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of "host_schedule".
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges_in_host(struct ppcg_kernel *kernel,
	__isl_take isl_union_map *host_schedule)
{
	int i;
	isl_union_map *sched;
	isl_union_set *domain;
	isl_union_map *same_host_iteration;

	kernel->any_force_private = 0;

	sched = isl_union_map_universe(isl_union_map_copy(host_schedule));
	domain = isl_union_map_domain(sched);

	same_host_iteration = isl_union_map_apply_range(host_schedule,
		isl_union_map_reverse(isl_union_map_copy(host_schedule)));

	for (i = 0; i < kernel->n_array; ++i) {
		struct gpu_local_array_info *local = &kernel->array[i];
		isl_union_map *order;

		local->force_private = 0;
		if (local->array->n_index != 0)
			continue;
		order = isl_union_map_copy(local->array->dep_order);
		order = isl_union_map_intersect_domain(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect_range(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect(order,
				isl_union_map_copy(same_host_iteration));
		if (!isl_union_map_is_empty(order)) {
			local->force_private = 1;
			kernel->any_force_private = 1;
		}
		isl_union_map_free(order);
	}

	isl_union_map_free(same_host_iteration);
	isl_union_set_free(domain);
}

/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of the host schedule, i.e., the prefix
 * schedule at "node".
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *sched;

	if (!kernel->options->live_range_reordering)
		return;

	sched = isl_schedule_node_get_prefix_schedule_union_map(node);

	check_scalar_live_ranges_in_host(kernel, sched);
}

/* Create a set of dimension data->thread_depth + data->n_thread
 * that equates the residue of the final data->n_thread dimensions
 * modulo the "sizes" to the thread identifiers.
 * "space" is a parameter space containing the thread identifiers.
 * Store the computed set in data->privatization.
 */
static void compute_privatization(struct gpu_group_data *data,
	__isl_take isl_space *space, int *sizes)
{
	int i;
	isl_ctx *ctx;
	isl_set *set;
	isl_local_space *ls;
	isl_val *v;

	ctx = isl_union_map_get_ctx(data->shared_sched);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set,
				data->thread_depth + data->n_thread);
	set = isl_set_universe(space);
	space = isl_set_get_space(set);
	ls = isl_local_space_from_space(space);

	for (i = 0; i < data->n_thread; ++i) {
		isl_aff *aff, *aff2;
		isl_constraint *c;
		char name[20];
		int pos;

		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_set, data->thread_depth + i);
		v = isl_val_int_from_si(ctx, sizes[i]);
		aff = isl_aff_mod_val(aff, v);
		snprintf(name, sizeof(name), "t%d", i);
		pos = isl_set_find_dim_by_name(set, isl_dim_param, name);
		aff2 = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_param, pos);
		aff = isl_aff_sub(aff, aff2);
		c = isl_equality_from_aff(aff);
		set = isl_set_add_constraint(set, c);
	}

	isl_local_space_free(ls);
	data->privatization = set;
}

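/* For example, assuming data->n_thread is 2 and "sizes" is {8, 4},
 * the computed set constrains the final two dimensions x and y by
 *
 *	x mod 8 = t0 and y mod 4 = t1
 *
 * with t0 and t1 the thread identifier parameters.
 */
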
/* Group references of all arrays in "kernel".
 * "node" points to the kernel mark.
 *
 * We first extract all required schedule information into
 * a gpu_group_data structure and then consider each array
 * in turn.
 */
int gpu_group_references(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	int i;
	int r = 0;
	isl_space *space;
	struct gpu_group_data data;

	check_scalar_live_ranges(kernel, node);

	data.scop = kernel->prog->scop;

	data.kernel_depth = isl_schedule_node_get_schedule_depth(node);
	data.host_sched = isl_schedule_node_get_prefix_schedule_relation(node);

	node = isl_schedule_node_copy(node);
	node = gpu_tree_move_down_to_thread(node, kernel->core);
	data.shared_sched =
		isl_schedule_node_get_prefix_schedule_relation(node);
	data.shared_sched = isl_union_map_detect_equalities(data.shared_sched);

	node = isl_schedule_node_child(node, 0);
	data.thread_depth = isl_schedule_node_get_schedule_depth(node);
	data.n_thread = isl_schedule_node_band_n_member(node);
	data.thread_sched = isl_union_map_copy(data.shared_sched);
	data.thread_sched = isl_union_map_flat_range_product(data.thread_sched,
		isl_schedule_node_band_get_partial_schedule_union_map(node));
	data.thread_sched = isl_union_map_detect_equalities(data.thread_sched);
	node = isl_schedule_node_child(node, 0);
	data.full_sched = isl_union_map_copy(data.thread_sched);
	data.full_sched = isl_union_map_flat_range_product(data.full_sched,
		isl_schedule_node_get_subtree_schedule_union_map(node));
	isl_schedule_node_free(node);

	space = isl_union_set_get_space(kernel->thread_filter);
	compute_privatization(&data, space, kernel->block_dim);

	for (i = 0; i < kernel->n_array; ++i) {
		r = group_array_references(kernel, &kernel->array[i], &data);
		if (r < 0)
			break;
	}

	isl_union_map_free(data.host_sched);
	isl_union_map_free(data.shared_sched);
	isl_union_map_free(data.thread_sched);
	isl_union_map_free(data.full_sched);
	isl_set_free(data.privatization);

	return r;
}

/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first group->depth schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile".  In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
	struct gpu_array_tile *tile, __isl_keep isl_space *space,
	__isl_keep isl_multi_aff *insert_array)
{
	int i;
	isl_ctx *ctx;
	isl_multi_aff *shift;
	isl_multi_val *stride;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *tiling;

	ctx = isl_space_get_ctx(space);
	space2 = isl_space_domain(isl_space_copy(space));
	ls = isl_local_space_from_space(space2);
	space2 = isl_space_range(isl_space_copy(space));
	stride = isl_multi_val_zero(space2);
	shift = isl_multi_aff_zero(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i) {
		struct gpu_array_bound *bound = &tile->bound[i];
		isl_val *stride_i;
		isl_aff *shift_i;

		if (tile->bound[i].shift) {
			stride_i = isl_val_copy(bound->stride);
			shift_i = isl_aff_copy(bound->shift);
		} else {
			stride_i = isl_val_one(ctx);
			shift_i = isl_aff_zero_on_domain(
					isl_local_space_copy(ls));
		}

		stride = isl_multi_val_set_val(stride, i, stride_i);
		shift = isl_multi_aff_set_aff(shift, i, shift_i);
	}
	isl_local_space_free(ls);

	shift = isl_multi_aff_pullback_multi_aff(shift,
					isl_multi_aff_copy(insert_array));

	tiling = isl_multi_aff_range_map(isl_space_copy(space));
	tiling = isl_multi_aff_add(tiling, shift);
	tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

	return tiling;
}

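/* For example, assuming a one-dimensional array with stride 2 and
 * shift(i) = i in "tile", the constructed isl_multi_aff is
 *
 *	{ [D[i] -> A[a]] -> A'[(a + i)/2] }
 */
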
/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first group->depth schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile.  The name of T is the name of the local
 * array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 */
void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group *group)
{
	int i;
	int dim;
	struct gpu_array_tile *tile;
	struct gpu_array_info *array = group->array;
	isl_space *space;
	isl_multi_aff *tiling, *lb, *insert_array;
	isl_printer *p;
	char *local_name;

	tile = group->private_tile;
	if (!tile)
		tile = group->shared_tile;
	if (!tile)
		return;

	space = isl_map_get_space(group->access);
	dim = isl_space_dim(space, isl_dim_in);
	space = isl_space_drop_dims(space, isl_dim_in, group->depth,
				    dim - group->depth);
	insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i)
		if (tile->bound[i].shift)
			break;

	if (i < tile->n)
		tiling = strided_tile(tile, space, insert_array);
	else
		tiling = isl_multi_aff_range_map(isl_space_copy(space));

	lb = isl_multi_aff_zero(space);
	for (i = 0; i < tile->n; ++i) {
		isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
		lb = isl_multi_aff_set_aff(lb, i, lb_i);
	}
	lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

	tiling = isl_multi_aff_sub(tiling, lb);

	p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
	p = gpu_array_ref_group_print_name(group, p);
	local_name = isl_printer_get_str(p);
	isl_printer_free(p);
	tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
	free(local_name);

	tile->tiling = tiling;
}