#include "gpu_array_tile.h"

/* Print the name of the local copy of a given group of array references.
 */
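/* For example, a group with a shared memory tile for an array "A" is
 * printed as "shared_A"; if the local array has more than one group,
 * the group number is appended, e.g., "shared_A_1" for the group with
 * "nr" equal to 1.
 */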
__isl_give isl_printer *gpu_array_ref_group_print_name(
	struct gpu_array_ref_group *group, __isl_take isl_printer *p)
{
	int global = 0;

	if (group->private_tile)
		p = isl_printer_print_str(p, "private_");
	else if (group->shared_tile)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->local_array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *gpu_array_ref_group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		    (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					isl_union_map_from_map(map_i));
	}

	return access;
}
/* Return the effective gpu_array_tile associated to "group" or
 * NULL if there is no such gpu_array_tile.
 * If we have computed both a private and a shared tile, then
 * the private tile is used.
 */
struct gpu_array_tile *gpu_array_ref_group_tile(
	struct gpu_array_ref_group *group)
{
	if (group->private_tile)
		return group->private_tile;
	if (group->shared_tile)
		return group->shared_tile;

	return NULL;
}
/* Given a constraint of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, __isl_keep isl_val *stride, int sign)
{
	int i;
	isl_val *v;
	isl_space *space;
	unsigned nparam, nvar;
	isl_aff *aff;

	isl_val_free(bound->stride);
	bound->stride = isl_val_copy(stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	v = isl_constraint_get_constant_val(c);
	if (sign < 0)
		v = isl_val_neg(v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant_val(aff, v);

	for (i = 0; i < nparam; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_param, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_param, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_in, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_in, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_in, i, v);
	}

	bound->shift = aff;
}
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
	int i;
	isl_ctx *ctx;
	isl_val *v;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	ctx = isl_constraint_get_ctx(c);
	n_div = isl_constraint_dim(c, isl_dim_div);
	v = isl_constraint_get_coefficient_val(c, isl_dim_out, 0);

	if (n_div && (isl_val_is_one(v) || isl_val_is_negone(v))) {
		int s = isl_val_sgn(v);
		isl_val *stride = isl_val_zero(ctx);

		isl_val_free(v);
		for (i = 0; i < n_div; ++i) {
			v = isl_constraint_get_coefficient_val(c,
							isl_dim_div, i);
			stride = isl_val_gcd(stride, v);
		}
		if (!isl_val_is_zero(stride) &&
		    isl_val_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

		isl_val_free(stride);
	} else
		isl_val_free(v);

	isl_constraint_free(c);

	return 0;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * Next, we construct a mapping of the form
 *
 *	[D -> i] -> [D -> (i + S(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
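/* For example, with stride g = 2 and shift s(D) = n for some parameter n,
 * the constructed mapping is
 *
 *	[D -> i] -> [D -> (i + n)/2]
 *
 * so that every index satisfying i + n = 0 mod 2 is mapped to a densely
 * numbered index in the tile.
 */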
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	bound->stride = NULL;

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (!bound->stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bmap = isl_basic_map_apply_range(shift, scale);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.
 * This variable is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound. If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
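/* For example, if the bounds on index i include t <= i <= t + 31 for an
 * outer schedule dimension t, then the lower bound constraint i >= t gives
 * ceil(b(x)/m) = t, the expression i - t + 1 has constant upper bound 32,
 * so the size becomes 32 and the lower bound becomes t.
 */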
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return 0;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return 0;
}
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}
/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
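/* For example, for an access relation { D[t, i] -> A[t + i] : 0 <= i <= 31 },
 * projecting onto the single array index gives a range of 32 consecutive
 * elements starting at the parametric offset t, so a tile of size 32 is
 * found for that dimension.
 */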
static int can_tile(__isl_keep isl_map *access, struct gpu_array_tile *tile)
{
	int i;

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
						1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return -1;
	}

	return 0;
}
/* Construct a map from domain_dim to domain_dim that increments
 * the dimension at position "pos" and leaves all other dimensions
 * unchanged.
 */
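/* For example, on a two-dimensional space, next(space, 1) constructs
 * the map { [i0, i1] -> [i0, i1 + 1] }.
 */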
static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
{
	int i;
	int len = isl_space_dim(domain_dim, isl_dim_set);
	isl_space *dim;
	isl_basic_map *next;
	isl_local_space *ls;

	dim = isl_space_map_from_set(domain_dim);
	next = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		isl_constraint *c;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		if (i == pos)
			c = isl_constraint_set_constant_si(c, 1);
		next = isl_basic_map_add_constraint(next, c);
	}

	isl_local_space_free(ls);

	return isl_map_from_basic_map(next);
}
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one block dimension.
 */
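/* For example, an access A[32 * b + t], where t is the dimension that gets
 * wrapped over the last thread index, is coalesced: incrementing t by one
 * increments the last array index by one. An access A[32 * t + b] is not.
 */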
static int access_is_coalesced(struct gpu_gen *gen,
	__isl_keep isl_union_map *access)
{
	isl_space *dim;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(gen->tiled_sched));
	access_map = isl_map_from_union_map(access);

	dim = isl_map_get_space(access_map);
	dim = isl_space_domain(dim);
	next_thread_x = next(dim, gen->shared_len + gen->kernel->n_block - 1);

	dim = isl_map_get_space(access_map);
	dim = isl_space_range(dim);
	next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
/* Given an access relation in terms of the first gen->shared_len + gen->n_block
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first gen->shared_len dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
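/* For example, with gen->shared_len equal to 2, the first two schedule
 * dimensions are equated to fresh parameters "s0" and "s1", and the
 * remaining (thread) dimensions are then required to map one-to-one
 * onto the accessed array elements.
 */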
static int access_is_bijective(struct gpu_gen *gen, __isl_keep isl_map *access)
{
	int res;
	isl_set *par;
	isl_space *space;
	isl_id_list *ids;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	ids = ppcg_scop_generate_names(gen->prog->scop, gen->shared_len, "s");
	par = parametrization(space, gen->shared_len + gen->kernel->n_block,
				0, ids);
	isl_id_list_free(ids);
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}
/* Look for the last shared tile loop that affects the offset of "tile"
 * and return the result.
 * If there is no such loop, then return the index of the loop
 * before the first shared tile loop, in particular gen->tile_first - 1.
 */
static int compute_tile_last_shared(struct gpu_gen *gen,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = gen->shared_len - 1; j >= gen->tile_first; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return j;
}
/* Look for the last shared tile loop that affects the offset of the
 * shared or private tile and store the result in group->last_shared.
 * If there is no such loop, then group->last_shared is set to a value
 * before the first shared tile loop, in particular gen->tile_first - 1.
 * If there is no tile defined on the array reference group,
 * then set group->last_shared to gen->shared_len - 1.
 */
static void set_last_shared(struct gpu_gen *gen,
	struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	group->last_shared = gen->shared_len - 1;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return;

	group->last_shared = compute_tile_last_shared(gen, tile);
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_local_array_info *local,
	__isl_keep isl_union_map *sched, struct gpu_array_ref_group **groups)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(sched);

	n = 0;
	for (i = 0; i < local->array->n_ref; ++i) {
		isl_union_map *umap;
		isl_map *map;
		struct gpu_array_ref_group *group;
		struct gpu_stmt_access *access = local->array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);
		map = isl_map_detect_equalities(map);

		group = isl_calloc_type(ctx, struct gpu_array_ref_group);
		if (!group)
			return -1;
		group->local_array = local;
		group->array = local->array;
		group->access = map;
		group->write = access->write;
		group->exact_write = access->exact_write;
		group->slice = access->n_index < local->array->n_index;
		group->refs = &local->array->refs[i];
		group->n_ref = 1;

		groups[n++] = group;
	}

	return n;
}
/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
struct gpu_array_ref_group *gpu_array_ref_group_free(
	struct gpu_array_ref_group *group)
{
	if (!group)
		return NULL;
	gpu_array_tile_free(group->shared_tile);
	gpu_array_tile_free(group->private_tile);
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);
	return NULL;
}
/* Given a map where the input dimensions represent the tile loops,
 * eliminate the innermost of those that have a fixed value
 * until we reach one that does not (obviously) have a fixed value.
 */
static __isl_give isl_map *eliminate_fixed_inner_loops(
	__isl_take isl_map *access)
{
	int i, n;

	n = isl_map_dim(access, isl_dim_in);

	for (i = n - 1; i >= 0; --i) {
		if (!map_plain_is_fixed(access, isl_dim_in, i))
			break;
		access = isl_map_eliminate(access, isl_dim_in, i, 1);
	}

	return access;
}
/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop. In particular, ignore any inner dimension
 * with a fixed value.
 * The copying to and from shared memory will be performed within
 * the innermost actual loop so we are only allowed to consider
 * the dimensions up to that innermost loop while checking whether
 * two access relations overlap.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int empty;
	isl_map *access1, *access2;

	access1 = isl_map_copy(group1->access);
	access1 = eliminate_fixed_inner_loops(access1);
	access2 = isl_map_copy(group2->access);
	access2 = eliminate_fixed_inner_loops(access2);
	access1 = isl_map_intersect(access1, access2);
	empty = isl_map_is_empty(access1);
	isl_map_free(access1);

	return !empty;
}
/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int i;
	isl_ctx *ctx;
	struct gpu_array_ref_group *group;

	ctx = isl_map_get_ctx(group1->access);
	group = isl_calloc_type(ctx, struct gpu_array_ref_group);
	if (!group)
		return NULL;
	group->local_array = group1->local_array;
	group->array = group1->array;
	group->access = isl_map_union(isl_map_copy(group1->access),
					isl_map_copy(group2->access));
	group->write = group1->write || group2->write;
	group->exact_write = group1->exact_write && group2->exact_write;
	group->slice = group1->slice || group2->slice;
	group->n_ref = group1->n_ref + group2->n_ref;
	group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
					group->n_ref);
	if (!group->refs)
		return gpu_array_ref_group_free(group);
	for (i = 0; i < group1->n_ref; ++i)
		group->refs[i] = group1->refs[i];
	for (i = 0; i < group2->n_ref; ++i)
		group->refs[group1->n_ref + i] = group2->refs[i];

	return group;
}
/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	struct gpu_array_ref_group *group;

	group = join_groups(group1, group2);
	gpu_array_ref_group_free(group1);
	gpu_array_ref_group_free(group2);
	return group;
}
/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct ppcg_kernel *kernel,
	__isl_keep isl_union_map *access)
{
	isl_ctx *ctx;
	isl_printer *p;

	ctx = isl_union_map_get_ctx(access);
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_print_str(p, "Array reference group ");
	p = isl_printer_print_union_map(p, access);
	p = isl_printer_print_str(p,
	    " not considered for mapping to shared memory in kernel");
	p = isl_printer_print_int(p, kernel->id);
	p = isl_printer_print_str(p,
	    " because it exhibits no reuse and is considered to be coalesced");
	p = isl_printer_end_line(p);
	isl_printer_free(p);
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return 0 on success and -1 on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory. Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse. Moreover, we require that the access is private
 * to the thread. That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the shared tile loop iterators
 * and the shared point loop iterators that will be wrapped over the
 * threads to the array elements.
 * We actually check that those iterators that will be wrapped
 * partition the array space. This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops. That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static int compute_group_bounds_core(struct gpu_gen *gen,
	struct gpu_array_ref_group *group)
{
	isl_ctx *ctx = isl_space_get_ctx(group->array->space);
	isl_union_map *access;
	int n_index = group->array->n_index;
	int no_reuse, coalesced;
	isl_map *acc;
	int force_private = group->local_array->force_private;
	int use_shared = gen->options->use_shared_memory &&
				gen->kernel->n_block > 0;
	int use_private = force_private || gen->options->use_private_memory;
	int r = 0;

	if (!use_shared && !use_private)
		return 0;
	if (gpu_array_is_read_only_scalar(group->array))
		return 0;
	if (!force_private && !group->exact_write)
		return 0;
	if (group->slice)
		return 0;

	access = gpu_array_ref_group_access_relation(group, 1, 1);
	no_reuse = isl_union_map_is_injective(access);
	if (no_reuse < 0)
		r = -1;
	if (use_shared && no_reuse)
		coalesced = access_is_coalesced(gen, access);

	if (r >= 0 && gen->options->debug->verbose &&
	    use_shared && no_reuse && coalesced)
		report_no_reuse_and_coalesced(gen->kernel, access);

	if (use_shared && (!no_reuse || !coalesced)) {
		group->shared_tile = gpu_array_tile_create(ctx,
							group->array->n_index);
		if (!group->shared_tile)
			r = -1;
		else if (!can_tile(group->access, group->shared_tile))
			group->shared_tile =
					gpu_array_tile_free(group->shared_tile);
	}

	if (r < 0 || (!force_private && (!use_private || no_reuse))) {
		isl_union_map_free(access);
		return r;
	}

	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(gen->shared_sched));

	acc = isl_map_from_union_map(access);

	if (!force_private && !access_is_bijective(gen, acc)) {
		isl_map_free(acc);
		return 0;
	}

	group->private_tile = gpu_array_tile_create(gen->ctx, n_index);
	if (!group->private_tile) {
		isl_map_free(acc);
		return -1;
	}
	acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
	if (!can_tile(acc, group->private_tile))
		group->private_tile = gpu_array_tile_free(group->private_tile);

	isl_map_free(acc);

	if (force_private && !group->private_tile)
		isl_die(ctx, isl_error_internal,
			"unable to map array reference group to registers",
			return -1);

	return 0;
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set last_shared.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct gpu_gen *gen,
	struct gpu_array_ref_group *group)
{
	if (compute_group_bounds_core(gen, group) < 0)
		return -1;
	set_last_shared(gen, group);

	return 0;
}
/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct gpu_gen *gen,
	int n, struct gpu_array_ref_group **groups,
	int (*overlap)(struct gpu_array_ref_group *group1,
		struct gpu_array_ref_group *group2), int compute_bounds)
{
	int i, j;

	for (i = 0; i < n; ++i) {
		for (j = n - 1; j > i; --j) {
			if (!groups[i]->write && !groups[j]->write)
				continue;

			if (!overlap(groups[i], groups[j]))
				continue;

			groups[i] = join_groups_and_free(groups[i], groups[j]);
			if (!groups[i])
				return -1;
			groups[j] = groups[n - 1];
			groups[n - 1] = NULL;
			n--;

			if (compute_bounds &&
			    compute_group_bounds(gen, groups[i]) < 0)
				return -1;
		}
	}

	return n;
}
/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 */
static int group_overlapping_writes(struct gpu_gen *gen,
	int n, struct gpu_array_ref_group **groups)
{
	return group_writes(gen, n, groups, &accesses_overlap, 0);
}
/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->last_shared, group2->last_shared) loops.
 */
static int last_shared_accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int last_shared;
	int dim;
	int empty;
	isl_map *map_i, *map_j, *map;

	last_shared = group1->last_shared;
	if (group2->last_shared < last_shared)
		last_shared = group2->last_shared;
	map_i = isl_map_copy(group1->access);
	dim = isl_map_dim(map_i, isl_dim_in);
	map_i = isl_map_eliminate(map_i, isl_dim_in,
			last_shared + 1, dim - (last_shared + 1));
	map_j = isl_map_copy(group2->access);
	map_j = isl_map_eliminate(map_j, isl_dim_in,
			last_shared + 1, dim - (last_shared + 1));
	map = isl_map_intersect(map_i, map_j);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	return !empty;
}
/* If two groups have overlapping access relations (within the outer
 * last_shared loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 */
static int group_last_shared_overlapping_writes(struct gpu_gen *gen, int n,
	struct gpu_array_ref_group **groups)
{
	return group_writes(gen, n, groups, &last_shared_accesses_overlap, 1);
}
/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
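/* For example, if the merged tile has 32 elements while the two original
 * tiles have 20 elements each, then the merged tile is smaller (32 < 40)
 * and merging saves shared memory.
 */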
static int smaller_tile(struct gpu_array_tile *tile,
	struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
	int smaller;
	isl_val *size, *size1, *size2;

	size = gpu_array_tile_size(tile);
	size1 = gpu_array_tile_size(tile1);
	size2 = gpu_array_tile_size(tile2);

	size = isl_val_sub(size, size1);
	size = isl_val_sub(size, size2);
	smaller = isl_val_is_neg(size);

	isl_val_free(size);

	return smaller;
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * If merging two groups decreases the "last_shared" dimension of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct gpu_gen *gen,
	struct gpu_array_info *array, int n,
	struct gpu_array_ref_group **groups)
{
	int i, j;
	int recompute_overlap = 0;
	isl_ctx *ctx = isl_space_get_ctx(array->space);

	for (i = 0; i < n; ++i) {
		if (!groups[i]->shared_tile)
			continue;
		for (j = n - 1; j > i; --j) {
			isl_map *map;
			int empty;
			struct gpu_array_ref_group *group;

			if (!groups[j]->shared_tile)
				continue;

			map = isl_map_intersect(isl_map_copy(groups[i]->access),
					isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);
			isl_map_free(map);

			if (empty)
				continue;

			group = join_groups(groups[i], groups[j]);
			if (compute_group_bounds(gen, group) < 0) {
				gpu_array_ref_group_free(group);
				return -1;
			}
			if (!group->shared_tile ||
			    !smaller_tile(group->shared_tile,
					groups[i]->shared_tile,
					groups[j]->shared_tile)) {
				gpu_array_ref_group_free(group);
				continue;
			}

			if (group->last_shared < groups[i]->last_shared ||
			    group->last_shared < groups[j]->last_shared)
				recompute_overlap = 1;
			gpu_array_ref_group_free(groups[i]);
			gpu_array_ref_group_free(groups[j]);
			groups[i] = group;
			groups[j] = groups[n - 1];
			n--;
		}
	}

	if (recompute_overlap)
		n = group_last_shared_overlapping_writes(gen, n, groups);
	return n;
}
/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct gpu_local_array_info *array,
	int n, struct gpu_array_ref_group **groups)
{
	int i;

	array->n_group = n;
	array->groups = groups;

	for (i = 0; i < n; ++i)
		groups[i]->nr = i;
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the "last_shared" property.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then there is no need to compute
 * reference groups since we do not map such arrays to private or shared
 * memory.
 */
static int group_array_references(struct gpu_gen *gen,
	struct gpu_local_array_info *local, __isl_keep isl_union_map *sched)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	struct gpu_array_ref_group **groups;

	if (local->array->has_compound_element)
		return 0;

	groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
					local->array->n_ref);
	if (!groups)
		return -1;

	n = populate_array_references(local, sched, groups);

	n = group_overlapping_writes(gen, n, groups);

	for (i = 0; i < n; ++i)
		if (compute_group_bounds(gen, groups[i]) < 0)
			n = -1;

	n = group_last_shared_overlapping_writes(gen, n, groups);

	n = group_common_shared_memory_tile(gen, local->array, n, groups);

	set_array_groups(local, n, groups);

	if (n >= 0)
		return 0;

	for (i = 0; i < local->array->n_ref; ++i)
		gpu_array_ref_group_free(groups[i]);
	return -1;
}
/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of the host schedule.
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges(struct gpu_gen *gen)
{
	int i;
	isl_map *proj;
	isl_union_map *sched;
	isl_union_set *domain;
	isl_union_map *same_host_iteration;

	gen->kernel->any_force_private = 0;

	if (!gen->options->live_range_reordering)
		return;

	sched = gen->shared_sched;
	sched = isl_union_map_universe(isl_union_map_copy(sched));
	domain = isl_union_map_domain(sched);

	sched = isl_union_map_copy(gen->sched);
	proj = projection(isl_union_map_get_space(sched),
				gen->untiled_len, gen->tile_first);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
	same_host_iteration = isl_union_map_apply_range(sched,
			isl_union_map_reverse(isl_union_map_copy(sched)));

	for (i = 0; i < gen->kernel->n_array; ++i) {
		struct gpu_local_array_info *local = &gen->kernel->array[i];
		isl_union_map *order;

		local->force_private = 0;
		if (local->array->n_index != 0)
			continue;
		order = isl_union_map_copy(local->array->dep_order);
		order = isl_union_map_intersect_domain(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect_range(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect(order,
				isl_union_map_copy(same_host_iteration));
		if (!isl_union_map_is_empty(order)) {
			local->force_private = 1;
			gen->kernel->any_force_private = 1;
		}
		isl_union_map_free(order);
	}

	isl_union_map_free(same_host_iteration);
	isl_union_set_free(domain);
}
/* Group references of all arrays in the current kernel.
 */
int gpu_group_references(struct gpu_gen *gen)
{
	int i;
	int r = 0;
	isl_union_map *sched;

	check_scalar_live_ranges(gen);

	sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
					isl_union_map_copy(gen->shared_proj));

	for (i = 0; i < gen->kernel->n_array; ++i) {
		r = group_array_references(gen, &gen->kernel->array[i], sched);
		if (r < 0)
			break;
	}

	isl_union_map_free(sched);

	return r;
}
/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first shared_len schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile". In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
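/* For example, for a one-dimensional array with stride 4 and
 * shift(i) = i0, the constructed isl_multi_aff maps
 * [D[i0] -> A[a]] to A'[(a + i0)/4].
 */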
static __isl_give isl_multi_aff *strided_tile(
	struct gpu_array_tile *tile, __isl_keep isl_space *space,
	__isl_keep isl_multi_aff *insert_array)
{
	int i;
	isl_ctx *ctx;
	isl_multi_aff *shift;
	isl_multi_val *stride;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *tiling;

	ctx = isl_space_get_ctx(space);
	space2 = isl_space_domain(isl_space_copy(space));
	ls = isl_local_space_from_space(space2);
	space2 = isl_space_range(isl_space_copy(space));
	stride = isl_multi_val_zero(space2);
	shift = isl_multi_aff_zero(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i) {
		struct gpu_array_bound *bound = &tile->bound[i];
		isl_val *stride_i;
		isl_aff *shift_i;

		if (tile->bound[i].shift) {
			stride_i = isl_val_copy(bound->stride);
			shift_i = isl_aff_copy(bound->shift);
		} else {
			stride_i = isl_val_one(ctx);
			shift_i = isl_aff_zero_on_domain(
					isl_local_space_copy(ls));
		}

		stride = isl_multi_val_set_val(stride, i, stride_i);
		shift = isl_multi_aff_set_aff(shift, i, shift_i);
	}
	isl_local_space_free(ls);

	shift = isl_multi_aff_pullback_multi_aff(shift,
					isl_multi_aff_copy(insert_array));

	tiling = isl_multi_aff_range_map(isl_space_copy(space));
	tiling = isl_multi_aff_add(tiling, shift);
	tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

	return tiling;
}
/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first shared_len schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile. The name of T is the name of the local
 * memory array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 */
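/* For example, if a group accesses A[t + i] with 0 <= i < 32 in iteration
 * D[t], then lb(i) = t and, in the absence of strides, the element
 * [D[t] -> A[a]] is mapped to tile element T[a - t].
 */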
void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group *group)
{
	int i;
	struct gpu_array_tile *tile;
	struct gpu_array_info *array = group->array;
	isl_space *space;
	isl_multi_aff *tiling, *lb, *insert_array;
	isl_printer *p;
	char *local_name;

	tile = group->private_tile;
	if (!tile)
		tile = group->shared_tile;
	if (!tile)
		return;

	space = isl_map_get_space(group->access);
	insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i)
		if (tile->bound[i].shift)
			break;

	if (i < tile->n)
		tiling = strided_tile(tile, space, insert_array);
	else
		tiling = isl_multi_aff_range_map(isl_space_copy(space));

	lb = isl_multi_aff_zero(space);
	for (i = 0; i < tile->n; ++i) {
		isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
		lb = isl_multi_aff_set_aff(lb, i, lb_i);
	}
	lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

	tiling = isl_multi_aff_sub(tiling, lb);

	p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
	p = gpu_array_ref_group_print_name(group, p);
	local_name = isl_printer_get_str(p);
	isl_printer_free(p);
	tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
	free(local_name);

	tile->tiling = tiling;
}