3 #include "gpu_array_tile.h"
7 /* Print the name of the local copy of a given group of array references.
9 __isl_give isl_printer
*gpu_array_ref_group_print_name(
10 struct gpu_array_ref_group
*group
, __isl_take isl_printer
*p
)
14 if (group
->private_tile
)
15 p
= isl_printer_print_str(p
, "private_");
16 else if (group
->shared_tile
)
17 p
= isl_printer_print_str(p
, "shared_");
20 p
= isl_printer_print_str(p
, group
->array
->name
);
21 if (!global
&& group
->local_array
->n_group
> 1) {
22 p
= isl_printer_print_str(p
, "_");
23 p
= isl_printer_print_int(p
, group
->nr
);
29 /* Return the union of all read (read = 1) and/or write (write = 1)
30 * access relations in the group.
32 __isl_give isl_union_map
*gpu_array_ref_group_access_relation(
33 struct gpu_array_ref_group
*group
, int read
, int write
)
36 isl_union_map
*access
;
38 access
= isl_union_map_empty(isl_map_get_space(group
->access
));
39 for (i
= 0; i
< group
->n_ref
; ++i
) {
42 if (!((read
&& group
->refs
[i
]->read
) ||
43 (write
&& group
->refs
[i
]->write
)))
45 map_i
= isl_map_copy(group
->refs
[i
]->access
);
46 access
= isl_union_map_union(access
,
47 isl_union_map_from_map(map_i
));
53 /* Return the effective gpu_array_tile associated to "group" or
54 * NULL if there is no such gpu_array_tile.
55 * If we have computed both a private and a shared tile, then
56 * the private tile is used.
58 struct gpu_array_tile
*gpu_array_ref_group_tile(
59 struct gpu_array_ref_group
*group
)
61 if (group
->private_tile
)
62 return group
->private_tile
;
63 if (group
->shared_tile
)
64 return group
->shared_tile
;
68 /* Does the tile associated to "group" require unrolling of the schedule
69 * dimensions mapped to threads?
70 * Note that this can only happen for private tiles.
72 int gpu_array_ref_group_requires_unroll(struct gpu_array_ref_group
*group
)
74 struct gpu_array_tile
*tile
;
76 tile
= gpu_array_ref_group_tile(group
);
79 return tile
->requires_unroll
;
86 * or -a(p,i) - j = g f(e) if sign < 0,
87 * store a(p,i) in bound->shift and g (stride) in bound->stride.
88 * a(p,i) is assumed to be an expression in only the parameters
89 * and the input dimensions.
91 static void extract_stride(__isl_keep isl_constraint
*c
,
92 struct gpu_array_bound
*bound
, __isl_keep isl_val
*stride
, int sign
)
101 isl_val_free(bound
->stride
);
102 bound
->stride
= isl_val_copy(stride
);
104 space
= isl_constraint_get_space(c
);
105 space
= isl_space_domain(space
);
107 nparam
= isl_space_dim(space
, isl_dim_param
);
108 nvar
= isl_space_dim(space
, isl_dim_set
);
110 v
= isl_constraint_get_constant_val(c
);
113 aff
= isl_aff_zero_on_domain(isl_local_space_from_space(space
));
114 aff
= isl_aff_set_constant_val(aff
, v
);
116 for (i
= 0; i
< nparam
; ++i
) {
117 if (!isl_constraint_involves_dims(c
, isl_dim_param
, i
, 1))
119 v
= isl_constraint_get_coefficient_val(c
, isl_dim_param
, i
);
122 aff
= isl_aff_add_coefficient_val(aff
, isl_dim_param
, i
, v
);
125 for (i
= 0; i
< nvar
; ++i
) {
126 if (!isl_constraint_involves_dims(c
, isl_dim_in
, i
, 1))
128 v
= isl_constraint_get_coefficient_val(c
, isl_dim_in
, i
);
131 aff
= isl_aff_add_coefficient_val(aff
, isl_dim_in
, i
, v
);
137 /* Given an equality constraint of a map with a single output dimension j,
138 * check if the constraint is of the form
140 * a(p,i) + j = g f(e)
142 * with a(p,i) an expression in the parameters and input dimensions
143 * and f(e) an expression in the existentially quantified variables.
144 * If so, and if g is larger than any such g from a previously considered
145 * constraint, then call extract_stride to record the stride information
148 static int check_stride_constraint(__isl_take isl_constraint
*c
, void *user
)
154 struct gpu_array_bound
*bound
= user
;
156 ctx
= isl_constraint_get_ctx(c
);
157 n_div
= isl_constraint_dim(c
, isl_dim_div
);
158 v
= isl_constraint_get_coefficient_val(c
, isl_dim_out
, 0);
160 if (n_div
&& (isl_val_is_one(v
) || isl_val_is_negone(v
))) {
161 int s
= isl_val_sgn(v
);
162 isl_val
*stride
= isl_val_zero(ctx
);
165 for (i
= 0; i
< n_div
; ++i
) {
166 v
= isl_constraint_get_coefficient_val(c
,
168 stride
= isl_val_gcd(stride
, v
);
170 if (!isl_val_is_zero(stride
) &&
171 isl_val_gt(stride
, bound
->stride
))
172 extract_stride(c
, bound
, stride
, s
);
174 isl_val_free(stride
);
178 isl_constraint_free(c
);
182 /* Given contraints on an array index i, check if we can find
183 * a shift a(p) and a stride g such that
187 * If so, record the information in bound and apply the mapping
188 * i -> (i + a(p))/g to the array index in bounds and return
189 * the new constraints.
190 * If not, simply return the original constraints.
192 * If bounds is a subset of the space
196 * then the bound recorded in bound->shift is of the form
200 * with s(D) equal to a(p) above.
201 * Next, we construct a mapping of the form
203 * [D -> i] -> [D -> (i + S(D))/g]
205 * This mapping is computed as follows.
206 * We first introduce "i" in the domain through precomposition
207 * with [D -> i] -> D obtaining
211 * Adding [D -> i] -> i produces
213 * [D -> i] -> i + s(D)
215 * and the domain product with [D -> i] -> D yields
217 * [D -> i] -> [D -> i + s(D)]
219 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
221 static __isl_give isl_basic_map
*check_stride(struct gpu_array_bound
*bound
,
222 __isl_take isl_basic_map
*bounds
)
226 isl_basic_map
*shift
, *id
, *bmap
, *scale
;
230 bound
->stride
= NULL
;
232 hull
= isl_basic_map_affine_hull(isl_basic_map_copy(bounds
));
234 isl_basic_map_foreach_constraint(hull
, &check_stride_constraint
, bound
);
236 isl_basic_map_free(hull
);
241 shift
= isl_basic_map_from_aff(isl_aff_copy(bound
->shift
));
242 space
= isl_basic_map_get_space(bounds
);
243 bmap
= isl_basic_map_domain_map(isl_basic_map_universe(space
));
244 shift
= isl_basic_map_apply_range(bmap
, shift
);
245 space
= isl_basic_map_get_space(bounds
);
246 id
= isl_basic_map_range_map(isl_basic_map_universe(space
));
247 shift
= isl_basic_map_sum(id
, shift
);
248 space
= isl_basic_map_get_space(bounds
);
249 id
= isl_basic_map_domain_map(isl_basic_map_universe(space
));
250 shift
= isl_basic_map_range_product(id
, shift
);
252 space
= isl_space_domain(isl_basic_map_get_space(bounds
));
253 id
= isl_basic_map_identity(isl_space_map_from_set(space
));
254 space
= isl_space_range(isl_basic_map_get_space(bounds
));
255 aff
= isl_aff_zero_on_domain(isl_local_space_from_space(space
));
256 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, 0, 1);
257 aff
= isl_aff_scale_down_val(aff
, isl_val_copy(bound
->stride
));
258 scale
= isl_basic_map_from_aff(aff
);
259 scale
= isl_basic_map_product(id
, scale
);
261 bmap
= isl_basic_map_apply_range(shift
, scale
);
262 bset
= isl_basic_set_apply(isl_basic_map_wrap(bounds
), bmap
);
263 bounds
= isl_basic_set_unwrap(bset
);
268 /* Data used in compute_array_dim_size and compute_size_in_direction.
270 * pos is the position of the variable representing the array index,
271 * i.e., the variable for which want to compute the size. This variable
272 * is also the last variable in the set.
274 struct gpu_size_info
{
276 struct gpu_array_bound
*bound
;
280 /* Given a constraint from the basic set describing the bounds on
281 * an array index, check if it is a lower bound, say m i >= b(x), and,
282 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
283 * upper bound. If so, and if this bound is smaller than any bound
284 * derived from earlier constraints, set the size to this bound on
285 * the expression and the lower bound to ceil(b(x)/m).
287 static int compute_size_in_direction(__isl_take isl_constraint
*c
, void *user
)
289 struct gpu_size_info
*size
= user
;
296 nparam
= isl_basic_set_dim(size
->bset
, isl_dim_param
);
297 n_div
= isl_constraint_dim(c
, isl_dim_div
);
299 if (isl_constraint_involves_dims(c
, isl_dim_div
, 0, n_div
) ||
300 !isl_constraint_is_lower_bound(c
, isl_dim_set
, size
->pos
)) {
301 isl_constraint_free(c
);
305 aff
= isl_constraint_get_bound(c
, isl_dim_set
, size
->pos
);
306 aff
= isl_aff_ceil(aff
);
308 lb
= isl_aff_copy(aff
);
310 aff
= isl_aff_neg(aff
);
311 aff
= isl_aff_add_coefficient_si(aff
, isl_dim_in
, size
->pos
, 1);
313 v
= isl_basic_set_max_val(size
->bset
, aff
);
316 if (isl_val_is_int(v
)) {
317 v
= isl_val_add_ui(v
, 1);
318 if (!size
->bound
->size
|| isl_val_lt(v
, size
->bound
->size
)) {
319 isl_val_free(size
->bound
->size
);
320 size
->bound
->size
= isl_val_copy(v
);
321 lb
= isl_aff_drop_dims(lb
, isl_dim_in
, size
->pos
, 1);
322 isl_aff_free(size
->bound
->lb
);
323 size
->bound
->lb
= isl_aff_copy(lb
);
329 isl_constraint_free(c
);
334 /* Given a basic map "bounds" that maps parameters and input dimensions
335 * to a single output dimension, look for an expression in the parameters
336 * and input dimensions such that the range of the output dimension shifted
337 * by this expression is a constant.
339 * In particular, we currently only consider lower bounds on the output
340 * dimension as candidate expressions.
342 static int compute_array_dim_size(struct gpu_array_bound
*bound
,
343 __isl_take isl_basic_map
*bounds
)
345 struct gpu_size_info size
;
347 bounds
= isl_basic_map_detect_equalities(bounds
);
348 bounds
= check_stride(bound
, bounds
);
354 size
.pos
= isl_basic_map_dim(bounds
, isl_dim_in
);
355 size
.bset
= isl_basic_map_wrap(bounds
);
356 size
.bset
= isl_basic_set_flatten(size
.bset
);
357 size
.bset
= isl_set_simple_hull(isl_basic_set_compute_divs(size
.bset
));
358 isl_basic_set_foreach_constraint(size
.bset
, &compute_size_in_direction
,
360 isl_basic_set_free(size
.bset
);
362 return bound
->size
? 0 : -1;
365 /* Check if we can find a memory tile for the given array
366 * based on the given accesses, and if so, put the results in "tile".
368 * We project the accesses on each index in turn and look for a parametric
369 * offset such that the size is constant.
371 static int can_tile(__isl_keep isl_map
*access
, struct gpu_array_tile
*tile
)
375 for (i
= 0; i
< tile
->n
; ++i
) {
379 access_i
= isl_map_copy(access
);
380 access_i
= isl_map_project_out(access_i
, isl_dim_out
, 0, i
);
381 access_i
= isl_map_project_out(access_i
, isl_dim_out
,
382 1, tile
->n
- (i
+ 1));
383 access_i
= isl_map_compute_divs(access_i
);
384 hull
= isl_map_simple_hull(access_i
);
385 if (compute_array_dim_size(&tile
->bound
[i
], hull
) < 0)
392 /* Construct a map from domain_dim to domain_dim that increments
393 * the dimension at position "pos" and leaves all other dimensions
396 static __isl_give isl_map
*next(__isl_take isl_space
*domain_dim
, int pos
)
399 int len
= isl_space_dim(domain_dim
, isl_dim_set
);
404 dim
= isl_space_map_from_set(domain_dim
);
405 next
= isl_basic_map_universe(isl_space_copy(dim
));
406 ls
= isl_local_space_from_space(dim
);
408 for (i
= 0; i
< len
; ++i
) {
411 c
= isl_equality_alloc(isl_local_space_copy(ls
));
412 c
= isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, 1);
413 c
= isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
415 c
= isl_constraint_set_constant_si(c
, 1);
416 next
= isl_basic_map_add_constraint(next
, c
);
419 isl_local_space_free(ls
);
421 return isl_map_from_basic_map(next
);
424 /* Check if the given access is coalesced.
425 * That is, check whether incrementing the dimension that will get
426 * wrapped over the last thread index results in incrementing
427 * the last array index.
429 * This function is only called for access relations without reuse and
430 * kernels with at least one block dimension.
432 static int access_is_coalesced(struct gpu_gen
*gen
,
433 __isl_keep isl_union_map
*access
)
437 isl_map
*next_thread_x
;
438 isl_map
*next_element
;
442 access
= isl_union_map_copy(access
);
443 access
= isl_union_map_apply_domain(access
,
444 isl_union_map_copy(gen
->tiled_sched
));
445 access_map
= isl_map_from_union_map(access
);
447 dim
= isl_map_get_space(access_map
);
448 dim
= isl_space_domain(dim
);
449 next_thread_x
= next(dim
, gen
->shared_len
+ gen
->kernel
->n_block
- 1);
451 dim
= isl_map_get_space(access_map
);
452 dim
= isl_space_range(dim
);
453 next_element
= next(dim
, isl_space_dim(dim
, isl_dim_set
) - 1);
455 map
= isl_map_apply_domain(next_thread_x
, isl_map_copy(access_map
));
456 map
= isl_map_apply_range(map
, access_map
);
458 coalesced
= isl_map_is_subset(map
, next_element
);
460 isl_map_free(next_element
);
466 /* Given an access relation in terms of at least gen->shared_len initial
467 * dimensions of the computed schedule, check if it is bijective for
468 * fixed values of the first gen->shared_len dimensions.
469 * We perform this check by equating these dimensions to parameters.
471 static int access_is_bijective(struct gpu_gen
*gen
, __isl_keep isl_map
*access
)
479 access
= isl_map_copy(access
);
480 space
= isl_space_params(isl_map_get_space(access
));
481 ids
= ppcg_scop_generate_names(gen
->prog
->scop
, gen
->shared_len
, "s");
482 dim
= isl_map_dim(access
, isl_dim_in
);
483 par
= parametrization(space
, dim
, 0, ids
);
484 isl_id_list_free(ids
);
485 access
= isl_map_intersect_domain(access
, par
);
486 res
= isl_map_is_bijective(access
);
487 isl_map_free(access
);
492 /* Compute the number of outer schedule tile dimensions that affect
493 * the offset of "tile".
494 * If there is no such dimension, then return the index
495 * of the first shared tile loop, i.e., gen->tile_first.
497 static int compute_tile_depth(struct gpu_gen
*gen
,
498 struct gpu_array_tile
*tile
)
502 for (j
= gen
->shared_len
- 1; j
>= gen
->tile_first
; --j
) {
503 for (i
= 0; i
< tile
->n
; ++i
) {
507 lb
= tile
->bound
[i
].lb
;
508 if (isl_aff_involves_dims(lb
, isl_dim_in
, j
, 1))
511 shift
= tile
->bound
[i
].shift
;
514 if (isl_aff_involves_dims(shift
, isl_dim_in
, j
, 1))
524 /* Determine the number of schedule dimensions that affect the offset of the
525 * shared or private tile and store the result in group->depth, with
526 * a lower bound of gen->tile_first.
527 * If there is no tile defined on the array reference group,
528 * then set group->depth to gen->shared_len.
530 static void set_depth(struct gpu_gen
*gen
,
531 struct gpu_array_ref_group
*group
)
533 struct gpu_array_tile
*tile
;
535 group
->depth
= gen
->shared_len
;
537 tile
= gpu_array_ref_group_tile(group
);
541 group
->depth
= compute_tile_depth(gen
, tile
);
544 /* Fill up the groups array with singleton groups, i.e., one group
545 * per reference, initializing the array, access, write, n_ref and refs fields.
546 * In particular the access field is initialized to the scheduled
547 * access relation of the array reference.
549 * Return the number of elements initialized, i.e., the number of
550 * active references in the current kernel.
552 static int populate_array_references(struct gpu_local_array_info
*local
,
553 __isl_keep isl_union_map
*sched
, struct gpu_array_ref_group
**groups
)
557 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
560 for (i
= 0; i
< local
->array
->n_ref
; ++i
) {
563 struct gpu_array_ref_group
*group
;
564 struct gpu_stmt_access
*access
= local
->array
->refs
[i
];
566 map
= isl_map_copy(access
->access
);
567 umap
= isl_union_map_from_map(map
);
568 umap
= isl_union_map_apply_domain(umap
,
569 isl_union_map_copy(sched
));
571 if (isl_union_map_is_empty(umap
)) {
572 isl_union_map_free(umap
);
576 map
= isl_map_from_union_map(umap
);
577 map
= isl_map_detect_equalities(map
);
579 group
= isl_calloc_type(ctx
, struct gpu_array_ref_group
);
582 group
->local_array
= local
;
583 group
->array
= local
->array
;
585 group
->write
= access
->write
;
586 group
->exact_write
= access
->exact_write
;
587 group
->slice
= access
->n_index
< local
->array
->n_index
;
588 group
->refs
= &local
->array
->refs
[i
];
597 /* If group->n_ref == 1, then group->refs was set by
598 * populate_array_references to point directly into
599 * group->array->refs and should not be freed.
600 * If group->n_ref > 1, then group->refs was set by join_groups
601 * to point to a newly allocated array.
603 struct gpu_array_ref_group
*gpu_array_ref_group_free(
604 struct gpu_array_ref_group
*group
)
608 gpu_array_tile_free(group
->shared_tile
);
609 gpu_array_tile_free(group
->private_tile
);
610 isl_map_free(group
->access
);
611 if (group
->n_ref
> 1)
617 /* Given a map where the input dimensions represent the tile loops,
618 * eliminate the innermost of those that have a fixed value
619 * until we reach one that does not (obviously) have a fixed value.
621 static __isl_give isl_map
*eliminate_fixed_inner_loops(
622 __isl_take isl_map
*access
)
626 n
= isl_map_dim(access
, isl_dim_in
);
628 for (i
= n
- 1; i
>= 0; --i
) {
629 if (!map_plain_is_fixed(access
, isl_dim_in
, i
))
631 access
= isl_map_eliminate(access
, isl_dim_in
, i
, 1);
636 /* Check if the access relations of group1 and group2 overlap within
637 * the innermost loop. In particular, ignore any inner dimension
638 * with a fixed value.
639 * The copying to and from shared memory will be performed within
640 * the innermost actual loop so we are only allowed to consider
641 * the dimensions up to that innermost loop while checking whether
642 * two access relations overlap.
644 static int accesses_overlap(struct gpu_array_ref_group
*group1
,
645 struct gpu_array_ref_group
*group2
)
648 isl_map
*access1
, *access2
;
650 access1
= isl_map_copy(group1
->access
);
651 access1
= eliminate_fixed_inner_loops(access1
);
652 access2
= isl_map_copy(group2
->access
);
653 access2
= eliminate_fixed_inner_loops(access2
);
654 access1
= isl_map_intersect(access1
, access2
);
655 empty
= isl_map_is_empty(access1
);
656 isl_map_free(access1
);
661 /* Combine the given two groups into a single group, containing
662 * the references of both groups.
664 static struct gpu_array_ref_group
*join_groups(
665 struct gpu_array_ref_group
*group1
,
666 struct gpu_array_ref_group
*group2
)
670 struct gpu_array_ref_group
*group
;
672 ctx
= isl_map_get_ctx(group1
->access
);
673 group
= isl_calloc_type(ctx
, struct gpu_array_ref_group
);
676 group
->local_array
= group1
->local_array
;
677 group
->array
= group1
->array
;
678 group
->access
= isl_map_union(isl_map_copy(group1
->access
),
679 isl_map_copy(group2
->access
));
680 group
->write
= group1
->write
|| group2
->write
;
681 group
->exact_write
= group1
->exact_write
&& group2
->exact_write
;
682 group
->slice
= group1
->slice
|| group2
->slice
;
683 group
->n_ref
= group1
->n_ref
+ group2
->n_ref
;
684 group
->refs
= isl_alloc_array(ctx
, struct gpu_stmt_access
*,
687 return gpu_array_ref_group_free(group
);
688 for (i
= 0; i
< group1
->n_ref
; ++i
)
689 group
->refs
[i
] = group1
->refs
[i
];
690 for (i
= 0; i
< group2
->n_ref
; ++i
)
691 group
->refs
[group1
->n_ref
+ i
] = group2
->refs
[i
];
696 /* Combine the given two groups into a single group and free
697 * the original two groups.
699 static struct gpu_array_ref_group
*join_groups_and_free(
700 struct gpu_array_ref_group
*group1
,
701 struct gpu_array_ref_group
*group2
)
703 struct gpu_array_ref_group
*group
;
705 group
= join_groups(group1
, group2
);
706 gpu_array_ref_group_free(group1
);
707 gpu_array_ref_group_free(group2
);
711 /* Report that the array reference group with the given access relation
712 * is not mapped to shared memory in the given kernel because
713 * it does not exhibit any reuse and is considered to be coalesced.
715 static void report_no_reuse_and_coalesced(struct ppcg_kernel
*kernel
,
716 __isl_keep isl_union_map
*access
)
721 ctx
= isl_union_map_get_ctx(access
);
722 p
= isl_printer_to_file(ctx
, stdout
);
723 p
= isl_printer_print_str(p
, "Array reference group ");
724 p
= isl_printer_print_union_map(p
, access
);
725 p
= isl_printer_print_str(p
,
726 " not considered for mapping to shared memory in kernel");
727 p
= isl_printer_print_int(p
, kernel
->id
);
728 p
= isl_printer_print_str(p
,
729 " because it exhibits no reuse and is considered to be coalesced");
730 p
= isl_printer_end_line(p
);
734 /* Given an access relation in terms of the gen->shared_len initial
735 * dimensions of the computed schedule and the thread identifiers
736 * (as parameters), check if the use of the corresponding private tile
737 * requires unrolling.
739 * If we are creating a private tile because we are forced to,
740 * then no unrolling is required.
741 * Otherwise we check if "access" is bijective and unrolling
742 * is required if it is not. Note that the access relation
743 * has already been determined to be bijective before the introduction
744 * of the thread identifiers and the removal of the schedule dimensions
745 * that are mapped to these threads. If the access relation is no longer
746 * bijective, then this means that more than one value of one of those
747 * schedule dimensions is mapped to the same thread and therefore
748 * unrolling is required.
750 static int check_requires_unroll(struct gpu_gen
*gen
,
751 __isl_keep isl_map
*access
, int force_private
)
757 bijective
= access_is_bijective(gen
, access
);
763 /* Compute the private and/or shared memory tiles for the array
764 * reference group "group" of array "array".
765 * Return 0 on success and -1 on error.
767 * If the array is a read-only scalar or if the user requested
768 * not to use shared or private memory, then we do not need to do anything.
770 * If any reference in the reference group accesses more than one element,
771 * then we would have to make sure that the layout in shared memory
772 * is the same as that in global memory. Since we do not handle this yet
773 * (and it may not even be possible), we refuse to map to private or
774 * shared memory in such cases.
776 * If the array group involves any may writes (that are not must writes),
777 * then we would have to make sure that we load the data into shared/private
778 * memory first in case the data is not written by the kernel
779 * (but still written back out to global memory).
780 * Since we don't have any such mechanism at the moment, we don't
781 * compute shared/private tiles for groups involving may writes.
783 * We only try to compute a shared memory tile if there is any reuse
784 * or if the access is not coalesced.
786 * For computing a private memory tile, we also require that there is
787 * some reuse. Moreover, we require that the access is private
788 * to the thread. That is, we check that any given array element
789 * is only accessed by a single thread.
790 * We compute an access relation that maps the shared tile loop iterators
791 * and the shared point loop iterators that will be wrapped over the
792 * threads to the array elements.
793 * We actually check that those iterators that will be wrapped
794 * partition the array space. This check is stricter than necessary
795 * since several iterations may be mapped onto the same thread
796 * and then they could be allowed to access the same memory elements,
797 * but our check does not allow this situation.
799 * We also check that the index expression only depends on parallel
800 * loops. That way, we can move those loops innermost and unroll them.
801 * Again, we use a test that is stricter than necessary.
802 * We actually check whether the index expression only depends
803 * on the iterators that are wrapped over the threads.
804 * These are necessarily parallel, but there may be more parallel loops.
806 * Combining the injectivity of the first test with the single-valuedness
807 * of the second test, we simply test for bijectivity.
809 * If the use of the private tile requires unrolling, but some
810 * of the other arrays are forcibly mapped to private memory,
811 * then we do not allow the use of this private tile since
812 * we cannot move the schedule dimensions that need to be unrolled down
813 * without performing some kind of expansion on those arrays
814 * that are forcibly mapped to private memory.
816 * If the array is marked force_private, then we bypass all checks
817 * and assume we can (and should) use registers.
819 * If it turns out we can (or have to) use registers, we compute
820 * the private memory tile size using can_tile, after introducing a dependence
821 * on the thread indices.
823 static int compute_group_bounds_core(struct gpu_gen
*gen
,
824 struct gpu_array_ref_group
*group
)
826 isl_ctx
*ctx
= isl_space_get_ctx(group
->array
->space
);
827 isl_union_map
*access
;
828 int n_index
= group
->array
->n_index
;
829 int no_reuse
, coalesced
;
831 int force_private
= group
->local_array
->force_private
;
832 int use_shared
= gen
->options
->use_shared_memory
&&
833 gen
->kernel
->n_block
> 0;
834 int use_private
= force_private
|| gen
->options
->use_private_memory
;
838 if (!use_shared
&& !use_private
)
840 if (gpu_array_is_read_only_scalar(group
->array
))
842 if (!force_private
&& !group
->exact_write
)
847 access
= gpu_array_ref_group_access_relation(group
, 1, 1);
848 no_reuse
= isl_union_map_is_injective(access
);
851 if (use_shared
&& no_reuse
)
852 coalesced
= access_is_coalesced(gen
, access
);
854 if (r
>= 0 && gen
->options
->debug
->verbose
&&
855 use_shared
&& no_reuse
&& coalesced
)
856 report_no_reuse_and_coalesced(gen
->kernel
, access
);
858 if (use_shared
&& (!no_reuse
|| !coalesced
)) {
859 group
->shared_tile
= gpu_array_tile_create(ctx
,
860 group
->array
->n_index
);
861 if (!group
->shared_tile
)
863 else if (!can_tile(group
->access
, group
->shared_tile
))
865 gpu_array_tile_free(group
->shared_tile
);
868 if (r
< 0 || (!force_private
&& (!use_private
|| no_reuse
))) {
869 isl_union_map_free(access
);
873 access
= isl_union_map_apply_domain(access
,
874 isl_union_map_copy(gen
->shared_sched
));
876 acc
= isl_map_from_union_map(access
);
878 if (!force_private
&& !access_is_bijective(gen
, acc
)) {
883 acc
= isl_map_apply_domain(acc
, isl_map_copy(gen
->privatization
));
884 requires_unroll
= check_requires_unroll(gen
, acc
, force_private
);
885 if (requires_unroll
< 0 ||
886 (requires_unroll
&& gen
->kernel
->any_force_private
)) {
888 return requires_unroll
< 0 ? -1 : 0;
891 group
->private_tile
= gpu_array_tile_create(gen
->ctx
, n_index
);
892 if (!group
->private_tile
) {
896 group
->private_tile
->requires_unroll
= requires_unroll
;
897 if (!can_tile(acc
, group
->private_tile
))
898 group
->private_tile
= gpu_array_tile_free(group
->private_tile
);
902 if (force_private
&& !group
->private_tile
)
903 isl_die(ctx
, isl_error_internal
,
904 "unable to map array reference group to registers",
910 /* Compute the private and/or shared memory tiles for the array
911 * reference group "group" of array "array" and set the tile depth.
912 * Return 0 on success and -1 on error.
914 static int compute_group_bounds(struct gpu_gen
*gen
,
915 struct gpu_array_ref_group
*group
)
919 if (compute_group_bounds_core(gen
, group
) < 0)
921 set_depth(gen
, group
);
926 /* If two groups have overlapping access relations (as determined by
927 * the "overlap" function) and if one of them involves a write,
928 * then merge the two groups into one.
929 * If "compute_bounds" is set, then call compute_group_bounds
930 * on the merged groups.
932 * Return the updated number of groups.
933 * Return -1 on error.
935 static int group_writes(struct gpu_gen
*gen
,
936 int n
, struct gpu_array_ref_group
**groups
,
937 int (*overlap
)(struct gpu_array_ref_group
*group1
,
938 struct gpu_array_ref_group
*group2
), int compute_bounds
)
942 for (i
= 0; i
< n
; ++i
) {
943 for (j
= n
- 1; j
> i
; --j
) {
944 if (!groups
[i
]->write
&& !groups
[j
]->write
)
947 if (!overlap(groups
[i
], groups
[j
]))
950 groups
[i
] = join_groups_and_free(groups
[i
], groups
[j
]);
952 groups
[j
] = groups
[n
- 1];
953 groups
[n
- 1] = NULL
;
958 if (compute_bounds
&&
959 compute_group_bounds(gen
, groups
[i
]) < 0)
967 /* If two groups have overlapping access relations (within the innermost
968 * loop) and if one of them involves a write, then merge the two groups
971 * Return the updated number of groups.
973 static int group_overlapping_writes(struct gpu_gen
*gen
,
974 int n
, struct gpu_array_ref_group
**groups
)
976 return group_writes(gen
, n
, groups
, &accesses_overlap
, 0);
979 /* Check if the access relations of group1 and group2 overlap within
980 * the outermost min(group1->depth, group2->depth) loops.
982 static int depth_accesses_overlap(struct gpu_array_ref_group
*group1
,
983 struct gpu_array_ref_group
*group2
)
988 isl_map
*map_i
, *map_j
, *map
;
990 depth
= group1
->depth
;
991 if (group2
->depth
< depth
)
992 depth
= group2
->depth
;
993 map_i
= isl_map_copy(group1
->access
);
994 dim
= isl_map_dim(map_i
, isl_dim_in
);
995 map_i
= isl_map_eliminate(map_i
, isl_dim_in
, depth
, dim
- depth
);
996 map_j
= isl_map_copy(group2
->access
);
997 map_j
= isl_map_eliminate(map_j
, isl_dim_in
, depth
, dim
- depth
);
998 map
= isl_map_intersect(map_i
, map_j
);
999 empty
= isl_map_is_empty(map
);
1005 /* If two groups have overlapping access relations (within the outer
1006 * depth loops) and if one of them involves a write,
1007 * then merge the two groups into one.
1009 * Return the updated number of groups.
1011 static int group_depth_overlapping_writes(struct gpu_gen
*gen
, int n
,
1012 struct gpu_array_ref_group
**groups
)
1014 return group_writes(gen
, n
, groups
, &depth_accesses_overlap
, 1);
1017 /* Is the size of the tile specified by "tile" smaller than the sum of
1018 * the sizes of the tiles specified by "tile1" and "tile2"?
1020 static int smaller_tile(struct gpu_array_tile
*tile
,
1021 struct gpu_array_tile
*tile1
, struct gpu_array_tile
*tile2
)
1024 isl_val
*size
, *size1
, *size2
;
1026 size
= gpu_array_tile_size(tile
);
1027 size1
= gpu_array_tile_size(tile1
);
1028 size2
= gpu_array_tile_size(tile2
);
1030 size
= isl_val_sub(size
, size1
);
1031 size
= isl_val_sub(size
, size2
);
1032 smaller
= isl_val_is_neg(size
);
1039 /* Given an initial grouping of array references and shared memory tiles
1040 * for each group that allows for a shared memory tile, merge two groups
1041 * if both have a shared memory tile, the merged group also has
1042 * a shared memory tile and the size of the tile for the merge group
1043 * is smaller than the sum of the tile sizes of the individual groups.
1045 * If merging two groups decreases the depth of the tile of
1046 * one or both of the two groups, then we need to check for overlapping
1049 * Return the number of groups after merging.
1050 * Return -1 on error.
1052 static int group_common_shared_memory_tile(struct gpu_gen
*gen
,
1053 struct gpu_array_info
*array
, int n
,
1054 struct gpu_array_ref_group
**groups
)
1057 int recompute_overlap
= 0;
1058 isl_ctx
*ctx
= isl_space_get_ctx(array
->space
);
1060 for (i
= 0; i
< n
; ++i
) {
1061 if (!groups
[i
]->shared_tile
)
1063 for (j
= n
- 1; j
> i
; --j
) {
1066 struct gpu_array_ref_group
*group
;
1068 if (!groups
[j
]->shared_tile
)
1071 map
= isl_map_intersect(isl_map_copy(groups
[i
]->access
),
1072 isl_map_copy(groups
[j
]->access
));
1073 empty
= isl_map_is_empty(map
);
1079 group
= join_groups(groups
[i
], groups
[j
]);
1080 if (compute_group_bounds(gen
, group
) < 0) {
1081 gpu_array_ref_group_free(group
);
1084 if (!group
->shared_tile
||
1085 !smaller_tile(group
->shared_tile
,
1086 groups
[i
]->shared_tile
,
1087 groups
[j
]->shared_tile
)) {
1088 gpu_array_ref_group_free(group
);
1092 if (group
->depth
< groups
[i
]->depth
||
1093 group
->depth
< groups
[j
]->depth
)
1094 recompute_overlap
= 1;
1095 gpu_array_ref_group_free(groups
[i
]);
1096 gpu_array_ref_group_free(groups
[j
]);
1099 groups
[j
] = groups
[n
- 1];
1104 if (recompute_overlap
)
1105 n
= group_depth_overlapping_writes(gen
, n
, groups
);
1109 /* Set array->n_group and array->groups to n and groups.
1111 * Additionally, set the "nr" field of each group.
1113 static void set_array_groups(struct gpu_local_array_info
*array
,
1114 int n
, struct gpu_array_ref_group
**groups
)
1119 array
->groups
= groups
;
1121 for (i
= 0; i
< n
; ++i
)
1125 /* Group array references that should be considered together when
1126 * deciding whether to access them from private, shared or global memory.
1127 * Return -1 on error.
1129 * In particular, if two array references overlap and if one of them
1130 * is a write, then the two references are grouped together.
1131 * We first perform an initial grouping based only on the access relation.
1132 * After computing shared and private memory tiles, we check for
1133 * overlapping writes again, but this time taking into account
1134 * the depth of the effective tile.
1136 * Furthermore, if two groups admit a shared memory tile and if the
1137 * combination of the two also admits a shared memory tile, we merge
1140 * If the array contains structures, then there is no need to compute
1141 * reference groups since we do not map such arrays to private or shared
1144 static int group_array_references(struct gpu_gen
*gen
,
1145 struct gpu_local_array_info
*local
, __isl_keep isl_union_map
*sched
)
1149 isl_ctx
*ctx
= isl_union_map_get_ctx(sched
);
1150 struct gpu_array_ref_group
**groups
;
1152 if (local
->array
->has_compound_element
)
1155 groups
= isl_calloc_array(ctx
, struct gpu_array_ref_group
*,
1156 local
->array
->n_ref
);
1160 n
= populate_array_references(local
, sched
, groups
);
1162 n
= group_overlapping_writes(gen
, n
, groups
);
1164 for (i
= 0; i
< n
; ++i
)
1165 if (compute_group_bounds(gen
, groups
[i
]) < 0)
1168 n
= group_depth_overlapping_writes(gen
, n
, groups
);
1170 n
= group_common_shared_memory_tile(gen
, local
->array
, n
, groups
);
1172 set_array_groups(local
, n
, groups
);
1177 for (i
= 0; i
< local
->array
->n_ref
; ++i
)
1178 gpu_array_ref_group_free(groups
[i
]);
1182 /* For each scalar in the input program, check if there are any
1183 * order dependences active inside the current kernel, within
1184 * the same iteration of the host schedule.
1185 * If so, mark the scalar as force_private so that it will be
1186 * mapped to a register.
1188 static void check_scalar_live_ranges(struct gpu_gen
*gen
)
1192 isl_union_map
*sched
;
1193 isl_union_set
*domain
;
1194 isl_union_map
*same_host_iteration
;
1196 gen
->kernel
->any_force_private
= 0;
1198 if (!gen
->options
->live_range_reordering
)
1201 sched
= gen
->shared_sched
;
1202 sched
= isl_union_map_universe(isl_union_map_copy(sched
));
1203 domain
= isl_union_map_domain(sched
);
1205 sched
= isl_union_map_copy(gen
->sched
);
1206 proj
= projection(isl_union_map_get_space(sched
),
1207 gen
->untiled_len
, gen
->tile_first
);
1208 sched
= isl_union_map_apply_range(sched
, isl_union_map_from_map(proj
));
1209 same_host_iteration
= isl_union_map_apply_range(sched
,
1210 isl_union_map_reverse(isl_union_map_copy(sched
)));
1212 for (i
= 0; i
< gen
->kernel
->n_array
; ++i
) {
1213 struct gpu_local_array_info
*local
= &gen
->kernel
->array
[i
];
1214 isl_union_map
*order
;
1216 local
->force_private
= 0;
1217 if (local
->array
->n_index
!= 0)
1219 order
= isl_union_map_copy(local
->array
->dep_order
);
1220 order
= isl_union_map_intersect_domain(order
,
1221 isl_union_set_copy(domain
));
1222 order
= isl_union_map_intersect_range(order
,
1223 isl_union_set_copy(domain
));
1224 order
= isl_union_map_intersect(order
,
1225 isl_union_map_copy(same_host_iteration
));
1226 if (!isl_union_map_is_empty(order
)) {
1227 local
->force_private
= 1;
1228 gen
->kernel
->any_force_private
= 1;
1230 isl_union_map_free(order
);
1233 isl_union_map_free(same_host_iteration
);
1234 isl_union_set_free(domain
);
1237 /* Group references of all arrays in the current kernel.
1239 int gpu_group_references(struct gpu_gen
*gen
)
1243 isl_union_map
*sched
;
1245 check_scalar_live_ranges(gen
);
1247 sched
= isl_union_map_apply_range(isl_union_map_copy(gen
->shared_sched
),
1248 isl_union_map_copy(gen
->shared_proj
));
1250 for (i
= 0; i
< gen
->kernel
->n_array
; ++i
) {
1251 r
= group_array_references(gen
, &gen
->kernel
->array
[i
], sched
);
1256 isl_union_map_free(sched
);
1261 /* Given a description of an array tile "tile" and the "space"
1265 * where D represents the first shared_len schedule dimensions
1266 * and A represents the array, construct an isl_multi_aff
1268 * { [D[i] -> A[a]] -> A'[a'] }
1270 * with A' a scaled down copy of A according to the shifts and strides
1271 * in "tile". In particular,
1273 * a' = (a + shift(i))/stride
1275 * "insert_array" represents
1279 * and is used to insert A into the domain of functions that only
1282 static __isl_give isl_multi_aff
*strided_tile(
1283 struct gpu_array_tile
*tile
, __isl_keep isl_space
*space
,
1284 __isl_keep isl_multi_aff
*insert_array
)
1288 isl_multi_aff
*shift
;
1289 isl_multi_val
*stride
;
1291 isl_local_space
*ls
;
1292 isl_multi_aff
*tiling
;
1294 ctx
= isl_space_get_ctx(space
);
1295 space2
= isl_space_domain(isl_space_copy(space
));
1296 ls
= isl_local_space_from_space(space2
);
1297 space2
= isl_space_range(isl_space_copy(space
));
1298 stride
= isl_multi_val_zero(space2
);
1299 shift
= isl_multi_aff_zero(isl_space_copy(space
));
1301 for (i
= 0; i
< tile
->n
; ++i
) {
1302 struct gpu_array_bound
*bound
= &tile
->bound
[i
];
1306 if (tile
->bound
[i
].shift
) {
1307 stride_i
= isl_val_copy(bound
->stride
);
1308 shift_i
= isl_aff_copy(bound
->shift
);
1310 stride_i
= isl_val_one(ctx
);
1311 shift_i
= isl_aff_zero_on_domain(
1312 isl_local_space_copy(ls
));
1315 stride
= isl_multi_val_set_val(stride
, i
, stride_i
);
1316 shift
= isl_multi_aff_set_aff(shift
, i
, shift_i
);
1318 isl_local_space_free(ls
);
1320 shift
= isl_multi_aff_pullback_multi_aff(shift
,
1321 isl_multi_aff_copy(insert_array
));
1323 tiling
= isl_multi_aff_range_map(isl_space_copy(space
));
1324 tiling
= isl_multi_aff_add(tiling
, shift
);
1325 tiling
= isl_multi_aff_scale_down_multi_val(tiling
, stride
);
1330 /* Compute a tiling for the array reference group "group".
1332 * The tiling is of the form
1334 * { [D[i] -> A[a]] -> T[t] }
1336 * where D represents the first shared_len schedule dimensions,
1337 * A represents the global array and T represents the shared or
1338 * private memory tile. The name of T is the name of the local
1341 * If there is any stride in the accesses, then the mapping is
1343 * t = (a + shift(i))/stride - lb(i)
1345 * otherwise, it is simply
1349 void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group
*group
)
1352 struct gpu_array_tile
*tile
;
1353 struct gpu_array_info
*array
= group
->array
;
1355 isl_multi_aff
*tiling
, *lb
, *insert_array
;
1359 tile
= group
->private_tile
;
1361 tile
= group
->shared_tile
;
1365 space
= isl_map_get_space(group
->access
);
1366 insert_array
= isl_multi_aff_domain_map(isl_space_copy(space
));
1368 for (i
= 0; i
< tile
->n
; ++i
)
1369 if (tile
->bound
[i
].shift
)
1373 tiling
= strided_tile(tile
, space
, insert_array
);
1375 tiling
= isl_multi_aff_range_map(isl_space_copy(space
));
1377 lb
= isl_multi_aff_zero(space
);
1378 for (i
= 0; i
< tile
->n
; ++i
) {
1379 isl_aff
*lb_i
= isl_aff_copy(tile
->bound
[i
].lb
);
1380 lb
= isl_multi_aff_set_aff(lb
, i
, lb_i
);
1382 lb
= isl_multi_aff_pullback_multi_aff(lb
, insert_array
);
1384 tiling
= isl_multi_aff_sub(tiling
, lb
);
1386 p
= isl_printer_to_str(isl_multi_aff_get_ctx(tiling
));
1387 p
= gpu_array_ref_group_print_name(group
, p
);
1388 local_name
= isl_printer_get_str(p
);
1389 isl_printer_free(p
);
1390 tiling
= isl_multi_aff_set_tuple_name(tiling
, isl_dim_out
, local_name
);
1393 tile
->tiling
= tiling
;