#include <isl/constraint.h>
#include <isl/ilp.h>

#include "gpu_array_tile.h"
#include "gpu_group.h"
/* Print the name of the local copy of a given group of array references.
 */
__isl_give isl_printer *gpu_array_ref_group_print_name(
	struct gpu_array_ref_group *group, __isl_take isl_printer *p)
{
	int global = 0;

	if (group->private_tile)
		p = isl_printer_print_str(p, "private_");
	else if (group->shared_tile)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->local_array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}
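/* A minimal usage sketch (assuming a valid "ctx" and "group"; it mirrors
 * the pattern used in gpu_array_ref_group_compute_tiling further down):
 * obtain the local name of a group, e.g., "shared_A_1", as a string.
 *
 *	isl_printer *p = isl_printer_to_str(ctx);
 *	p = gpu_array_ref_group_print_name(group, p);
 *	char *name = isl_printer_get_str(p);
 *	isl_printer_free(p);
 *	...
 *	free(name);
 */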
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *gpu_array_ref_group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		     (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					    isl_union_map_from_map(map_i));
	}

	return access;
}
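/* For example, the read-only part of the accesses of a group can be
 * obtained (in a caller that has a "group" available) as
 *
 *	isl_union_map *reads =
 *		gpu_array_ref_group_access_relation(group, 1, 0);
 *
 * while passing (1, 1), as compute_group_bounds_core does below,
 * yields both reads and writes.
 */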
/* Return the effective gpu_array_tile associated to "group" or
 * NULL if there is no such gpu_array_tile.
 * If we have computed both a private and a shared tile, then
 * the private tile is used.
 */
struct gpu_array_tile *gpu_array_ref_group_tile(
	struct gpu_array_ref_group *group)
{
	if (group->private_tile)
		return group->private_tile;
	if (group->shared_tile)
		return group->shared_tile;
	return NULL;
}
/* Does the tile associated to "group" require unrolling of the schedule
 * dimensions mapped to threads?
 * Note that this can only happen for private tiles.
 */
int gpu_array_ref_group_requires_unroll(struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;
	return tile->requires_unroll;
}
/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, __isl_keep isl_val *stride, int sign)
{
	int i;
	isl_space *space;
	unsigned nparam;
	unsigned nvar;
	isl_val *v;
	isl_aff *aff;

	isl_val_free(bound->stride);
	bound->stride = isl_val_copy(stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	v = isl_constraint_get_constant_val(c);
	if (sign < 0)
		v = isl_val_neg(v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant_val(aff, v);

	for (i = 0; i < nparam; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_param, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_param, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_in, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_in, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_in, i, v);
	}

	bound->shift = aff;
}
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
	int i;
	isl_ctx *ctx;
	isl_val *v;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	ctx = isl_constraint_get_ctx(c);
	n_div = isl_constraint_dim(c, isl_dim_div);
	v = isl_constraint_get_coefficient_val(c, isl_dim_out, 0);

	if (n_div && (isl_val_is_one(v) || isl_val_is_negone(v))) {
		int s = isl_val_sgn(v);
		isl_val *stride = isl_val_zero(ctx);

		isl_val_free(v);
		for (i = 0; i < n_div; ++i) {
			v = isl_constraint_get_coefficient_val(c,
							isl_dim_div, i);
			stride = isl_val_gcd(stride, v);
		}
		if (!isl_val_is_zero(stride) &&
		    isl_val_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

		isl_val_free(stride);
	} else
		isl_val_free(v);

	isl_constraint_free(c);

	return 0;
}
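/* As an illustrative (hypothetical) example: the equality constraint
 *
 *	n + j - 32 e0 = 0
 *
 * i.e., n + j = 32 e0, has output coefficient 1 and a single existentially
 * quantified variable with coefficient 32.  Provided 32 exceeds any
 * previously recorded stride, check_stride_constraint records a stride
 * of 32 and extract_stride stores the shift a(p,i) = n.
 */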
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * Next, we construct a mapping of the form
 *
 *	[D -> i] -> [D -> (i + s(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	bound->stride = NULL;

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (!bound->stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bmap = isl_basic_map_apply_range(shift, scale);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}
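/* Continuing the hypothetical example above: with stride g = 32 and
 * shift s(D) = n, the mapping applied to the wrapped bounds is
 *
 *	[D[n] -> [i]] -> [D[n] -> [(i + n)/32]]
 *
 * so that the transformed array index runs over consecutive values again.
 */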
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size. This variable
 * is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound. If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return 0;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return 0;
}
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}
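/* A small worked example (hypothetical input): for
 *
 *	bounds = { [i] -> [a] : i <= a <= i + 9 }
 *
 * the only candidate lower bound is a >= i, so compute_size_in_direction
 * finds lb = i and a constant upper bound of 9 on a - i, resulting in
 * bound->lb = i and bound->size = 10.
 */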
/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile(__isl_keep isl_map *access, struct gpu_array_tile *tile)
{
	int i;

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
					    1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return 0;
	}

	return 1;
}
/* Internal data structure for gpu_group_references.
 *
 * scop represents the input scop.
 * kernel_depth is the schedule depth where the kernel launch will
 * be introduced, i.e., it is the depth of the band that is mapped
 * to blocks.
 * thread_depth is the schedule depth where the thread mark is located,
 * i.e., it is the depth of the band that is mapped to threads and also
 * the schedule depth at which the copying to/from shared/private memory
 * is computed. The copy operation may then later be hoisted to
 * a higher level.
 * n_thread is the number of schedule dimensions in the band that
 * is mapped to threads.
 * privatization lives in the range of thread_sched (i.e., it is
 * of dimension thread_depth + n_thread) and encodes the mapping
 * to thread identifiers (as parameters).
 * shared_sched contains the first thread_depth dimensions of the
 * kernel schedule.
 * thread_sched contains the first (thread_depth + n_thread) dimensions
 * of the kernel schedule.
 * full_sched is a union_map representation of the entire kernel schedule.
 */
struct gpu_group_data {
	struct ppcg_scop *scop;
	int kernel_depth;
	int thread_depth;
	int n_thread;
	isl_set *privatization;
	isl_union_map *shared_sched;
	isl_union_map *thread_sched;
	isl_union_map *full_sched;
};
/* Construct a map from domain_dim to domain_dim that increments
 * the dimension at position "pos" and leaves all other dimensions
 * constant.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
{
	int i;
	int len = isl_space_dim(domain_dim, isl_dim_set);
	isl_space *dim;
	isl_basic_map *next;
	isl_local_space *ls;

	dim = isl_space_map_from_set(domain_dim);
	next = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		isl_constraint *c;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		if (i == pos)
			c = isl_constraint_set_constant_si(c, 1);
		next = isl_basic_map_add_constraint(next, c);
	}

	isl_local_space_free(ls);

	return isl_map_from_basic_map(next);
}
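/* For example, on a 3-dimensional domain with pos = 1, the constructed
 * map is
 *
 *	{ [i0, i1, i2] -> [i0, i1 + 1, i2] }
 *
 * since every dimension gets the equality i_k' = i_k, except the one at
 * position "pos", which gets i_pos' = i_pos + 1.
 */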
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one thread identifier.
 */
static int access_is_coalesced(struct gpu_group_data *data,
	__isl_keep isl_union_map *access)
{
	isl_space *dim;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(data->full_sched));
	access_map = isl_map_from_union_map(access);

	dim = isl_map_get_space(access_map);
	dim = isl_space_domain(dim);
	next_thread_x = next(dim, data->thread_depth + data->n_thread - 1);

	dim = isl_map_get_space(access_map);
	dim = isl_space_range(dim);
	next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
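/* As an illustration (hypothetical access relations): if the scheduled
 * access is of the form { [..., t] -> A[..., t] }, i.e., the last thread
 * dimension directly indexes the last array dimension, then incrementing t
 * also increments the last array index and the access is coalesced.
 * For { [..., t] -> A[t, j] }, on the other hand, consecutive values of t
 * touch elements that differ in an outer array index, so the subset test
 * above fails and the access is not considered coalesced.
 */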
/* Given an access relation in terms of at least data->thread_depth initial
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first data->thread_depth dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
static int access_is_bijective(struct gpu_group_data *data,
	__isl_keep isl_map *access)
{
	int res;
	int dim;
	isl_set *par;
	isl_space *space;
	isl_id_list *ids;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	ids = ppcg_scop_generate_names(data->scop, data->thread_depth, "s");
	dim = isl_map_dim(access, isl_dim_in);
	par = parametrization(space, dim, 0, ids);
	isl_id_list_free(ids);
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}
/* Compute the number of outer schedule tile dimensions that affect
 * the offset of "tile".
 * If there is no such dimension, then return the index
 * of the first kernel dimension, i.e., data->kernel_depth.
 */
static int compute_tile_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = data->thread_depth - 1; j >= data->kernel_depth; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return ++j;
}
/* Determine the number of schedule dimensions that affect the offset of the
 * shared or private tile and store the result in group->depth, with
 * a lower bound of data->kernel_depth.
 * If there is no tile defined on the array reference group,
 * then set group->depth to data->thread_depth.
 */
static void set_depth(struct gpu_group_data *data,
	struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	group->depth = data->thread_depth;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return;

	group->depth = compute_tile_depth(data, tile);
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_local_array_info *local,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);

	n = 0;
	for (i = 0; i < local->array->n_ref; ++i) {
		isl_map *map;
		isl_union_map *umap;
		struct gpu_array_ref_group *group;
		struct gpu_stmt_access *access = local->array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(data->shared_sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);
		map = isl_map_detect_equalities(map);

		group = isl_calloc_type(ctx, struct gpu_array_ref_group);
		if (!group)
			return -1;
		group->local_array = local;
		group->array = local->array;
		group->access = map;
		group->write = access->write;
		group->exact_write = access->exact_write;
		group->slice = access->n_index < local->array->n_index;
		group->refs = &local->array->refs[i];
		group->n_ref = 1;

		groups[n++] = group;
	}

	return n;
}
/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
struct gpu_array_ref_group *gpu_array_ref_group_free(
	struct gpu_array_ref_group *group)
{
	if (!group)
		return NULL;
	gpu_array_tile_free(group->shared_tile);
	gpu_array_tile_free(group->private_tile);
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);
	return NULL;
}
/* Given a map where the input dimensions represent the tile loops,
 * eliminate the innermost of those that have a fixed value
 * until we reach one that does not (obviously) have a fixed value.
 */
static __isl_give isl_map *eliminate_fixed_inner_loops(
	__isl_take isl_map *access)
{
	int i, n;

	n = isl_map_dim(access, isl_dim_in);

	for (i = n - 1; i >= 0; --i) {
		if (!map_plain_is_fixed(access, isl_dim_in, i))
			break;
		access = isl_map_eliminate(access, isl_dim_in, i, 1);
	}
	return access;
}
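/* For instance (hypothetical access relation), in
 *
 *	{ [i, j] -> A[i] : j = 0 }
 *
 * the innermost input dimension j has the fixed value 0, so it is
 * eliminated (existentially quantified away), while the non-fixed
 * dimension i stops the loop.
 */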
/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop. In particular, ignore any inner dimension
 * with a fixed value.
 * The copying to and from shared memory will be performed within
 * the innermost actual loop so we are only allowed to consider
 * the dimensions up to that innermost loop while checking whether
 * two access relations overlap.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int empty;
	isl_map *access1, *access2;

	access1 = isl_map_copy(group1->access);
	access1 = eliminate_fixed_inner_loops(access1);
	access2 = isl_map_copy(group2->access);
	access2 = eliminate_fixed_inner_loops(access2);
	access1 = isl_map_intersect(access1, access2);
	empty = isl_map_is_empty(access1);
	isl_map_free(access1);

	return !empty;
}
/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int i;
	isl_ctx *ctx;
	struct gpu_array_ref_group *group;

	ctx = isl_map_get_ctx(group1->access);
	group = isl_calloc_type(ctx, struct gpu_array_ref_group);
	if (!group)
		return NULL;
	group->local_array = group1->local_array;
	group->array = group1->array;
	group->access = isl_map_union(isl_map_copy(group1->access),
					isl_map_copy(group2->access));
	group->write = group1->write || group2->write;
	group->exact_write = group1->exact_write && group2->exact_write;
	group->slice = group1->slice || group2->slice;
	group->n_ref = group1->n_ref + group2->n_ref;
	group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
					group->n_ref);
	if (!group->refs)
		return gpu_array_ref_group_free(group);
	for (i = 0; i < group1->n_ref; ++i)
		group->refs[i] = group1->refs[i];
	for (i = 0; i < group2->n_ref; ++i)
		group->refs[group1->n_ref + i] = group2->refs[i];

	return group;
}
/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	struct gpu_array_ref_group *group;

	group = join_groups(group1, group2);
	gpu_array_ref_group_free(group1);
	gpu_array_ref_group_free(group2);
	return group;
}
/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct ppcg_kernel *kernel,
	__isl_keep isl_union_map *access)
{
	isl_ctx *ctx;
	isl_printer *p;

	ctx = isl_union_map_get_ctx(access);
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_print_str(p, "Array reference group ");
	p = isl_printer_print_union_map(p, access);
	p = isl_printer_print_str(p,
	    " not considered for mapping to shared memory in kernel");
	p = isl_printer_print_int(p, kernel->id);
	p = isl_printer_print_str(p,
	    " because it exhibits no reuse and is considered to be coalesced");
	p = isl_printer_end_line(p);
	isl_printer_free(p);
}
/* Given an access relation in terms of the data->thread_depth initial
 * dimensions of the computed schedule and the thread identifiers
 * (as parameters), check if the use of the corresponding private tile
 * requires unrolling.
 *
 * If we are creating a private tile because we are forced to,
 * then no unrolling is required.
 * Otherwise we check if "access" is bijective and unrolling
 * is required if it is not. Note that the access relation
 * has already been determined to be bijective before the introduction
 * of the thread identifiers and the removal of the schedule dimensions
 * that are mapped to these threads. If the access relation is no longer
 * bijective, then this means that more than one value of one of those
 * schedule dimensions is mapped to the same thread and therefore
 * unrolling is required.
 */
static int check_requires_unroll(struct gpu_group_data *data,
	__isl_keep isl_map *access, int force_private)
{
	int bijective;

	if (force_private)
		return 0;
	bijective = access_is_bijective(data, access);
	if (bijective < 0)
		return -1;
	return !bijective;
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return 0 on success and -1 on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory. Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse. Moreover, we require that the access is private
 * to the thread. That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation in terms of the outer
 * data->thread_depth + data->n_thread schedule dimensions.
 * The latter data->n_thread will be mapped to thread identifiers.
 * We actually check that those iterators that will be wrapped
 * partition the array space. This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops. That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the use of the private tile requires unrolling, but some
 * of the other arrays are forcibly mapped to private memory,
 * then we do not allow the use of this private tile since
 * we cannot move the schedule dimensions that need to be unrolled down
 * without performing some kind of expansion on those arrays
 * that are forcibly mapped to private memory.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static int compute_group_bounds_core(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	isl_ctx *ctx = isl_space_get_ctx(group->array->space);
	isl_union_map *access;
	int n_index = group->array->n_index;
	int no_reuse, coalesced;
	isl_map *acc;
	int force_private = group->local_array->force_private;
	int use_shared = kernel->options->use_shared_memory &&
				data->n_thread > 0;
	int use_private = force_private || kernel->options->use_private_memory;
	int r = 0;
	int requires_unroll;

	if (!use_shared && !use_private)
		return 0;
	if (gpu_array_is_read_only_scalar(group->array))
		return 0;
	if (!force_private && !group->exact_write)
		return 0;
	if (group->slice)
		return 0;

	access = gpu_array_ref_group_access_relation(group, 1, 1);
	no_reuse = isl_union_map_is_injective(access);
	if (no_reuse < 0)
		r = -1;
	if (use_shared && no_reuse)
		coalesced = access_is_coalesced(data, access);

	if (r >= 0 && kernel->options->debug->verbose &&
	    use_shared && no_reuse && coalesced)
		report_no_reuse_and_coalesced(kernel, access);

	if (use_shared && (!no_reuse || !coalesced)) {
		group->shared_tile = gpu_array_tile_create(ctx,
							group->array->n_index);
		if (!group->shared_tile)
			r = -1;
		else if (!can_tile(group->access, group->shared_tile))
			group->shared_tile =
					gpu_array_tile_free(group->shared_tile);
	}

	if (r < 0 || (!force_private && (!use_private || no_reuse))) {
		isl_union_map_free(access);
		return r;
	}

	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(data->thread_sched));

	acc = isl_map_from_union_map(access);

	if (!force_private && !access_is_bijective(data, acc)) {
		isl_map_free(acc);
		return 0;
	}

	acc = isl_map_intersect_domain(acc, isl_set_copy(data->privatization));
	acc = isl_map_project_out(acc, isl_dim_in, data->thread_depth,
								data->n_thread);
	requires_unroll = check_requires_unroll(data, acc, force_private);
	if (requires_unroll < 0 ||
	    (requires_unroll && kernel->any_force_private)) {
		isl_map_free(acc);
		return requires_unroll < 0 ? -1 : 0;
	}

	group->private_tile = gpu_array_tile_create(ctx, n_index);
	if (!group->private_tile) {
		isl_map_free(acc);
		return -1;
	}
	group->private_tile->requires_unroll = requires_unroll;
	if (!can_tile(acc, group->private_tile))
		group->private_tile = gpu_array_tile_free(group->private_tile);

	isl_map_free(acc);

	if (force_private && !group->private_tile)
		isl_die(ctx, isl_error_internal,
			"unable to map array reference group to registers",
			return -1);

	return 0;
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set the tile depth.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	if (!group)
		return -1;
	if (compute_group_bounds_core(kernel, group, data) < 0)
		return -1;
	set_depth(data, group);

	return 0;
}
/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	int (*overlap)(struct gpu_array_ref_group *group1,
		struct gpu_array_ref_group *group2), int compute_bounds,
	struct gpu_group_data *data)
{
	int i, j;

	for (i = 0; i < n; ++i) {
		for (j = n - 1; j > i; --j) {
			if (!groups[i]->write && !groups[j]->write)
				continue;

			if (!overlap(groups[i], groups[j]))
				continue;

			groups[i] = join_groups_and_free(groups[i], groups[j]);
			if (j != n - 1)
				groups[j] = groups[n - 1];
			groups[n - 1] = NULL;
			n--;

			if (!groups[i])
				return -1;
			if (compute_bounds &&
			    compute_group_bounds(kernel, groups[i], data) < 0)
				return -1;
		}
	}

	return n;
}
/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 */
static int group_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &accesses_overlap, 0, data);
}
/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->depth, group2->depth) loops.
 */
static int depth_accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int depth;
	int dim;
	int empty;
	isl_map *map_i, *map_j, *map;

	depth = group1->depth;
	if (group2->depth < depth)
		depth = group2->depth;
	map_i = isl_map_copy(group1->access);
	dim = isl_map_dim(map_i, isl_dim_in);
	map_i = isl_map_eliminate(map_i, isl_dim_in, depth, dim - depth);
	map_j = isl_map_copy(group2->access);
	map_j = isl_map_eliminate(map_j, isl_dim_in, depth, dim - depth);
	map = isl_map_intersect(map_i, map_j);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	return !empty;
}
/* If two groups have overlapping access relations (within the outer
 * depth loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 */
static int group_depth_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &depth_accesses_overlap, 1,
				data);
}
/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
static int smaller_tile(struct gpu_array_tile *tile,
	struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
	int smaller;
	isl_val *size, *size1, *size2;

	size = gpu_array_tile_size(tile);
	size1 = gpu_array_tile_size(tile1);
	size2 = gpu_array_tile_size(tile2);

	size = isl_val_sub(size, size1);
	size = isl_val_sub(size, size2);
	smaller = isl_val_is_neg(size);

	isl_val_free(size);

	return smaller;
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * If merging two groups decreases the depth of the tile of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct ppcg_kernel *kernel,
	struct gpu_array_info *array, int n,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i, j;
	int recompute_overlap = 0;
	isl_ctx *ctx = isl_space_get_ctx(array->space);

	for (i = 0; i < n; ++i) {
		if (!groups[i]->shared_tile)
			continue;
		for (j = n - 1; j > i; --j) {
			isl_map *map;
			int empty;
			struct gpu_array_ref_group *group;

			if (!groups[j]->shared_tile)
				continue;

			map = isl_map_intersect(isl_map_copy(groups[i]->access),
					isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);
			isl_map_free(map);

			if (empty)
				continue;

			group = join_groups(groups[i], groups[j]);
			if (compute_group_bounds(kernel, group, data) < 0) {
				gpu_array_ref_group_free(group);
				return -1;
			}
			if (!group->shared_tile ||
			    !smaller_tile(group->shared_tile,
					groups[i]->shared_tile,
					groups[j]->shared_tile)) {
				gpu_array_ref_group_free(group);
				continue;
			}

			if (group->depth < groups[i]->depth ||
			    group->depth < groups[j]->depth)
				recompute_overlap = 1;
			gpu_array_ref_group_free(groups[i]);
			gpu_array_ref_group_free(groups[j]);
			groups[i] = group;
			if (j != n - 1)
				groups[j] = groups[n - 1];
			n--;
		}
	}

	if (recompute_overlap)
		n = group_depth_overlapping_writes(kernel, n, groups, data);
	return n;
}
/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct gpu_local_array_info *array,
	int n, struct gpu_array_ref_group **groups)
{
	int i;

	array->n_group = n;
	array->groups = groups;

	for (i = 0; i < n; ++i)
		groups[i]->nr = i;
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the depth of the effective tile.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then there is no need to compute
 * reference groups since we do not map such arrays to private or shared
 * memory.
 */
static int group_array_references(struct ppcg_kernel *kernel,
	struct gpu_local_array_info *local, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);
	struct gpu_array_ref_group **groups;

	if (local->array->has_compound_element)
		return 0;

	groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
					local->array->n_ref);
	if (!groups)
		return -1;

	n = populate_array_references(local, groups, data);

	n = group_overlapping_writes(kernel, n, groups, data);

	for (i = 0; i < n; ++i)
		if (compute_group_bounds(kernel, groups[i], data) < 0)
			n = -1;

	n = group_depth_overlapping_writes(kernel, n, groups, data);

	n = group_common_shared_memory_tile(kernel, local->array,
					    n, groups, data);

	set_array_groups(local, n, groups);

	if (n >= 0)
		return 0;

	for (i = 0; i < local->array->n_ref; ++i)
		gpu_array_ref_group_free(groups[i]);
	return -1;
}
/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of "host_schedule".
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges_in_host(struct ppcg_kernel *kernel,
	__isl_take isl_union_map *host_schedule)
{
	int i;
	isl_union_map *sched;
	isl_union_set *domain;
	isl_union_map *same_host_iteration;

	kernel->any_force_private = 0;

	sched = isl_union_map_universe(isl_union_map_copy(host_schedule));
	domain = isl_union_map_domain(sched);

	same_host_iteration = isl_union_map_apply_range(host_schedule,
		    isl_union_map_reverse(isl_union_map_copy(host_schedule)));

	for (i = 0; i < kernel->n_array; ++i) {
		struct gpu_local_array_info *local = &kernel->array[i];
		isl_union_map *order;

		local->force_private = 0;
		if (local->array->n_index != 0)
			continue;
		order = isl_union_map_copy(local->array->dep_order);
		order = isl_union_map_intersect_domain(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect_range(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect(order,
				isl_union_map_copy(same_host_iteration));
		if (!isl_union_map_is_empty(order)) {
			local->force_private = 1;
			kernel->any_force_private = 1;
		}
		isl_union_map_free(order);
	}

	isl_union_map_free(same_host_iteration);
	isl_union_set_free(domain);
}
/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of the host schedule.
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges(struct gpu_gen *gen)
{
	isl_map *proj;
	isl_union_map *sched;

	if (!gen->options->live_range_reordering)
		return;

	sched = isl_union_map_copy(gen->sched);
	proj = projection(isl_union_map_get_space(sched),
				gen->untiled_len, gen->tile_first);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));

	check_scalar_live_ranges_in_host(gen->kernel, sched);
}
/* Group references of all arrays in the current kernel.
 *
 * We first extract all required schedule information into
 * a gpu_group_data structure and then consider each array
 * in turn.
 */
int gpu_group_references(struct gpu_gen *gen)
{
	int i;
	int r = 0;
	isl_union_map *sched;
	struct gpu_group_data data;

	check_scalar_live_ranges(gen);

	data.scop = gen->prog->scop;

	data.kernel_depth = gen->tile_first;

	sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
					isl_union_map_copy(gen->shared_proj));
	data.shared_sched = sched;

	data.thread_depth = gen->shared_len;
	data.n_thread = gen->kernel->n_block;
	data.thread_sched = isl_union_map_copy(gen->shared_sched);
	data.full_sched = isl_union_map_copy(gen->tiled_sched);

	data.privatization = isl_map_domain(isl_map_copy(gen->privatization));

	for (i = 0; i < gen->kernel->n_array; ++i) {
		r = group_array_references(gen->kernel, &gen->kernel->array[i],
					    &data);
		if (r < 0)
			break;
	}

	isl_union_map_free(data.shared_sched);
	isl_union_map_free(data.thread_sched);
	isl_union_map_free(data.full_sched);
	isl_set_free(data.privatization);

	return r;
}
/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first shared_len schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile". In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
	struct gpu_array_tile *tile, __isl_keep isl_space *space,
	__isl_keep isl_multi_aff *insert_array)
{
	int i;
	isl_ctx *ctx;
	isl_multi_aff *shift;
	isl_multi_val *stride;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *tiling;

	ctx = isl_space_get_ctx(space);
	space2 = isl_space_domain(isl_space_copy(space));
	ls = isl_local_space_from_space(space2);
	space2 = isl_space_range(isl_space_copy(space));
	stride = isl_multi_val_zero(space2);
	shift = isl_multi_aff_zero(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i) {
		struct gpu_array_bound *bound = &tile->bound[i];
		isl_val *stride_i;
		isl_aff *shift_i;

		if (tile->bound[i].shift) {
			stride_i = isl_val_copy(bound->stride);
			shift_i = isl_aff_copy(bound->shift);
		} else {
			stride_i = isl_val_one(ctx);
			shift_i = isl_aff_zero_on_domain(
					isl_local_space_copy(ls));
		}

		stride = isl_multi_val_set_val(stride, i, stride_i);
		shift = isl_multi_aff_set_aff(shift, i, shift_i);
	}
	isl_local_space_free(ls);

	shift = isl_multi_aff_pullback_multi_aff(shift,
					isl_multi_aff_copy(insert_array));

	tiling = isl_multi_aff_range_map(isl_space_copy(space));
	tiling = isl_multi_aff_add(tiling, shift);
	tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

	return tiling;
}
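/* For instance (hypothetical values), a one-dimensional tile with
 * shift(i) = n and stride 4 yields the tiling
 *
 *	{ [D[i] -> A[a]] -> A'[(a + n)/4] }
 *
 * while an index without a shift simply keeps a' = a at this point.
 */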
/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first shared_len schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile. The name of T is the name of the local
 * array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 */
void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group *group)
{
	int i;
	struct gpu_array_tile *tile;
	struct gpu_array_info *array = group->array;
	isl_space *space;
	isl_multi_aff *tiling, *lb, *insert_array;
	isl_printer *p;
	char *local_name;

	tile = group->private_tile;
	if (!tile)
		tile = group->shared_tile;
	if (!tile)
		return;

	space = isl_map_get_space(group->access);
	insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i)
		if (tile->bound[i].shift)
			break;

	if (i < tile->n)
		tiling = strided_tile(tile, space, insert_array);
	else
		tiling = isl_multi_aff_range_map(isl_space_copy(space));

	lb = isl_multi_aff_zero(space);
	for (i = 0; i < tile->n; ++i) {
		isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
		lb = isl_multi_aff_set_aff(lb, i, lb_i);
	}
	lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

	tiling = isl_multi_aff_sub(tiling, lb);

	p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
	p = gpu_array_ref_group_print_name(group, p);
	local_name = isl_printer_get_str(p);
	isl_printer_free(p);
	tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
	free(local_name);

	tile->tiling = tiling;
}
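/* Putting the pieces together (hypothetical one-dimensional example):
 * for a tile with lb(i) = 4i and no stride, the computed tiling is
 *
 *	{ [D[i] -> A[a]] -> shared_A[a - 4i] }
 *
 * assuming "shared_A" is the name produced by
 * gpu_array_ref_group_print_name for this group.
 */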