#include <isl/constraint.h>
#include <isl/ilp.h>

#include "gpu_array_tile.h"
#include "gpu_group.h"
#include "gpu_tree.h"
#include "schedule.h"
/* Print the name of the local copy of a given group of array references.
 */
__isl_give isl_printer *gpu_array_ref_group_print_name(
	struct gpu_array_ref_group *group, __isl_take isl_printer *p)
{
	int global = 0;

	if (group->private_tile)
		p = isl_printer_print_str(p, "private_");
	else if (group->shared_tile)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->local_array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *gpu_array_ref_group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		     (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					    isl_union_map_from_map(map_i));
	}

	return access;
}
/* Return the effective gpu_array_tile associated to "group" or
 * NULL if there is no such gpu_array_tile.
 * If we have computed both a private and a shared tile, then
 * the private tile is used.
 */
struct gpu_array_tile *gpu_array_ref_group_tile(
	struct gpu_array_ref_group *group)
{
	if (group->private_tile)
		return group->private_tile;
	if (group->shared_tile)
		return group->shared_tile;

	return NULL;
}
/* Does the tile associated to "group" require unrolling of the schedule
 * dimensions mapped to threads?
 * Note that this can only happen for private tiles.
 */
int gpu_array_ref_group_requires_unroll(struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;
	return tile->requires_unroll;
}
/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, __isl_keep isl_val *stride, int sign)
{
	int i;
	isl_val *v;
	isl_space *space;
	unsigned nparam;
	unsigned nvar;
	isl_aff *aff;

	isl_val_free(bound->stride);
	bound->stride = isl_val_copy(stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	v = isl_constraint_get_constant_val(c);
	if (sign < 0)
		v = isl_val_neg(v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant_val(aff, v);

	for (i = 0; i < nparam; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_param, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_param, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_in, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_in, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_in, i, v);
	}

	bound->shift = aff;
}
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
	int i;
	isl_ctx *ctx;
	isl_val *v;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	ctx = isl_constraint_get_ctx(c);
	n_div = isl_constraint_dim(c, isl_dim_div);
	v = isl_constraint_get_coefficient_val(c, isl_dim_out, 0);

	if (n_div && (isl_val_is_one(v) || isl_val_is_negone(v))) {
		int s = isl_val_sgn(v);
		isl_val *stride = isl_val_zero(ctx);

		isl_val_free(v);
		for (i = 0; i < n_div; ++i) {
			v = isl_constraint_get_coefficient_val(c,
							isl_dim_div, i);
			stride = isl_val_gcd(stride, v);
		}
		if (!isl_val_is_zero(stride) &&
		    isl_val_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

		isl_val_free(stride);
	} else
		isl_val_free(v);

	isl_constraint_free(c);

	return 0;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * Next, we construct a mapping of the form
 *
 *	[D -> i] -> [D -> (i + s(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D, obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	bound->stride = NULL;

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (!bound->stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bmap = isl_basic_map_apply_range(shift, scale);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}
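
/* A purely illustrative example of the stride mechanism above: if
 * "bounds" is a subset of
 *
 *	{ D[i0] -> [j] : exists e0 : j = 2 i0 + 4 e0 }
 *
 * then the affine hull contains an equality equivalent to
 * -2 i0 + j = 4 e0, so check_stride_constraint finds the stride g = 4
 * and extract_stride records the shift s(D[i0]) = -2 i0.
 * The mapping constructed by check_stride is then
 *
 *	[D[i0] -> [j]] -> [D[i0] -> [(j - 2 i0)/4]]
 *
 * which maps the accessed values of j onto consecutive integers.
 */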
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.
 * This variable is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return 0;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return 0;
}
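
/* For instance (purely as an illustration), if the set contains the
 * constraints n <= i <= n + 31 for the array index i at position
 * size->pos, then the lower bound i >= n yields lb(x) = n, the
 * expression i - n has constant upper bound 31, and bound->size is
 * set to 32 with bound->lb set to n.
 */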
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}
/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile(__isl_keep isl_map *access, struct gpu_array_tile *tile)
{
	int i;

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
					    1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return -1;
	}

	return 0;
}
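
/* As an illustration on a hypothetical access relation: for
 *
 *	{ D[i, j] -> A[a] : i <= a <= i + 3 }
 *
 * the single array index has lower bound lb(i, j) = i and constant
 * size 4, so can_tile succeeds with a 4-element tile at parametric
 * offset i.
 */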
/* Internal data structure for gpu_group_references.
 *
 * scop represents the input scop.
 * kernel_depth is the schedule depth where the kernel launch will
 * be introduced, i.e., it is the depth of the band that is mapped
 * to blocks.
 * thread_depth is the schedule depth where the thread mark is located,
 * i.e., it is the depth of the band that is mapped to threads and also
 * the schedule depth at which the copying to/from shared/private memory
 * is computed.  The copy operation may then later be hoisted to
 * a higher level.
 * n_thread is the number of schedule dimensions in the band that
 * is mapped to threads.
 * privatization lives in the range of thread_sched (i.e., it is
 * of dimension thread_depth + n_thread) and encodes the mapping
 * to thread identifiers (as parameters).
 * shared_sched contains the first thread_depth dimensions of the
 * kernel schedule.
 * thread_sched contains the first (thread_depth + n_thread) dimensions
 * of the kernel schedule.
 * full_sched is a union_map representation of the entire kernel schedule.
 */
struct gpu_group_data {
	struct ppcg_scop *scop;
	int kernel_depth;
	int thread_depth;
	int n_thread;
	isl_set *privatization;
	isl_union_map *shared_sched;
	isl_union_map *thread_sched;
	isl_union_map *full_sched;
};
/* Construct a map from domain_dim to domain_dim that increments
 * the dimension at position "pos" and leaves all other dimensions
 * constant.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
{
	int i;
	int len = isl_space_dim(domain_dim, isl_dim_set);
	isl_space *dim;
	isl_basic_map *next;
	isl_local_space *ls;

	dim = isl_space_map_from_set(domain_dim);
	next = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		isl_constraint *c;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		if (i == pos)
			c = isl_constraint_set_constant_si(c, 1);
		next = isl_basic_map_add_constraint(next, c);
	}

	isl_local_space_free(ls);

	return isl_map_from_basic_map(next);
}
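
/* For example, for a 3-dimensional space and pos = 1, the constructed
 * map is { [i0, i1, i2] -> [i0, i1 + 1, i2] }.
 */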
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one thread identifier.
 */
static int access_is_coalesced(struct gpu_group_data *data,
	__isl_keep isl_union_map *access)
{
	isl_space *space;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(data->full_sched));
	access_map = isl_map_from_union_map(access);

	space = isl_map_get_space(access_map);
	space = isl_space_domain(space);
	next_thread_x = next(space, data->thread_depth + data->n_thread - 1);

	space = isl_map_get_space(access_map);
	space = isl_space_range(space);
	next_element = next(space, isl_space_dim(space, isl_dim_set) - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
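
/* A hypothetical illustration: if the schedule dimension that gets
 * wrapped over the last thread index is i1 and the scheduled access is
 * { [i0, i1] -> A[i0][i1] }, then incrementing i1 increments the last
 * array index and the access is considered coalesced; for
 * { [i0, i1] -> A[i1][i0] } it is not.
 */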
/* Given an access relation in terms of at least data->thread_depth initial
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first data->thread_depth dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
static int access_is_bijective(struct gpu_group_data *data,
	__isl_keep isl_map *access)
{
	int res;
	int dim;
	isl_set *par;
	isl_space *space;
	isl_id_list *ids;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	ids = ppcg_scop_generate_names(data->scop, data->thread_depth, "s");
	dim = isl_map_dim(access, isl_dim_in);
	par = parametrization(space, dim, 0, ids);
	isl_id_list_free(ids);
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}
/* Compute the number of outer schedule tile dimensions that affect
 * the offset of "tile".
 * If there is no such dimension, then return the index
 * of the first kernel dimension, i.e., data->kernel_depth.
 */
static int compute_tile_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = data->thread_depth - 1; j >= data->kernel_depth; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return ++j;
}
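
/* For instance, with data->kernel_depth = 1 and data->thread_depth = 4,
 * if the lower bounds and shifts of "tile" only involve schedule
 * dimension 1, then dimensions 2 and 3 do not affect the offset and
 * compute_tile_depth returns 2; if none of dimensions 1, 2 and 3
 * is involved, it returns data->kernel_depth = 1.
 */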
/* Adjust the fields of "tile" to reflect the new input dimension "new_dim",
 * where "old_dim" is the old dimension.
 * The dimensions beyond "new_dim" are assumed not to affect the tile,
 * so they can simply be dropped.
 */
static int tile_adjust_depth(struct gpu_array_tile *tile,
	int old_dim, int new_dim)
{
	int i;

	if (old_dim == new_dim)
		return 0;

	for (i = 0; i < tile->n; ++i) {
		tile->bound[i].lb = isl_aff_drop_dims(tile->bound[i].lb,
					isl_dim_in, new_dim, old_dim - new_dim);
		if (!tile->bound[i].lb)
			return -1;
		if (!tile->bound[i].shift)
			continue;
		tile->bound[i].shift = isl_aff_drop_dims(tile->bound[i].shift,
					isl_dim_in, new_dim, old_dim - new_dim);
		if (!tile->bound[i].shift)
			return -1;
	}

	return 0;
}
/* Determine the number of schedule dimensions that affect the offset of the
 * shared or private tile and store the result in group->depth, with
 * a lower bound of data->kernel_depth.
 * If there is no tile defined on the array reference group,
 * then set group->depth to data->thread_depth.
 * Also adjust the fields of the tile to only refer to the group->depth
 * outer schedule dimensions.
 */
static int set_depth(struct gpu_group_data *data,
	struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	group->depth = data->thread_depth;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;

	group->depth = compute_tile_depth(data, tile);
	if (tile_adjust_depth(tile, data->thread_depth, group->depth) < 0)
		return -1;

	return 0;
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_local_array_info *local,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);

	n = 0;
	for (i = 0; i < local->array->n_ref; ++i) {
		isl_union_map *umap;
		isl_map *map;
		struct gpu_array_ref_group *group;
		struct gpu_stmt_access *access = local->array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(data->shared_sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);
		map = isl_map_detect_equalities(map);

		group = isl_calloc_type(ctx, struct gpu_array_ref_group);
		if (!group) {
			isl_map_free(map);
			return -1;
		}
		group->local_array = local;
		group->array = local->array;
		group->access = map;
		group->write = access->write;
		group->exact_write = access->exact_write;
		group->slice = access->n_index < local->array->n_index;
		group->refs = &local->array->refs[i];
		group->n_ref = 1;

		groups[n++] = group;
	}

	return n;
}
/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
struct gpu_array_ref_group *gpu_array_ref_group_free(
	struct gpu_array_ref_group *group)
{
	if (!group)
		return NULL;
	gpu_array_tile_free(group->shared_tile);
	gpu_array_tile_free(group->private_tile);
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);

	return NULL;
}
/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int disjoint;

	disjoint = isl_map_is_disjoint(group1->access, group2->access);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}
/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int i;
	isl_ctx *ctx;
	struct gpu_array_ref_group *group;

	ctx = isl_map_get_ctx(group1->access);
	group = isl_calloc_type(ctx, struct gpu_array_ref_group);
	if (!group)
		return NULL;
	group->local_array = group1->local_array;
	group->array = group1->array;
	group->access = isl_map_union(isl_map_copy(group1->access),
					isl_map_copy(group2->access));
	group->write = group1->write || group2->write;
	group->exact_write = group1->exact_write && group2->exact_write;
	group->slice = group1->slice || group2->slice;
	group->n_ref = group1->n_ref + group2->n_ref;
	group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
					group->n_ref);
	if (!group->refs)
		return gpu_array_ref_group_free(group);
	for (i = 0; i < group1->n_ref; ++i)
		group->refs[i] = group1->refs[i];
	for (i = 0; i < group2->n_ref; ++i)
		group->refs[group1->n_ref + i] = group2->refs[i];

	return group;
}
/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	struct gpu_array_ref_group *group;

	group = join_groups(group1, group2);
	gpu_array_ref_group_free(group1);
	gpu_array_ref_group_free(group2);

	return group;
}
/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct ppcg_kernel *kernel,
	__isl_keep isl_union_map *access)
{
	isl_ctx *ctx;
	isl_printer *p;

	ctx = isl_union_map_get_ctx(access);
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_print_str(p, "Array reference group ");
	p = isl_printer_print_union_map(p, access);
	p = isl_printer_print_str(p,
	    " not considered for mapping to shared memory in kernel");
	p = isl_printer_print_int(p, kernel->id);
	p = isl_printer_print_str(p,
	    " because it exhibits no reuse and is considered to be coalesced");
	p = isl_printer_end_line(p);
	isl_printer_free(p);
}
/* Given an access relation in terms of the data->thread_depth initial
 * dimensions of the computed schedule and the thread identifiers
 * (as parameters), check if the use of the corresponding private tile
 * requires unrolling.
 *
 * If we are creating a private tile because we are forced to,
 * then no unrolling is required.
 * Otherwise we check if "access" is bijective and unrolling
 * is required if it is not.  Note that the access relation
 * has already been determined to be bijective before the introduction
 * of the thread identifiers and the removal of the schedule dimensions
 * that are mapped to these threads.  If the access relation is no longer
 * bijective, then this means that more than one value of one of those
 * schedule dimensions is mapped to the same thread and therefore
 * unrolling is required.
 */
static int check_requires_unroll(struct gpu_group_data *data,
	__isl_keep isl_map *access, int force_private)
{
	int bijective;

	if (force_private)
		return 0;
	bijective = access_is_bijective(data, access);
	if (bijective < 0)
		return -1;
	return !bijective;
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return 0 on success and -1 on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory.  Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse.  Moreover, we require that the access is private
 * to the thread.  That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the outer
 * data->thread_depth + data->n_thread schedule dimensions
 * to the accessed array elements.
 * The latter data->n_thread dimensions will be mapped to thread identifiers.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the use of the private tile requires unrolling, but some
 * of the other arrays are forcibly mapped to private memory,
 * then we do not allow the use of this private tile since
 * we cannot move the schedule dimensions that need to be unrolled down
 * without performing some kind of expansion on those arrays
 * that are forcibly mapped to private memory.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static int compute_group_bounds_core(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	isl_ctx *ctx = isl_space_get_ctx(group->array->space);
	isl_union_map *access;
	int n_index = group->array->n_index;
	int no_reuse, coalesced;
	isl_map *acc;
	int force_private = group->local_array->force_private;
	int use_shared = kernel->options->use_shared_memory &&
				data->n_thread > 0;
	int use_private = force_private || kernel->options->use_private_memory;
	int r = 0;
	int requires_unroll;

	if (!use_shared && !use_private)
		return 0;
	if (gpu_array_is_read_only_scalar(group->array))
		return 0;
	if (!force_private && !group->exact_write)
		return 0;
	if (group->slice)
		return 0;

	access = gpu_array_ref_group_access_relation(group, 1, 1);
	no_reuse = isl_union_map_is_injective(access);
	if (no_reuse < 0)
		r = -1;
	if (use_shared && no_reuse)
		coalesced = access_is_coalesced(data, access);

	if (r >= 0 && kernel->options->debug->verbose &&
	    use_shared && no_reuse && coalesced)
		report_no_reuse_and_coalesced(kernel, access);

	if (use_shared && (!no_reuse || !coalesced)) {
		group->shared_tile = gpu_array_tile_create(ctx,
							group->array->n_index);
		if (!group->shared_tile)
			r = -1;
		else if (!can_tile(group->access, group->shared_tile))
			group->shared_tile =
					gpu_array_tile_free(group->shared_tile);
	}

	if (r < 0 || (!force_private && (!use_private || no_reuse))) {
		isl_union_map_free(access);
		return r;
	}

	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(data->thread_sched));

	acc = isl_map_from_union_map(access);

	if (!force_private && !access_is_bijective(data, acc)) {
		isl_map_free(acc);
		return 0;
	}

	acc = isl_map_intersect_domain(acc, isl_set_copy(data->privatization));
	acc = isl_map_project_out(acc, isl_dim_in, data->thread_depth,
							data->n_thread);
	requires_unroll = check_requires_unroll(data, acc, force_private);
	if (requires_unroll < 0 ||
	    (requires_unroll && kernel->any_force_private)) {
		isl_map_free(acc);
		return requires_unroll < 0 ? -1 : 0;
	}

	group->private_tile = gpu_array_tile_create(ctx, n_index);
	if (!group->private_tile) {
		isl_map_free(acc);
		return -1;
	}
	group->private_tile->requires_unroll = requires_unroll;
	if (!can_tile(acc, group->private_tile))
		group->private_tile = gpu_array_tile_free(group->private_tile);

	isl_map_free(acc);

	if (force_private && !group->private_tile)
		isl_die(ctx, isl_error_internal,
			"unable to map array reference group to registers",
			return -1);

	return 0;
}
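
/* As an informal, hypothetical illustration of the privacy test above:
 * if, for fixed values of the outer schedule dimensions, the iterations
 * wrapped over the threads access pairwise distinct array elements and
 * each such iteration accesses a single element (say, iteration t
 * accesses A[t]), then the relation is bijective and a private tile can
 * be considered.  If, instead, all of these iterations access A[0],
 * the relation is not injective and no private tile is computed.
 */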
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set the tile depth.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	if (!group)
		return -1;
	if (compute_group_bounds_core(kernel, group, data) < 0)
		return -1;
	if (set_depth(data, group) < 0)
		return -1;

	return 0;
}
/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	int (*overlap)(struct gpu_array_ref_group *group1,
		struct gpu_array_ref_group *group2), int compute_bounds,
	struct gpu_group_data *data)
{
	int i, j;

	for (i = 0; i < n; ++i) {
		for (j = n - 1; j > i; --j) {
			if (!groups[i]->write && !groups[j]->write)
				continue;

			if (!overlap(groups[i], groups[j]))
				continue;

			groups[i] = join_groups_and_free(groups[i], groups[j]);

			groups[j] = groups[n - 1];
			groups[n - 1] = NULL;
			n--;

			if (!groups[i])
				return -1;
			if (compute_bounds &&
			    compute_group_bounds(kernel, groups[i], data) < 0)
				return -1;
		}
	}

	return n;
}
/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &accesses_overlap, 0, data);
}
/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->depth, group2->depth) loops.
 */
static int depth_accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int depth;
	int dim;
	int empty;
	isl_map *map_i, *map_j, *map;

	depth = group1->depth;
	if (group2->depth < depth)
		depth = group2->depth;
	map_i = isl_map_copy(group1->access);
	dim = isl_map_dim(map_i, isl_dim_in);
	map_i = isl_map_eliminate(map_i, isl_dim_in, depth, dim - depth);
	map_j = isl_map_copy(group2->access);
	map_j = isl_map_eliminate(map_j, isl_dim_in, depth, dim - depth);
	map = isl_map_intersect(map_i, map_j);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	return !empty;
}
/* If two groups have overlapping access relations (within the outer
 * depth loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_depth_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &depth_accesses_overlap, 1,
				data);
}
/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
static int smaller_tile(struct gpu_array_tile *tile,
	struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
	int smaller;
	isl_val *size, *size1, *size2;

	size = gpu_array_tile_size(tile);
	size1 = gpu_array_tile_size(tile1);
	size2 = gpu_array_tile_size(tile2);

	size = isl_val_sub(size, size1);
	size = isl_val_sub(size, size2);
	smaller = isl_val_is_neg(size);

	isl_val_free(size);

	return smaller;
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * If merging two groups decreases the depth of the tile of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct ppcg_kernel *kernel,
	struct gpu_array_info *array, int n,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i, j;
	int recompute_overlap = 0;
	isl_ctx *ctx = isl_space_get_ctx(array->space);

	for (i = 0; i < n; ++i) {
		if (!groups[i]->shared_tile)
			continue;
		for (j = n - 1; j > i; --j) {
			isl_map *map;
			int empty;
			struct gpu_array_ref_group *group;

			if (!groups[j]->shared_tile)
				continue;

			map = isl_map_intersect(isl_map_copy(groups[i]->access),
					    isl_map_copy(groups[j]->access));
			empty = isl_map_is_empty(map);
			isl_map_free(map);

			if (empty)
				continue;

			group = join_groups(groups[i], groups[j]);
			if (compute_group_bounds(kernel, group, data) < 0) {
				gpu_array_ref_group_free(group);
				return -1;
			}
			if (!group->shared_tile ||
			    !smaller_tile(group->shared_tile,
					groups[i]->shared_tile,
					groups[j]->shared_tile)) {
				gpu_array_ref_group_free(group);
				continue;
			}

			if (group->depth < groups[i]->depth ||
			    group->depth < groups[j]->depth)
				recompute_overlap = 1;
			gpu_array_ref_group_free(groups[i]);
			gpu_array_ref_group_free(groups[j]);
			groups[i] = group;
			groups[j] = groups[n - 1];
			n--;
		}
	}

	if (recompute_overlap)
		n = group_depth_overlapping_writes(kernel, n, groups, data);
	return n;
}
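
/* As a numeric illustration of the merging criterion: if two groups
 * have shared memory tiles of 32 and 32 elements and the merged group
 * admits a tile of 48 elements, then 48 < 32 + 32 and the groups are
 * merged; if the merged group would require a tile of 80 elements,
 * the two groups are kept separate.
 */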
/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct gpu_local_array_info *array,
	int n, struct gpu_array_ref_group **groups)
{
	int i;

	array->n_group = n;
	array->groups = groups;

	for (i = 0; i < n; ++i)
		groups[i]->nr = i;
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the depth of the effective tile.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then there is no need to compute
 * reference groups since we do not map such arrays to private or shared
 * memory.
 */
static int group_array_references(struct ppcg_kernel *kernel,
	struct gpu_local_array_info *local, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);
	struct gpu_array_ref_group **groups;

	if (local->array->has_compound_element)
		return 0;

	groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
					local->array->n_ref);
	if (!groups)
		return -1;

	n = populate_array_references(local, groups, data);

	n = group_overlapping_writes(kernel, n, groups, data);

	for (i = 0; i < n; ++i)
		if (compute_group_bounds(kernel, groups[i], data) < 0)
			n = -1;

	n = group_depth_overlapping_writes(kernel, n, groups, data);

	n = group_common_shared_memory_tile(kernel, local->array,
					    n, groups, data);

	set_array_groups(local, n, groups);

	if (n >= 0)
		return 0;

	for (i = 0; i < local->array->n_ref; ++i)
		gpu_array_ref_group_free(groups[i]);
	return -1;
}
/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of "host_schedule".
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges_in_host(struct ppcg_kernel *kernel,
	__isl_take isl_union_map *host_schedule)
{
	int i;
	isl_union_map *sched;
	isl_union_set *domain;
	isl_union_map *same_host_iteration;

	kernel->any_force_private = 0;

	sched = isl_union_map_universe(isl_union_map_copy(host_schedule));
	domain = isl_union_map_domain(sched);

	same_host_iteration = isl_union_map_apply_range(host_schedule,
		isl_union_map_reverse(isl_union_map_copy(host_schedule)));

	for (i = 0; i < kernel->n_array; ++i) {
		struct gpu_local_array_info *local = &kernel->array[i];
		isl_union_map *order;

		local->force_private = 0;
		if (local->array->n_index != 0)
			continue;
		order = isl_union_map_copy(local->array->dep_order);
		order = isl_union_map_intersect_domain(order,
				isl_union_set_copy(domain));
		order = isl_union_map_intersect_range(order,
				isl_union_set_copy(domain));
		order = isl_union_map_intersect(order,
				isl_union_map_copy(same_host_iteration));
		if (!isl_union_map_is_empty(order)) {
			local->force_private = 1;
			kernel->any_force_private = 1;
		}
		isl_union_map_free(order);
	}

	isl_union_map_free(same_host_iteration);
	isl_union_set_free(domain);
}
/* For each scalar in the input program, check if there are any
 * order dependences active inside the current kernel, within
 * the same iteration of the host schedule, i.e., the prefix
 * schedule at "node".
 * If so, mark the scalar as force_private so that it will be
 * mapped to a register.
 */
static void check_scalar_live_ranges(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *sched;

	if (!kernel->options->live_range_reordering)
		return;

	sched = isl_schedule_node_get_prefix_schedule_union_map(node);

	check_scalar_live_ranges_in_host(kernel, sched);
}
/* Create a set of dimension data->thread_depth + data->n_thread
 * that equates the residue of the final data->n_thread dimensions
 * modulo the "sizes" to the thread identifiers.
 * "space" is a parameter space containing the thread identifiers.
 * Store the computed set in data->privatization.
 */
static void compute_privatization(struct gpu_group_data *data,
	__isl_take isl_space *space, int *sizes)
{
	int i;
	isl_ctx *ctx;
	isl_set *set;
	isl_local_space *ls;

	ctx = isl_union_map_get_ctx(data->shared_sched);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set,
				data->thread_depth + data->n_thread);
	set = isl_set_universe(space);
	space = isl_set_get_space(set);
	ls = isl_local_space_from_space(space);

	for (i = 0; i < data->n_thread; ++i) {
		isl_aff *aff, *aff2;
		isl_constraint *c;
		isl_val *v;
		char name[20];
		int pos;

		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_set, data->thread_depth + i);
		v = isl_val_int_from_si(ctx, sizes[i]);
		aff = isl_aff_mod_val(aff, v);
		snprintf(name, sizeof(name), "t%d", i);
		pos = isl_set_find_dim_by_name(set, isl_dim_param, name);
		aff2 = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_param, pos);
		aff = isl_aff_sub(aff, aff2);
		c = isl_equality_from_aff(aff);
		set = isl_set_add_constraint(set, c);
	}

	isl_local_space_free(ls);
	data->privatization = set;
}
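
/* For example, with data->thread_depth = 2, data->n_thread = 2 and
 * sizes = { 16, 8 }, the set stored in data->privatization is
 *
 *	[t0, t1] -> { [i0, i1, i2, i3] : (i2 mod 16) = t0 and (i3 mod 8) = t1 }
 *
 * assuming the thread identifier parameters are called t0 and t1.
 */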
/* Group references of all arrays in "kernel".
 * "node" points to the kernel mark.
 *
 * We first extract all required schedule information into
 * a gpu_group_data structure and then consider each array
 * in turn.
 */
int gpu_group_references(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	int i;
	int r = 0;
	isl_space *space;
	struct gpu_group_data data;

	check_scalar_live_ranges(kernel, node);

	data.scop = kernel->prog->scop;

	data.kernel_depth = isl_schedule_node_get_schedule_depth(node);

	node = isl_schedule_node_copy(node);
	node = gpu_tree_move_down_to_thread(node, kernel->core);
	data.shared_sched =
		isl_schedule_node_get_prefix_schedule_relation(node);
	data.shared_sched = isl_union_map_detect_equalities(data.shared_sched);

	node = isl_schedule_node_child(node, 0);
	data.thread_depth = isl_schedule_node_get_schedule_depth(node);
	data.n_thread = isl_schedule_node_band_n_member(node);
	data.thread_sched = isl_union_map_copy(data.shared_sched);
	data.thread_sched = isl_union_map_flat_range_product(data.thread_sched,
		isl_schedule_node_band_get_partial_schedule_union_map(node));
	data.thread_sched = isl_union_map_detect_equalities(data.thread_sched);
	node = isl_schedule_node_child(node, 0);
	data.full_sched = isl_union_map_copy(data.thread_sched);
	data.full_sched = isl_union_map_flat_range_product(data.full_sched,
		isl_schedule_node_get_subtree_schedule_union_map(node));
	isl_schedule_node_free(node);

	space = isl_union_set_get_space(kernel->thread_filter);
	compute_privatization(&data, space, kernel->block_dim);

	for (i = 0; i < kernel->n_array; ++i) {
		r = group_array_references(kernel, &kernel->array[i], &data);
		if (r < 0)
			break;
	}

	isl_union_map_free(data.shared_sched);
	isl_union_map_free(data.thread_sched);
	isl_union_map_free(data.full_sched);
	isl_set_free(data.privatization);

	return r;
}
/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first group->depth schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile".  In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
	struct gpu_array_tile *tile, __isl_keep isl_space *space,
	__isl_keep isl_multi_aff *insert_array)
{
	int i;
	isl_ctx *ctx;
	isl_multi_aff *shift;
	isl_multi_val *stride;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *tiling;

	ctx = isl_space_get_ctx(space);
	space2 = isl_space_domain(isl_space_copy(space));
	ls = isl_local_space_from_space(space2);
	space2 = isl_space_range(isl_space_copy(space));
	stride = isl_multi_val_zero(space2);
	shift = isl_multi_aff_zero(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i) {
		struct gpu_array_bound *bound = &tile->bound[i];
		isl_val *stride_i;
		isl_aff *shift_i;

		if (tile->bound[i].shift) {
			stride_i = isl_val_copy(bound->stride);
			shift_i = isl_aff_copy(bound->shift);
		} else {
			stride_i = isl_val_one(ctx);
			shift_i = isl_aff_zero_on_domain(
					isl_local_space_copy(ls));
		}

		stride = isl_multi_val_set_val(stride, i, stride_i);
		shift = isl_multi_aff_set_aff(shift, i, shift_i);
	}
	isl_local_space_free(ls);

	shift = isl_multi_aff_pullback_multi_aff(shift,
				    isl_multi_aff_copy(insert_array));

	tiling = isl_multi_aff_range_map(isl_space_copy(space));
	tiling = isl_multi_aff_add(tiling, shift);
	tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

	return tiling;
}
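
/* Continuing the illustrative strided example used for check_stride:
 * with a single array index that has stride 4 and shift -2 i, the
 * multi_aff constructed here maps
 *
 *	[D[i] -> A[a]] -> A'[(a - 2 i)/4]
 */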
/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first group->depth schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile.  The name of T is the name of the local
 * array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 */
void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group *group)
{
	int i;
	int dim;
	struct gpu_array_tile *tile;
	struct gpu_array_info *array = group->array;
	isl_space *space;
	isl_multi_aff *tiling, *lb, *insert_array;
	isl_printer *p;
	char *local_name;

	tile = group->private_tile;
	if (!tile)
		tile = group->shared_tile;
	if (!tile)
		return;

	space = isl_map_get_space(group->access);
	dim = isl_space_dim(space, isl_dim_in);
	space = isl_space_drop_dims(space, isl_dim_in, group->depth,
				    dim - group->depth);
	insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i)
		if (tile->bound[i].shift)
			break;

	if (i < tile->n)
		tiling = strided_tile(tile, space, insert_array);
	else
		tiling = isl_multi_aff_range_map(isl_space_copy(space));

	lb = isl_multi_aff_zero(space);
	for (i = 0; i < tile->n; ++i) {
		isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
		lb = isl_multi_aff_set_aff(lb, i, lb_i);
	}
	lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

	tiling = isl_multi_aff_sub(tiling, lb);

	p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
	p = gpu_array_ref_group_print_name(group, p);
	local_name = isl_printer_get_str(p);
	isl_printer_free(p);
	tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
	free(local_name);

	tile->tiling = tiling;
}
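
/* As a final illustration (hypothetical group): for a group whose
 * accesses have lower bound lb(i) = i, no stride and a local copy
 * called "shared_A", the computed tiling is
 *
 *	{ [D[i] -> A[a]] -> shared_A[a - i] }
 *
 * so that global element A[i + k] is stored at position k of the tile.
 */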