/*
 * Copyright 2010-2011 INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl/constraint.h>
#include <isl/ilp.h>

#include "gpu_array_tile.h"
#include "gpu_group.h"
#include "gpu_tree.h"
#include "schedule.h"
/* Print the name of the local copy of a given group of array references.
 */
__isl_give isl_printer *gpu_array_ref_group_print_name(
	struct gpu_array_ref_group *group, __isl_take isl_printer *p)
{
	int global = 0;
	enum ppcg_group_access_type type;

	type = gpu_array_ref_group_type(group);
	if (type == ppcg_access_private)
		p = isl_printer_print_str(p, "private_");
	else if (type == ppcg_access_shared)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->local_array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}
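
/* Naming example (illustration only, not part of the original code):
 * a reference group of an array "A" that is mapped to shared memory and
 * that is group number 1 of a local array with more than one group is
 * printed as "shared_A_1".
 */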
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *gpu_array_ref_group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		     (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					    isl_union_map_from_map(map_i));
	}

	return access;
}
/* Should this array reference group be mapped to private, shared or global
 * memory?
 * If we have computed both a private and a shared tile, then
 * the tile with the smallest depth is used.  If both have the same depth,
 * then the private tile is used.
 */
enum ppcg_group_access_type gpu_array_ref_group_type(
	struct gpu_array_ref_group *group)
{
	if (group->private_tile && group->shared_tile &&
	    group->shared_tile->depth < group->private_tile->depth)
		return ppcg_access_shared;
	if (group->private_tile)
		return ppcg_access_private;
	if (group->shared_tile)
		return ppcg_access_shared;
	return ppcg_access_global;
}
/* Return the effective gpu_array_tile associated to "group" or
 * NULL if there is no such gpu_array_tile.
 */
struct gpu_array_tile *gpu_array_ref_group_tile(
	struct gpu_array_ref_group *group)
{
	switch (gpu_array_ref_group_type(group)) {
	case ppcg_access_global:
		return NULL;
	case ppcg_access_shared:
		return group->shared_tile;
	case ppcg_access_private:
		return group->private_tile;
	}

	return NULL;
}
/* Does the tile associated to "group" require unrolling of the schedule
 * dimensions mapped to threads?
 * Note that this can only happen for private tiles.
 */
int gpu_array_ref_group_requires_unroll(struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;
	return tile->requires_unroll;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound->stride and bound->shift and
 * return isl_bool_true.
 * Otherwise, set bound->stride to 1 (and bound->shift to 0) and
 * return isl_bool_false.
 *
 * Note that the stride info returned by isl_map_get_range_stride_info
 * is of the form
 *
 *	i = o(p) + g f
 *
 * a(p) can therefore be taken to be equal to -o(p).
 */
static isl_bool set_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_map *map;
	isl_stride_info *si;
	isl_bool has_stride;

	map = isl_map_from_basic_map(bounds);
	si = isl_map_get_range_stride_info(map, 0);
	isl_map_free(map);

	bound->stride = isl_stride_info_get_stride(si);
	bound->shift = isl_aff_neg(isl_stride_info_get_offset(si));
	has_stride = isl_val_gt_si(bound->stride, 1);

	isl_stride_info_free(si);

	return has_stride;
}
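
/* Illustrative example (hypothetical, not taken from the original code):
 * for constraints { [n] -> [i] : exists e : i = 4e + n }, the range stride
 * is 4 with offset n, so bound->stride is set to 4 and bound->shift to -n,
 * and indeed (-n + i) = 0 mod 4.
 */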
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * Next, we construct a mapping of the form
 *
 *	[D -> i] -> [D -> (i + s(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;
	isl_bool has_stride;

	has_stride = set_stride(bound, isl_basic_map_copy(bounds));
	if (has_stride < 0)
		return isl_basic_map_free(bounds);
	if (!has_stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bmap = isl_basic_map_apply_range(shift, scale);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}
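
/* Continuing the example above (illustration only): for
 * { [n] -> [i] : exists e : i = 4e + n }, check_stride() constructs the
 * mapping [ [n] -> [i] ] -> [ [n] -> [(i - n)/4] ], so that the transformed
 * index ranges over consecutive integers.
 */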
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.  This variable
 * is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static isl_stat compute_size_in_direction(__isl_take isl_constraint *c,
	void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return isl_stat_ok;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return isl_stat_ok;
}
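
/* Worked example (illustration only, hypothetical values): if a lower bound
 * constraint yields ceil(b(x)/m) = 2n and the maximum of i - 2n over the set
 * is 9, then the size in this direction is 10 and the recorded lower bound
 * is lb(n) = 2n.
 */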
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}
/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 *
 * tile->depth is initialized to the input dimension of the computed bounds.
 */
static isl_bool can_tile(__isl_keep isl_map *access,
	struct gpu_array_tile *tile)
{
	int i;

	if (!tile)
		return isl_bool_error;

	tile->depth = isl_map_dim(access, isl_dim_in);

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
					    1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return isl_bool_false;
	}

	return isl_bool_true;
}
/* Internal data structure for gpu_group_references.
 *
 * scop represents the input scop.
 * kernel_depth is the schedule depth where the kernel launch will
 * be introduced, i.e., it is the depth of the band that is mapped
 * to blocks.
 * shared_depth is the schedule depth at which the copying to/from
 * shared memory is computed.  The copy operation may then
 * later be hoisted to a higher level.
 * thread_depth is the schedule depth where the thread mark is located,
 * i.e., it is the depth of the band that is mapped to threads and also
 * the schedule depth at which the copying to/from private memory
 * is computed.  The copy operation may then later be hoisted to
 * a higher level.
 * n_thread is the number of schedule dimensions in the band that
 * is mapped to threads.
 * privatization lives in the range of thread_sched (i.e., it is
 * of dimension thread_depth + n_thread) and encodes the mapping
 * to thread identifiers (as parameters).
 * host_sched contains the kernel_depth dimensions of the host schedule.
 * shared_sched contains the first shared_depth dimensions of the
 * kernel schedule.
 * copy_sched contains the first thread_depth dimensions of the
 * kernel schedule.
 * thread_sched contains the first (thread_depth + n_thread) dimensions
 * of the kernel schedule.
 * full_sched is a union_map representation of the entire kernel schedule.
 * The schedules are all formulated in terms of the original statement
 * instances, i.e., those that appear in the domains of the access
 * relations.
 */
struct gpu_group_data {
	struct ppcg_scop *scop;
	int kernel_depth;
	int shared_depth;
	int thread_depth;
	int n_thread;
	isl_set *privatization;
	isl_union_map *host_sched;
	isl_union_map *shared_sched;
	isl_union_map *copy_sched;
	isl_union_map *thread_sched;
	isl_union_map *full_sched;
};
/* Construct a map from domain_space to domain_space that increments
 * the dimension at position "pos" and leaves all other dimensions
 * constant.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_space, int pos)
{
	isl_space *space;
	isl_aff *aff;
	isl_multi_aff *next;

	space = isl_space_map_from_set(domain_space);
	next = isl_multi_aff_identity(space);
	aff = isl_multi_aff_get_aff(next, pos);
	aff = isl_aff_add_constant_si(aff, 1);
	next = isl_multi_aff_set_aff(next, pos, aff);

	return isl_map_from_multi_aff(next);
}
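
/* For example (illustration only), applying next() to the space of a
 * two-dimensional set with pos = 1 produces the map
 * { [i0, i1] -> [i0, 1 + i1] }.
 */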
/* Check if the given access is coalesced (or if there is no point
 * in trying to coalesce the access by mapping the array to shared memory).
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * If no two consecutive array elements are ever accessed by "access",
 * then mapping the corresponding array to shared memory will not
 * improve coalescing.  In fact, the copying will likely be performed
 * by a single thread.  Consider the access as coalesced such that
 * the caller will not try and map the array to shared memory just
 * to improve coalescing.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one thread identifier.
 */
static int access_is_coalesced(struct gpu_group_data *data,
	__isl_keep isl_union_map *access)
{
	int dim;
	isl_space *space;
	isl_set *accessed;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced, empty;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(data->full_sched));
	access_map = isl_map_from_union_map(access);

	space = isl_map_get_space(access_map);
	space = isl_space_range(space);
	dim = isl_space_dim(space, isl_dim_set);
	if (dim == 0)
		next_element = isl_map_empty(isl_space_map_from_set(space));
	else
		next_element = next(space, dim - 1);

	accessed = isl_map_range(isl_map_copy(access_map));
	map = isl_map_copy(next_element);
	map = isl_map_intersect_domain(map, isl_set_copy(accessed));
	map = isl_map_intersect_range(map, accessed);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	if (empty < 0 || empty) {
		isl_map_free(next_element);
		isl_map_free(access_map);
		return empty < 0 ? -1 : 1;
	}

	space = isl_map_get_space(access_map);
	space = isl_space_domain(space);
	next_thread_x = next(space, data->thread_depth + data->n_thread - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
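
/* Illustrative example (hypothetical, not from the original code): if the
 * last array index is 32 * t in terms of the last thread index t, then
 * incrementing t skips 31 elements and the access is not coalesced,
 * whereas an access whose last index equals t is coalesced.
 */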
/* Replace the host schedule dimensions in the access relation "access"
 * by parameters, so that they are treated as fixed when checking for reuse
 * (within a kernel) or whether two consecutive elements are accessed
 * (within a kernel).
 */
static __isl_give isl_union_map *localize_access(struct gpu_group_data *data,
	__isl_take isl_union_map *access)
{
	int n;
	isl_space *space;
	isl_set *param;
	isl_union_map *umap;
	isl_id_list *ids;

	umap = isl_union_map_copy(data->host_sched);
	space = isl_union_map_get_space(umap);
	n = data->kernel_depth;
	ids = ppcg_scop_generate_names(data->scop, n, "__ppcg_host_");
	param = parametrization(space, n, 0, ids);
	isl_id_list_free(ids);
	umap = isl_union_map_intersect_range(umap,
					isl_union_set_from_set(param));
	access = isl_union_map_intersect_domain(access,
					isl_union_map_domain(umap));

	return access;
}
/* Given an access relation in terms of at least data->thread_depth initial
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first data->thread_depth dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
static int access_is_bijective(struct gpu_group_data *data,
	__isl_keep isl_map *access)
{
	int res;
	int dim;
	isl_set *par;
	isl_space *space;
	isl_id_list *ids;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	ids = ppcg_scop_generate_names(data->scop, data->thread_depth, "s");
	dim = isl_map_dim(access, isl_dim_in);
	par = parametrization(space, dim, 0, ids);
	isl_id_list_free(ids);
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}
/* Compute the number of outer schedule tile dimensions that affect
 * the offset of "tile".
 * If there is no such dimension, then return the index
 * of the first kernel dimension, i.e., data->kernel_depth.
 */
static int compute_tile_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = tile->depth - 1; j >= data->kernel_depth; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return ++j;
}
/* Return the lowest depth between data->kernel_depth and data->thread_depth
 * at which every array element accessed through "acc" is accessed
 * by a single thread.  The input dimension of "acc" is
 * data->thread_depth + data->n_thread, where the final data->n_thread
 * dimensions are those that will be mapped to threads.
 * If the values for these dimensions are uniquely determined
 * by the array index and a given number of outer dimensions, then
 * there is only one thread accessing that array element within those
 * outer dimensions.
 *
 * The input space of "acc" is first split up, such that it has the form
 *
 *	[O -> T] -> A
 *
 * with O the outer dimensions, T the dimensions that will be mapped to threads
 * and A the array index.
 *
 * Then the positions of T and A are interchanged to simplify the test
 * whether T uniquely depends on O and A.
 * In particular, the above access relation is first combined with
 *
 *	[O -> T] -> T
 *
 * to form
 *
 *	[O -> T] -> [A -> T]
 *
 * from which
 *
 *	O -> [A -> T]
 *
 * is extracted, which is then uncurried to
 *
 *	[O -> A] -> T
 *
 * Finally, the final dimensions of O are projected out one by one
 * until T is no longer uniquely determined by A and the remaining
 * dimensions in O.  The value returned is that of the last dimension
 * that was successfully projected out.
 * Note that there is no need to test whether [O -> A] -> T itself
 * is single-valued as that was already tested in access_is_bijective.
 */
static int compute_accessed_by_single_thread_depth(struct gpu_group_data *data,
	__isl_keep isl_map *acc)
{
	int i;
	int sv;
	isl_space *space;
	isl_map *map;

	if (data->thread_depth == data->kernel_depth)
		return data->thread_depth;

	acc = isl_map_copy(acc);

	space = isl_map_get_space(acc);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, data->thread_depth);
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, data->n_thread);
	space = isl_space_wrap(space);
	map = isl_set_flatten_map(isl_set_universe(space));
	acc = isl_map_apply_range(map, acc);

	space = isl_space_domain(isl_map_get_space(acc));
	map = isl_map_range_map(isl_map_universe(isl_space_unwrap(space)));
	acc = isl_map_range_product(acc, map);
	acc = isl_map_domain_factor_domain(acc);
	acc = isl_map_uncurry(acc);

	for (i = data->thread_depth - 1; i >= data->kernel_depth; --i) {
		acc = isl_map_project_out(acc, isl_dim_in, i, 1);
		sv = isl_map_is_single_valued(acc);
		if (sv < 0) {
			isl_map_free(acc);
			return -1;
		}
		if (!sv)
			break;
	}

	isl_map_free(acc);

	return i + 1;
}
/* Adjust the fields of "tile" to reflect the new input dimension "depth".
 * The dimensions beyond "depth" are assumed not to affect the tile,
 * so they can simply be dropped.
 */
static int tile_adjust_depth(struct gpu_array_tile *tile, int depth)
{
	int i;

	if (tile->depth == depth)
		return 0;

	for (i = 0; i < tile->n; ++i) {
		tile->bound[i].lb = isl_aff_drop_dims(tile->bound[i].lb,
					isl_dim_in, depth, tile->depth - depth);
		if (!tile->bound[i].lb)
			return -1;
		if (!tile->bound[i].shift)
			continue;
		tile->bound[i].shift = isl_aff_drop_dims(tile->bound[i].shift,
					isl_dim_in, depth, tile->depth - depth);
		if (!tile->bound[i].shift)
			return -1;
	}

	tile->depth = depth;

	return 0;
}
/* Determine the number of schedule dimensions that affect the offset of the
 * shared or private tile "tile" and store the result in tile->depth, with
 * a lower bound of data->kernel_depth.
 * Also adjust the fields of the tile to only refer to the tile->depth
 * outer schedule dimensions.
 */
static isl_stat tile_set_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	if (tile_adjust_depth(tile, compute_tile_depth(data, tile)) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Determine the number of schedule dimensions that affect the offset of the
 * shared tile and store the minimum of the private and shared tile depth
 * in group->min_depth, with a lower bound of data->kernel_depth.
 * If there is no tile defined on the array reference group,
 * then set group->min_depth to data->thread_depth.
 */
static int set_depth(struct gpu_group_data *data,
	struct gpu_array_ref_group *group)
{
	group->min_depth = data->thread_depth;

	if (group->private_tile) {
		if (group->private_tile->depth < group->min_depth)
			group->min_depth = group->private_tile->depth;
	}
	if (group->shared_tile) {
		if (tile_set_depth(data, group->shared_tile) < 0)
			return -1;
		if (group->shared_tile->depth < group->min_depth)
			group->min_depth = group->shared_tile->depth;
	}

	return 0;
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_local_array_info *local,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->copy_sched);

	n = 0;
	for (i = 0; i < local->array->n_ref; ++i) {
		isl_union_map *umap;
		isl_map *map;
		struct gpu_array_ref_group *group;
		struct gpu_stmt_access *access = local->array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(data->copy_sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);
		map = isl_map_detect_equalities(map);

		group = isl_calloc_type(ctx, struct gpu_array_ref_group);
		if (!group) {
			isl_map_free(map);
			return -1;
		}
		group->local_array = local;
		group->array = local->array;
		group->access = map;
		group->write = access->write;
		group->exact_write = access->exact_write;
		group->slice = access->n_index < local->array->n_index;
		group->refs = &local->array->refs[i];
		group->n_ref = 1;

		groups[n++] = group;
	}

	return n;
}
/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
struct gpu_array_ref_group *gpu_array_ref_group_free(
	struct gpu_array_ref_group *group)
{
	if (!group)
		return NULL;
	gpu_array_tile_free(group->shared_tile);
	gpu_array_tile_free(group->private_tile);
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);
	return NULL;
}
/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int disjoint;

	disjoint = isl_map_is_disjoint(group1->access, group2->access);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}
/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int i;
	isl_ctx *ctx;
	struct gpu_array_ref_group *group;

	if (!group1 || !group2)
		return NULL;

	ctx = isl_map_get_ctx(group1->access);
	group = isl_calloc_type(ctx, struct gpu_array_ref_group);
	if (!group)
		return NULL;
	group->local_array = group1->local_array;
	group->array = group1->array;
	group->access = isl_map_union(isl_map_copy(group1->access),
					isl_map_copy(group2->access));
	group->write = group1->write || group2->write;
	group->exact_write = group1->exact_write && group2->exact_write;
	group->slice = group1->slice || group2->slice;
	group->n_ref = group1->n_ref + group2->n_ref;
	group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
					group->n_ref);
	if (!group->refs)
		return gpu_array_ref_group_free(group);
	for (i = 0; i < group1->n_ref; ++i)
		group->refs[i] = group1->refs[i];
	for (i = 0; i < group2->n_ref; ++i)
		group->refs[group1->n_ref + i] = group2->refs[i];

	return group;
}
/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	struct gpu_array_ref_group *group;

	group = join_groups(group1, group2);
	gpu_array_ref_group_free(group1);
	gpu_array_ref_group_free(group2);
	return group;
}
/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct ppcg_kernel *kernel,
	__isl_keep isl_union_map *access)
{
	isl_ctx *ctx;
	isl_printer *p;

	ctx = isl_union_map_get_ctx(access);
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_print_str(p, "Array reference group ");
	p = isl_printer_print_union_map(p, access);
	p = isl_printer_print_str(p,
	    " not considered for mapping to shared memory in kernel");
	p = isl_printer_print_int(p, kernel->id);
	p = isl_printer_print_str(p,
	    " because it exhibits no reuse and is considered to be coalesced");
	p = isl_printer_end_line(p);
	isl_printer_free(p);
}
/* Given an access relation in terms of the data->thread_depth initial
 * dimensions of the computed schedule and the thread identifiers
 * (as parameters), check if the use of the corresponding private tile
 * requires unrolling.
 *
 * If we are creating a private tile because we are forced to,
 * then no unrolling is required.
 * Otherwise we check if "access" is bijective and unrolling
 * is required if it is not.  Note that the access relation
 * has already been determined to be bijective before the introduction
 * of the thread identifiers and the removal of the schedule dimensions
 * that are mapped to these threads.  If the access relation is no longer
 * bijective, then this means that more than one value of one of those
 * schedule dimensions is mapped to the same thread and therefore
 * unrolling is required.
 */
static int check_requires_unroll(struct gpu_group_data *data,
	__isl_keep isl_map *access, int force_private)
{
	int bijective;

	if (force_private)
		return 0;
	bijective = access_is_bijective(data, access);
	if (bijective < 0)
		return -1;
	return !bijective;
}
/* Map the domain of "access" to the outer data->shared_depth
 * schedule dimensions.  When data->shared_depth is equal to
 * data->thread_depth, this result is already available in group->access.
 */
static __isl_give isl_map *shared_access(struct gpu_array_ref_group *group,
	__isl_keep isl_union_map *access, struct gpu_group_data *data)
{
	isl_union_map *shared;

	if (data->shared_depth == data->thread_depth)
		return isl_map_copy(group->access);
	shared = isl_union_map_copy(access);
	shared = isl_union_map_apply_domain(shared,
			isl_union_map_copy(data->shared_sched));
	return isl_map_from_union_map(shared);
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return isl_stat_ok on success and isl_stat_error on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory.  Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 * Reuse and coalescing are checked within the given kernel.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse.  Moreover, we require that the access is private
 * to the thread.  That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the outer
 * data->thread_depth + data->n_thread schedule dimensions to the accessed
 * array elements.
 * The latter data->n_thread will be mapped to thread identifiers.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * For private memory tiles, the number of schedule dimensions that
 * affect the offset is computed and stored in tile->depth, with
 * a lower bound of data->kernel_depth.  If this depth is smaller
 * than the minimal depth that still ensures that every element
 * is accessed by a single thread, then the depth is raised
 * to this minimal depth.
 * The fields of the tile are then adjusted to only refer to the tile->depth
 * outer schedule dimensions.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the use of the private tile requires unrolling, but some
 * of the other arrays are forcibly mapped to private memory,
 * then we do not allow the use of this private tile since
 * we cannot move the schedule dimensions that need to be unrolled down
 * without performing some kind of expansion on those arrays
 * that are forcibly mapped to private memory.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers only.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static isl_stat compute_group_bounds_core(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	isl_ctx *ctx = isl_space_get_ctx(group->array->space);
	isl_union_map *access, *local;
	int n_index = group->array->n_index;
	int no_reuse, coalesced;
	isl_map *acc;
	int force_private = group->local_array->force_private;
	int use_shared = !force_private && kernel->options->use_shared_memory &&
				data->n_thread > 0;
	int use_private = force_private || kernel->options->use_private_memory;
	isl_stat r = isl_stat_ok;
	isl_bool ok;
	int requires_unroll;
	int unique_depth;

	if (!use_shared && !use_private)
		return isl_stat_ok;
	if (gpu_array_is_read_only_scalar(group->array))
		return isl_stat_ok;
	if (!force_private && !group->exact_write)
		return isl_stat_ok;
	if (group->slice)
		return isl_stat_ok;

	access = gpu_array_ref_group_access_relation(group, 1, 1);
	local = localize_access(data, isl_union_map_copy(access));
	no_reuse = isl_union_map_is_injective(local);
	if (no_reuse < 0)
		r = isl_stat_error;
	if (use_shared && no_reuse)
		coalesced = access_is_coalesced(data, local);
	isl_union_map_free(local);

	if (r >= 0 && kernel->options->debug->verbose &&
	    use_shared && no_reuse && coalesced)
		report_no_reuse_and_coalesced(kernel, access);

	if (use_shared && (!no_reuse || !coalesced)) {
		group->shared_tile = gpu_array_tile_create(ctx,
							group->array->n_index);
		acc = shared_access(group, access, data);
		ok = can_tile(acc, group->shared_tile);
		if (ok < 0)
			r = isl_stat_error;
		else if (!ok)
			group->shared_tile =
				gpu_array_tile_free(group->shared_tile);
		isl_map_free(acc);
	}

	if (r < 0 || (!force_private && (!use_private || no_reuse))) {
		isl_union_map_free(access);
		return r;
	}

	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(data->thread_sched));

	acc = isl_map_from_union_map(access);

	if (!force_private && !access_is_bijective(data, acc)) {
		isl_map_free(acc);
		return isl_stat_ok;
	}

	unique_depth = compute_accessed_by_single_thread_depth(data, acc);

	acc = isl_map_intersect_domain(acc, isl_set_copy(data->privatization));
	acc = isl_map_project_out(acc, isl_dim_in, data->thread_depth,
								data->n_thread);
	requires_unroll = check_requires_unroll(data, acc, force_private);
	if (unique_depth < 0 || requires_unroll < 0 ||
	    (requires_unroll && kernel->any_force_private)) {
		isl_map_free(acc);
		return requires_unroll < 0 ? isl_stat_error : isl_stat_ok;
	}

	group->private_tile = gpu_array_tile_create(ctx, n_index);
	group->private_tile->requires_unroll = requires_unroll;
	ok = can_tile(acc, group->private_tile);
	if (ok >= 0 && !ok)
		group->private_tile = gpu_array_tile_free(group->private_tile);
	isl_map_free(acc);
	if (ok < 0)
		return isl_stat_error;

	if (group->private_tile) {
		struct gpu_array_tile *tile = group->private_tile;
		int tile_depth = compute_tile_depth(data, tile);
		if (tile_depth < unique_depth)
			tile_depth = unique_depth;
		if (tile_adjust_depth(tile, tile_depth) < 0)
			return isl_stat_error;
	}

	if (force_private && !group->private_tile)
		isl_die(ctx, isl_error_internal,
			"unable to map array reference group to registers",
			return isl_stat_error);

	return isl_stat_ok;
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set the tile depth.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	if (!group)
		return -1;
	if (compute_group_bounds_core(kernel, group, data) < 0)
		return -1;
	if (set_depth(data, group) < 0)
		return -1;

	return 0;
}
/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 * If any group is merged into the current group, then its access
 * relation may have changed or it may have been turned into a write.
 * The combined group might therefore overlap with groups that
 * the original group did not overlap with.  The groups therefore
 * need to be checked again.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	int (*overlap)(struct gpu_array_ref_group *group1,
		struct gpu_array_ref_group *group2), int compute_bounds,
	struct gpu_group_data *data)
{
	int i, j;
	int any_merge;

	for (i = 0; i < n; i += !any_merge) {
		any_merge = 0;
		for (j = n - 1; j > i; --j) {
			if (!groups[i]->write && !groups[j]->write)
				continue;

			if (!overlap(groups[i], groups[j]))
				continue;

			any_merge = 1;
			groups[i] = join_groups_and_free(groups[i], groups[j]);
			if (!groups[i])
				return -1;
			groups[j] = groups[n - 1];
			groups[n - 1] = NULL;
			n--;
		}

		if (!any_merge)
			continue;
		if (compute_bounds &&
		    compute_group_bounds(kernel, groups[i], data) < 0)
			return -1;
	}

	return n;
}
/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 */
static int group_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &accesses_overlap, 0, data);
}
/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->min_depth, group2->min_depth) loops.
 */
static int depth_accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int depth;
	int dim;
	int empty;
	isl_map *map_i, *map_j, *map;

	depth = group1->min_depth;
	if (group2->min_depth < depth)
		depth = group2->min_depth;
	map_i = isl_map_copy(group1->access);
	dim = isl_map_dim(map_i, isl_dim_in);
	map_i = isl_map_eliminate(map_i, isl_dim_in, depth, dim - depth);
	map_j = isl_map_copy(group2->access);
	map_j = isl_map_eliminate(map_j, isl_dim_in, depth, dim - depth);
	map = isl_map_intersect(map_i, map_j);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	return !empty;
}
/* If two groups have overlapping access relations (within the outer
 * depth loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 */
static int group_depth_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &depth_accesses_overlap, 1,
				data);
}
/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
static int smaller_tile(struct gpu_array_tile *tile,
	struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
	int smaller;
	isl_val *size, *size1, *size2;

	size = gpu_array_tile_size(tile);
	size1 = gpu_array_tile_size(tile1);
	size2 = gpu_array_tile_size(tile2);

	size = isl_val_sub(size, size1);
	size = isl_val_sub(size, size2);
	smaller = isl_val_is_neg(size);

	isl_val_free(size);

	return smaller;
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 * If any group is merged into the current group, then it may become
 * profitable to combine it with groups that were considered before
 * the merge.  The groups are therefore checked again after a merge.
 *
 * If merging two groups decreases the depth of the tile of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct ppcg_kernel *kernel,
	struct gpu_array_info *array, int n,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i, j;
	int recompute_overlap = 0;
	int any_merge;

	for (i = 0; i < n; i += !any_merge) {
		any_merge = 0;
		if (!groups[i]->shared_tile)
			continue;
		for (j = n - 1; j > i; --j) {
			struct gpu_array_ref_group *group;

			if (!groups[j]->shared_tile)
				continue;

			if (!depth_accesses_overlap(groups[i], groups[j]))
				continue;

			group = join_groups(groups[i], groups[j]);
			if (compute_group_bounds(kernel, group, data) < 0) {
				gpu_array_ref_group_free(group);
				return -1;
			}
			if (!group->shared_tile ||
			    !smaller_tile(group->shared_tile,
					groups[i]->shared_tile,
					groups[j]->shared_tile)) {
				gpu_array_ref_group_free(group);
				continue;
			}

			any_merge = 1;
			if (group->min_depth < groups[i]->min_depth ||
			    group->min_depth < groups[j]->min_depth)
				recompute_overlap = 1;
			gpu_array_ref_group_free(groups[i]);
			gpu_array_ref_group_free(groups[j]);
			groups[i] = group;
			groups[j] = groups[n - 1];
			n--;
		}
	}

	if (recompute_overlap)
		n = group_depth_overlapping_writes(kernel, n, groups, data);
	return n;
}
/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct gpu_local_array_info *array,
	int n, struct gpu_array_ref_group **groups)
{
	int i;

	array->n_group = n;
	array->groups = groups;

	for (i = 0; i < n; ++i)
		groups[i]->nr = i;
}
/* Combine all groups in "groups" into a single group and return
 * the new number of groups (1 or 0 if there were no groups to start with).
 */
static int join_all_groups(int n, struct gpu_array_ref_group **groups)
{
	int i;

	for (i = n - 1; i > 0; --i) {
		groups[0] = join_groups_and_free(groups[0], groups[i]);
		groups[i] = NULL;
		n--;
	}

	return n;
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the depth of the effective tile.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then we compute a single
 * reference group without trying to find any tiles
 * since we do not map such arrays to private or shared
 * memory.  The only exception is when those arrays of structures
 * are required to be mapped to private memory.
 */
static int group_array_references(struct ppcg_kernel *kernel,
	struct gpu_local_array_info *local, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);
	struct gpu_array_ref_group **groups;

	groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
					local->array->n_ref);
	if (!groups)
		return -1;

	n = populate_array_references(local, groups, data);

	if (local->array->has_compound_element && !local->force_private) {
		n = join_all_groups(n, groups);
		set_array_groups(local, n, groups);
		return 0;
	}

	n = group_overlapping_writes(kernel, n, groups, data);

	for (i = 0; i < n; ++i)
		if (compute_group_bounds(kernel, groups[i], data) < 0)
			n = -1;

	n = group_depth_overlapping_writes(kernel, n, groups, data);

	n = group_common_shared_memory_tile(kernel, local->array,
					    n, groups, data);

	set_array_groups(local, n, groups);

	if (n >= 0)
		return 0;

	for (i = 0; i < local->array->n_ref; ++i)
		gpu_array_ref_group_free(groups[i]);
	return -1;
}
/* For each array in the input program that can be mapped to private memory,
 * check if there are any order dependences active inside the current kernel,
 * within the same iteration of the host schedule, i.e., the prefix
 * schedule at "node".
 * If so, mark the array as force_private so that its reference groups will be
 * mapped to registers.
 *
 * Note that the arrays that cannot be mapped to private memory have
 * had their order dependences added to prog->array_order and
 * subsequently to the coincidence constraints.
 */
static void check_can_be_private_live_ranges(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	int i;
	isl_union_set *domain;
	isl_multi_union_pw_aff *prefix;
	isl_union_pw_multi_aff *contraction;

	if (!kernel->options->live_range_reordering)
		return;

	kernel->any_force_private = 0;

	prefix = isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node);
	contraction = isl_union_pw_multi_aff_copy(kernel->contraction);
	prefix = isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix,
			contraction);
	domain = isl_union_set_copy(kernel->expanded_domain);
	domain = isl_union_set_universe(domain);

	for (i = 0; i < kernel->n_array; ++i) {
		struct gpu_local_array_info *local = &kernel->array[i];
		isl_union_map *order;

		local->force_private = 0;
		if (!gpu_array_can_be_private(local->array))
			continue;
		order = isl_union_map_copy(local->array->dep_order);
		order = isl_union_map_intersect_domain(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect_range(order,
						isl_union_set_copy(domain));
		order = isl_union_map_eq_at_multi_union_pw_aff(order,
				isl_multi_union_pw_aff_copy(prefix));
		if (!isl_union_map_is_empty(order)) {
			local->force_private = 1;
			kernel->any_force_private = 1;
		}
		isl_union_map_free(order);
	}

	isl_multi_union_pw_aff_free(prefix);
	isl_union_set_free(domain);
}
/* Expand the domain of the schedule "s" by plugging in
 * the contraction "contraction" and return the result.
 */
static __isl_give isl_union_map *expand(__isl_take isl_union_map *s,
	__isl_keep isl_union_pw_multi_aff *contraction)
{
	contraction = isl_union_pw_multi_aff_copy(contraction);
	s = isl_union_map_preimage_domain_union_pw_multi_aff(s, contraction);
	return s;
}
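
/* Illustrative example (hypothetical, not from the original code): if the
 * contraction maps statement instances S[i, j] to group instances G[i]
 * and "s" maps G[i] to schedule point [i], then the expanded schedule
 * maps S[i, j] to [i].
 */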
/* Create a set of dimension data->thread_depth + data->n_thread
 * that equates the residue of the final data->n_thread dimensions
 * modulo the kernel->block_dim sizes to the thread identifiers.
 * Store the computed set in data->privatization.
 *
 * The construction starts with the space of kernel->thread_filter,
 * which is known to reference all thread identifiers.
 */
static void compute_privatization(struct gpu_group_data *data,
	struct ppcg_kernel *kernel)
{
	int i;
	isl_ctx *ctx;
	isl_space *space;
	isl_local_space *ls;
	isl_set *set;

	ctx = isl_union_map_get_ctx(data->shared_sched);
	space = isl_union_set_get_space(kernel->thread_filter);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set,
				data->thread_depth + data->n_thread);
	set = isl_set_universe(space);
	space = isl_set_get_space(set);
	ls = isl_local_space_from_space(space);

	for (i = 0; i < data->n_thread; ++i) {
		isl_aff *aff, *aff2;
		isl_constraint *c;
		isl_val *v;
		isl_id *id;
		int pos;

		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_set, data->thread_depth + i);
		v = isl_val_int_from_si(ctx, kernel->block_dim[i]);
		aff = isl_aff_mod_val(aff, v);
		id = isl_id_list_get_id(kernel->thread_ids, i);
		pos = isl_set_find_dim_by_id(set, isl_dim_param, id);
		isl_id_free(id);
		aff2 = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_param, pos);
		aff = isl_aff_sub(aff, aff2);
		c = isl_equality_from_aff(aff);
		set = isl_set_add_constraint(set, c);
	}

	isl_local_space_free(ls);
	data->privatization = set;
}
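
/* Illustrative example (hypothetical values): with data->n_thread = 2 and
 * block sizes 16 and 8, the constructed set imposes
 * (d_t mod 16) = t0 and (d_{t+1} mod 8) = t1, where d_t and d_{t+1} are the
 * schedule dimensions that get mapped to threads and t0, t1 are the thread
 * identifier parameters.
 */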
/* Return the prefix schedule at "node" as a relation
 * between domain elements and schedule dimensions after detecting
 * equalities in this relation.
 */
static __isl_give isl_union_map *prefix_with_equalities(
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *schedule;

	schedule = isl_schedule_node_get_prefix_schedule_relation(node);
	schedule = isl_union_map_detect_equalities(schedule);

	return schedule;
}
/* Group references of all arrays in "kernel".
 * "node" points to the kernel mark.
 * The mapping to shared memory is computed at the "shared" mark.
 *
 * We first extract all required schedule information into
 * a gpu_group_data structure and then consider each array
 * in turn.
 */
int gpu_group_references(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	int i;
	int r = 0;
	isl_union_pw_multi_aff *contraction;
	struct gpu_group_data data;

	check_can_be_private_live_ranges(kernel, node);

	data.scop = kernel->prog->scop;

	data.kernel_depth = isl_schedule_node_get_schedule_depth(node);
	data.host_sched = isl_schedule_node_get_prefix_schedule_relation(node);

	node = isl_schedule_node_copy(node);
	node = gpu_tree_move_down_to_shared(node, kernel->core);
	data.shared_depth = isl_schedule_node_get_schedule_depth(node);
	data.shared_sched = prefix_with_equalities(node);

	node = gpu_tree_move_down_to_thread(node, kernel->core);
	node = isl_schedule_node_child(node, 0);
	data.thread_depth = isl_schedule_node_get_schedule_depth(node);
	data.n_thread = isl_schedule_node_band_n_member(node);
	if (data.thread_depth == data.shared_depth)
		data.copy_sched = isl_union_map_copy(data.shared_sched);
	else
		data.copy_sched = prefix_with_equalities(node);
	data.thread_sched = isl_union_map_copy(data.copy_sched);
	data.thread_sched = isl_union_map_flat_range_product(data.thread_sched,
		isl_schedule_node_band_get_partial_schedule_union_map(node));
	data.thread_sched = isl_union_map_detect_equalities(data.thread_sched);

	contraction = isl_union_pw_multi_aff_copy(kernel->contraction);
	data.host_sched = expand(data.host_sched, contraction);
	data.shared_sched = expand(data.shared_sched, contraction);
	if (data.thread_depth == data.shared_depth) {
		isl_union_map_free(data.copy_sched);
		data.copy_sched = isl_union_map_copy(data.shared_sched);
	} else
		data.copy_sched = expand(data.copy_sched, contraction);
	data.thread_sched = expand(data.thread_sched, contraction);
	isl_union_pw_multi_aff_free(contraction);

	node = isl_schedule_node_child(node, 0);
	data.full_sched = isl_union_map_copy(data.thread_sched);
	data.full_sched = isl_union_map_flat_range_product(data.full_sched,
		isl_schedule_node_get_subtree_schedule_union_map(node));
	isl_schedule_node_free(node);

	compute_privatization(&data, kernel);

	for (i = 0; i < kernel->n_array; ++i) {
		r = group_array_references(kernel, &kernel->array[i], &data);
		if (r < 0)
			break;
	}

	isl_union_map_free(data.host_sched);
	isl_union_map_free(data.shared_sched);
	isl_union_map_free(data.copy_sched);
	isl_union_map_free(data.thread_sched);
	isl_union_map_free(data.full_sched);
	isl_set_free(data.privatization);

	return r;
}
/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first tile->depth schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile".  In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
	struct gpu_array_tile *tile, __isl_keep isl_space *space,
	__isl_keep isl_multi_aff *insert_array)
{
	int i;
	isl_ctx *ctx;
	isl_multi_aff *shift;
	isl_multi_val *stride;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *tiling;

	ctx = isl_space_get_ctx(space);
	space2 = isl_space_domain(isl_space_copy(space));
	ls = isl_local_space_from_space(space2);
	space2 = isl_space_range(isl_space_copy(space));
	stride = isl_multi_val_zero(space2);
	shift = isl_multi_aff_zero(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i) {
		struct gpu_array_bound *bound = &tile->bound[i];
		isl_val *stride_i;
		isl_aff *shift_i;

		stride_i = isl_val_copy(bound->stride);
		shift_i = isl_aff_copy(bound->shift);

		stride = isl_multi_val_set_val(stride, i, stride_i);
		shift = isl_multi_aff_set_aff(shift, i, shift_i);
	}
	isl_local_space_free(ls);

	shift = isl_multi_aff_pullback_multi_aff(shift,
				isl_multi_aff_copy(insert_array));

	tiling = isl_multi_aff_range_map(isl_space_copy(space));
	tiling = isl_multi_aff_add(tiling, shift);
	tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

	return tiling;
}
/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first tile->depth schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile.  The name of T is the name of the local
 * array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 */
void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group *group)
{
	int i;
	struct gpu_array_tile *tile;
	isl_space *space;
	isl_multi_aff *tiling, *lb, *insert_array;
	isl_printer *p;
	char *local_name;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return;

	space = isl_map_get_space(group->access);
	space = isl_space_from_range(isl_space_range(space));
	space = isl_space_add_dims(space, isl_dim_in, tile->depth);
	insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i)
		if (tile->bound[i].shift)
			break;

	if (i < tile->n)
		tiling = strided_tile(tile, space, insert_array);
	else
		tiling = isl_multi_aff_range_map(isl_space_copy(space));

	lb = isl_multi_aff_zero(space);
	for (i = 0; i < tile->n; ++i) {
		isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
		lb = isl_multi_aff_set_aff(lb, i, lb_i);
	}
	lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

	tiling = isl_multi_aff_sub(tiling, lb);

	p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
	p = gpu_array_ref_group_print_name(group, p);
	local_name = isl_printer_get_str(p);
	isl_printer_free(p);
	tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
	free(local_name);

	tile->tiling = tiling;
}
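
/* Worked example (hypothetical, for illustration only): for a group of
 * accesses A[i + j] at schedule depth 2 with lower bound lb(i, j) = i and
 * no stride, the computed tiling is
 * { [[i, j] -> A[a]] -> shared_A[a - i] }, so element A[i + j] is stored
 * at position j of the tile.
 */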