/*
 * Copyright 2010-2011 INRIA Saclay
 * Copyright 2012-2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <isl/ast_build.h>

#include "gpu.h"
#include "ppcg_options.h"
/* The fields stride, shift and shift_map only contain valid information
 * if shift is not NULL.
 * If so, they express that the current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * shift_map contains the mapping
 *
 *	i -> (i + shift)/stride
 *
 * Let D represent the initial shared_len dimensions of the computed schedule.
 * The spaces of "lb" and "shift" are of the form
 *
 *	D -> [b]
 *
 * "shift_map" is of the form
 *
 *	[D -> i] -> [D -> (i + shift(D))/stride]
 */
struct gpu_array_bound {
	isl_val *size;
	isl_aff *lb;

	isl_val *stride;
	isl_aff *shift;
	isl_basic_map *shift_map;
};
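/* Worked example of the stride/shift mechanism above (illustrative values,
 * not taken from the original source): if, for fixed D, the accessed values
 * of an index are { 1, 3, 5, ... }, then stride = 2 with a shift s(D) = 1,
 * and shift_map sends index value v to (v + 1)/2, i.e.,
 * { 1 -> 1, 3 -> 2, 5 -> 3, ... }, compressing the strided accesses
 * into a dense range.
 */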
/* A tile of an array.
 *
 * n is the dimension of the array.
 * bound is an array of size "n" representing the lower bound
 *	and size for each index.
 *
 * tiling maps a tile in the global array to the corresponding
 * shared/private memory tile and is of the form
 *
 *	{ [D[i] -> A[a]] -> T[(a + shift(i))/stride - lb(i)] }
 *
 * where D represents the initial shared_len dimensions
 * of the computed schedule.
 */
struct gpu_array_tile {
	int n;
	struct gpu_array_bound *bound;
	isl_multi_aff *tiling;
};

struct gpu_array_info;
/* A group of array references in a kernel that should be handled together.
 * If private_tile is not NULL, then it is mapped to registers.
 * Otherwise, if shared_tile is not NULL, it is mapped to shared memory.
 * Otherwise, it is accessed from global memory.
 */
struct gpu_array_ref_group {
	/* The references in this group access this array. */
	struct gpu_array_info *array;
	/* Position of this group in the list of reference groups of array. */
	int nr;

	/* The following fields are used during the construction of the groups.
	 * access is the combined access relation relative to the shared
	 * memory tiling.  In particular, the domain of the map corresponds
	 * to the first shared_len dimensions of the computed schedule.
	 * write is set if any access in the group is a write.
	 */
	isl_map *access;
	int write;

	/* The shared memory tile, NULL if none. */
	struct gpu_array_tile *shared_tile;

	/* The private memory tile, NULL if none. */
	struct gpu_array_tile *private_tile;

	/* References in this group; point to elements of a linked list. */
	int n_ref;
	struct gpu_stmt_access **refs;

	/* Last shared memory tile dimension that affects tile of this group. */
	int last_shared;
};
struct gpu_gen {
	isl_ctx *ctx;
	struct ppcg_options *options;

	/* Callback for printing of AST in appropriate format. */
	__isl_give isl_printer *(*print)(__isl_take isl_printer *p,
		struct gpu_prog *prog, __isl_keep isl_ast_node *tree,
		void *user);
	void *print_user;

	struct gpu_prog *prog;
	/* The generated AST. */
	isl_ast_node *tree;

	/* tile, grid and block sizes for each kernel */
	isl_union_map *sizes;

	/* Identifier of current kernel. */
	int kernel_id;
	/* Pointer to the current kernel. */
	struct ppcg_kernel *kernel;
	/* Does the computed schedule exhibit any parallelism? */
	int any_parallelism;

	/* First tile dimension. */
	int tile_first;
	/* Number of tile dimensions. */
	int tile_len;
	/* Number of initial parallel loops among tile dimensions. */
	int n_parallel;

	/* Number of dimensions determining shared memory. */
	int shared_len;

	/* Number of rows in the untiled schedule. */
	int untiled_len;
	/* Number of rows in the tiled schedule. */
	int tiled_len;
	/* Number of rows in schedule after tiling/wrapping over threads. */
	int thread_tiled_len;

	/* Global untiled schedule. */
	isl_union_map *sched;
	/* Local (per kernel launch) tiled schedule. */
	isl_union_map *tiled_sched;
	/* Local schedule per shared memory tile loop iteration. */
	isl_union_map *local_sched;

	/* Local tiled schedule projected onto the shared tile loops and
	 * the loops that will be wrapped over the threads,
	 * with all shared tile loops parametrized.
	 */
	isl_union_map *shared_sched;
	/* Projects out the loops that will be wrapped over the threads
	 * from shared_sched.
	 */
	isl_union_map *shared_proj;

	/* A map that takes the range of shared_sched as input,
	 * wraps the appropriate loops over the threads and then projects
	 * out these loops.
	 */
	isl_map *privatization;

	/* A map from the shared memory tile loops and the thread indices
	 * (as parameters) to the set of accessed memory elements that
	 * will be accessed through private copies.
	 */
	isl_union_map *private_access;

	/* The schedule for the current private/shared access
	 * (within print_private_access or print_shared_access).
	 */
	isl_map *copy_sched;
	/* The array reference group corresponding to copy_sched. */
	struct gpu_array_ref_group *copy_group;

	/* First loop to unroll (or -1 if none) in the current part of the
	 * schedule.
	 */
	int first_unroll;

	int n_grid;
	int n_block;
	/* Note: in the input file, the sizes of the grid and the blocks
	 * are specified in the order x, y, z, but internally, the sizes
	 * are stored in reverse order, so that the last element always
	 * refers to the x dimension.
	 */
	int grid_dim[2];
	int block_dim[3];
	int *tile_size;
};
/* Print the name of the local copy of a given group of array references.
 */
static __isl_give isl_printer *print_array_name(__isl_take isl_printer *p,
	struct gpu_array_ref_group *group)
{
	int global = 0;

	if (group->private_tile)
		p = isl_printer_print_str(p, "private_");
	else if (group->shared_tile)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}
/* Collect all references to the given array and store pointers to them
 * in array->refs.
 */
static void collect_references(struct gpu_prog *prog,
	struct gpu_array_info *array)
{
	int i;
	int n;

	n = 0;
	for (i = 0; i < prog->n_stmts; ++i) {
		struct gpu_stmt *stmt = &prog->stmts[i];
		struct gpu_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (name && !strcmp(array->name, name))
				n++;
		}
	}

	array->n_ref = n;
	array->refs = isl_alloc_array(prog->ctx, struct gpu_stmt_access *, n);
	assert(array->refs);

	n = 0;
	for (i = 0; i < prog->n_stmts; ++i) {
		struct gpu_stmt *stmt = &prog->stmts[i];
		struct gpu_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (!name || strcmp(array->name, name))
				continue;

			array->refs[n++] = access;
		}
	}
}
/* Create a gpu_array_tile for an array of dimension "n_index".
 */
static struct gpu_array_tile *create_tile(isl_ctx *ctx, int n_index)
{
	int i;
	struct gpu_array_tile *tile;

	tile = isl_calloc_type(ctx, struct gpu_array_tile);
	assert(tile);

	tile->n = n_index;

	tile->bound = isl_alloc_array(ctx, struct gpu_array_bound, n_index);
	assert(tile->bound);

	for (i = 0; i < n_index; ++i) {
		tile->bound[i].size = NULL;
		tile->bound[i].lb = NULL;
		tile->bound[i].stride = NULL;
		tile->bound[i].shift = NULL;
		tile->bound[i].shift_map = NULL;
	}

	return tile;
}
/* Free all memory allocated for "tile" and return NULL.
 */
static void *free_tile(struct gpu_array_tile *tile)
{
	int j;

	if (!tile)
		return NULL;

	for (j = 0; j < tile->n; ++j) {
		isl_val_free(tile->bound[j].size);
		isl_val_free(tile->bound[j].stride);
		isl_aff_free(tile->bound[j].lb);
		isl_aff_free(tile->bound[j].shift);
		isl_basic_map_free(tile->bound[j].shift_map);
	}
	free(tile->bound);
	isl_multi_aff_free(tile->tiling);
	free(tile);

	return NULL;
}
/* Find the pet_array in "scop" that corresponds to the set "accessed".
 */
static struct pet_array *find_array(struct ppcg_scop *scop,
	__isl_keep isl_set *accessed)
{
	int i;
	isl_id *id;

	id = isl_set_get_tuple_id(accessed);

	for (i = 0; i < scop->n_array; ++i) {
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
		isl_id_free(id_i);
		if (id == id_i)
			break;
	}
	isl_id_free(id);

	return i < scop->n_array ? scop->arrays[i] : NULL;
}
/* Compute and return the extent of "array", taking into account the set of
 * accessed elements.
 *
 * In particular, the extent in the outer dimension is taken
 * from "accessed", while the extents in the remaining dimensions
 * are taken from array->extent.
 *
 * The extent in the outer dimension cannot be taken from array->extent
 * because that may be unbounded.  Furthermore, even if it is bounded,
 * it may be larger than the piece of the array that is being accessed.
 */
static __isl_give isl_set *compute_extent(struct pet_array *array,
	__isl_keep isl_set *accessed)
{
	int n_index;
	isl_id *id;
	isl_set *outer;
	isl_set *extent;

	extent = isl_set_copy(array->extent);

	n_index = isl_set_dim(accessed, isl_dim_set);
	if (n_index == 0)
		return extent;

	extent = isl_set_project_out(extent, isl_dim_set, 0, 1);
	outer = isl_set_copy(accessed);
	outer = isl_set_project_out(outer, isl_dim_set, 1, n_index - 1);
	extent = isl_set_flat_product(outer, extent);
	id = isl_set_get_tuple_id(accessed);
	extent = isl_set_set_tuple_id(extent, id);

	return extent;
}
/* Compute bounds on the host arrays based on the accessed elements
 * and collect all references to the array.
 *
 * If the array is zero-dimensional, i.e., a scalar, we check
 * whether it is read-only.
 */
static int extract_array_info(__isl_take isl_set *array, void *user)
{
	int i;
	struct gpu_prog *prog = (struct gpu_prog *)user;
	const char *name;
	int n_index;
	isl_pw_aff **bounds;
	struct pet_array *pa;
	isl_set *extent;

	n_index = isl_set_dim(array, isl_dim_set);
	name = isl_set_get_tuple_name(array);
	bounds = isl_alloc_array(isl_set_get_ctx(array),
				 isl_pw_aff *, n_index);
	assert(bounds);
	prog->array[prog->n_array].dim = isl_set_get_space(array);
	prog->array[prog->n_array].name = strdup(name);
	prog->array[prog->n_array].n_index = n_index;
	prog->array[prog->n_array].bound = bounds;

	pa = find_array(prog->scop, array);
	assert(pa);

	prog->array[prog->n_array].type = strdup(pa->element_type);
	prog->array[prog->n_array].size = pa->element_size;
	prog->array[prog->n_array].local = pa->declared && !pa->exposed;

	if (n_index == 0) {
		isl_set *space;
		isl_union_map *write;
		int empty;

		write = isl_union_map_copy(prog->write);
		space = isl_set_universe(isl_set_get_space(array));
		write = isl_union_map_intersect_range(write,
				isl_union_set_from_set(space));
		empty = isl_union_map_is_empty(write);
		isl_union_map_free(write);

		prog->array[prog->n_array].read_only = empty;
	}

	extent = compute_extent(pa, array);
	for (i = 0; i < n_index; ++i) {
		isl_set *dom;
		isl_local_space *ls;
		isl_aff *one;
		isl_pw_aff *bound;

		bound = isl_set_dim_max(isl_set_copy(extent), i);
		assert(bound);
		dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
		ls = isl_local_space_from_space(isl_set_get_space(dom));
		one = isl_aff_zero_on_domain(ls);
		one = isl_aff_add_constant_si(one, 1);
		bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
		bound = isl_pw_aff_gist(bound, isl_set_copy(prog->context));

		bounds[i] = bound;
	}
	prog->array[prog->n_array].extent = extent;

	collect_references(prog, &prog->array[prog->n_array]);

	prog->n_array++;

	isl_set_free(array);
	return 0;
}
/* Construct a gpu_array_info for each array accessed by "prog" and
 * collect the results in prog->array.
 */
void collect_array_info(struct gpu_prog *prog)
{
	isl_union_set *arrays;

	arrays = isl_union_map_range(isl_union_map_copy(prog->read));
	arrays = isl_union_set_union(arrays,
		isl_union_map_range(isl_union_map_copy(prog->write)));
	arrays = isl_union_set_coalesce(arrays);

	prog->n_array = isl_union_set_n_set(arrays);
	prog->array = isl_alloc_array(prog->ctx,
				     struct gpu_array_info, prog->n_array);
	assert(prog->array);
	prog->n_array = 0;
	isl_union_set_foreach_set(arrays, &extract_array_info, prog);
	isl_union_set_free(arrays);
}
static void free_array_info(struct gpu_prog *prog)
{
	int i, j;

	for (i = 0; i < prog->n_array; ++i) {
		int n_index = prog->array[i].n_index;
		free(prog->array[i].type);
		free(prog->array[i].name);
		for (j = 0; j < n_index; ++j)
			isl_pw_aff_free(prog->array[i].bound[j]);
		isl_space_free(prog->array[i].dim);
		isl_set_free(prog->array[i].extent);
		free(prog->array[i].bound);
		free(prog->array[i].refs);
	}
	free(prog->array);
}
/* Check if a gpu array is a scalar.  A scalar is a value that is not stored
 * as an array or through a pointer reference, but as a single data element.
 * At the moment, scalars are represented as zero-dimensional arrays.
 */
int gpu_array_is_scalar(struct gpu_array_info *array)
{
	return (array->n_index == 0);
}

/* Is "array" a read-only scalar?
 */
int gpu_array_is_read_only_scalar(struct gpu_array_info *array)
{
	return gpu_array_is_scalar(array) && array->read_only;
}
/* Internal data structure for extract_size_of_type.
 * "type" specifies the name of the space that we want to extract.
 * "res" is used to store the subset of that space.
 */
struct ppcg_extract_size_data {
	const char *type;
	isl_set *res;
};

/* This function is called for each set in a union_set.
 * If the name of the set matches data->type, we store the
 * set in data->res.
 */
static int extract_size_of_type(__isl_take isl_set *size, void *user)
{
	struct ppcg_extract_size_data *data = user;
	const char *name;

	name = isl_set_get_tuple_name(size);
	if (name && !strcmp(name, data->type)) {
		data->res = size;
		return -1;
	}

	isl_set_free(size);
	return 0;
}

/* Given a union map { kernel[i] -> *[...] },
 * return the range in the space called "type" for the kernel with
 * sequence number "id".
 */
static __isl_give isl_set *extract_sizes(__isl_keep isl_union_map *sizes,
	const char *type, int id)
{
	isl_space *space;
	isl_set *dom;
	isl_union_set *local_sizes;
	struct ppcg_extract_size_data data = { type, NULL };

	if (!sizes)
		return NULL;

	space = isl_union_map_get_space(sizes);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, 1);
	space = isl_space_set_tuple_name(space, isl_dim_set, "kernel");
	dom = isl_set_universe(space);
	dom = isl_set_fix_si(dom, isl_dim_set, 0, id);

	local_sizes = isl_union_set_apply(isl_union_set_from_set(dom),
					isl_union_map_copy(sizes));
	isl_union_set_foreach_set(local_sizes, &extract_size_of_type, &data);
	isl_union_set_free(local_sizes);
	return data.res;
}
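/* As an illustration (hypothetical input, not from this file): the "sizes"
 * union map typically originates from a user-supplied string in isl union
 * map syntax, e.g.,
 *
 *	{ kernel[0] -> tile[32,32]; kernel[0] -> block[16,16] }
 *
 * in which case extract_sizes(sizes, "block", 0) would return the set
 * { block[16,16] }.
 */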
/* Given a singleton set, extract the first (at most *len) elements
 * of the single integer tuple into *sizes and update *len if needed.
 */
static void read_sizes_from_set(__isl_take isl_set *set, int *sizes, int *len)
{
	int i;
	int dim;

	if (!set)
		return;

	dim = isl_set_dim(set, isl_dim_set);
	if (dim < *len)
		*len = dim;

	for (i = 0; i < *len; ++i) {
		isl_val *v;

		v = isl_set_plain_get_val_if_fixed(set, isl_dim_set, i);
		assert(v);

		sizes[i] = isl_val_get_num_si(v);
		isl_val_free(v);
	}

	isl_set_free(set);
}
/* Extract user specified "tile" sizes from the "sizes" command line option,
 * defaulting to option->tile_size in each dimension.
 */
static void read_tile_sizes(struct gpu_gen *gen)
{
	int n;
	isl_set *size;

	gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
	assert(gen->tile_size);
	for (n = 0; n < gen->tile_len; ++n)
		gen->tile_size[n] = gen->options->tile_size;

	size = extract_sizes(gen->sizes, "tile", gen->kernel_id);
	read_sizes_from_set(size, gen->tile_size, &gen->tile_len);

	if (gen->n_parallel > gen->tile_len)
		gen->n_parallel = gen->tile_len;
}
/* Extract user specified "block" sizes from the "sizes" command line option,
 * after filling in some potentially useful defaults.
 */
static void read_block_sizes(struct gpu_gen *gen)
{
	int n;
	isl_set *size;

	n = gen->n_parallel;
	gen->n_block = (n <= 3) ? n : 3;
	switch (gen->n_block) {
	case 1:
		gen->block_dim[0] = 512;
		break;
	case 2:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 16;
		break;
	default:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 4;
		gen->block_dim[2] = 4;
		break;
	}

	size = extract_sizes(gen->sizes, "block", gen->kernel_id);
	read_sizes_from_set(size, gen->block_dim, &gen->n_block);
}
/* Extract user specified "grid" sizes from the "sizes" command line option,
 * after filling in some potentially useful defaults.
 */
static void read_grid_sizes(struct gpu_gen *gen)
{
	int n = gen->n_parallel;
	isl_set *size;

	gen->n_grid = (n <= 2) ? n : 2;
	switch (gen->n_grid) {
	case 1:
		gen->grid_dim[0] = 32768;
		break;
	default:
		gen->grid_dim[0] = 256;
		gen->grid_dim[1] = 256;
		break;
	}

	size = extract_sizes(gen->sizes, "grid", gen->kernel_id);
	read_sizes_from_set(size, gen->grid_dim, &gen->n_grid);
}
/* Extract user specified sizes from the "sizes" command line option
 * after filling in some potentially useful defaults.
 */
static void read_sizes(struct gpu_gen *gen)
{
	read_tile_sizes(gen);
	read_block_sizes(gen);
	read_grid_sizes(gen);
}
static void *free_stmts(struct gpu_stmt *stmts, int n)
{
	int i;

	if (!stmts)
		return NULL;

	for (i = 0; i < n; ++i) {
		struct gpu_stmt_access *access, *next;

		for (access = stmts[i].accesses; access; access = next) {
			next = access->next;
			isl_map_free(access->access);
			free(access);
		}

		isl_id_free(stmts[i].id);
	}
	free(stmts);

	return NULL;
}
void clear_gpu_gen(struct gpu_gen *gen)
{
	isl_union_map_free(gen->sizes);
	isl_union_map_free(gen->sched);
}
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "tile_len" that tiles
 * the "tile_len" coordinates starting at "first".
 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *tile(__isl_take isl_space *dim, int len,
	int first, int tile_len, int *tile_size)
{
	int i;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + tile_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len - tile_len; ++i) {
		int j = i < first ? i : i + tile_len;
		int k = i < first ? i : i + 2 * tile_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, j, -1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < tile_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in,
						first + i, -1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i, tile_size[i]);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i + tile_len, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i + tile_len, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i + tile_len, -1);
		c = isl_constraint_set_constant_si(c, tile_size[i] - 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);

	return isl_map_from_basic_map(bmap);
}
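/* Illustrative instance of the map constructed by tile() (assumed values:
 * len = 1, first = 0, tile_len = 1, tile_size[0] = 32):
 *
 *	[s] -> [t, p] : s = 32 t + p and 0 <= p <= 31
 *
 * i.e., t = floor(s/32) and p = s mod 32.
 */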
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
 * that are projected out at the end.
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *wrap(__isl_take isl_space *dim, int len,
	int first, int wrap_len, int *wrap_size)
{
	int i;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + 2 * wrap_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int k = i < first + wrap_len ? i : i + 2 * wrap_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < wrap_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i, -1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, 1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + 2 * wrap_len + i, wrap_size[i]);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, -1);
		c = isl_constraint_set_constant_si(c, wrap_size[i] - 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);

	bmap = isl_basic_map_project_out(bmap, isl_dim_out,
				first + 2 * wrap_len, wrap_len);

	return isl_map_from_basic_map(bmap);
}
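/* Illustrative instance of the map constructed by wrap() (assumed values:
 * len = 1, first = 0, wrap_len = 1, wrap_size[0] = 16):
 *
 *	[s] -> [s, w] : w = s mod 16
 *
 * where the quotient floor(s/16) only exists as an internal extra
 * dimension that is projected out before the map is returned.
 */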
/* Add "n" parameters named prefix%d.
 */
static __isl_give isl_set *add_params(__isl_take isl_set *set,
	int n, const char *prefix)
{
	int i;
	unsigned nparam;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, n);

	for (i = 0; i < n; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	return set;
}
/* Equate the "n" dimensions of "set" starting at "first" to
 * freshly created parameters named prefix%d.
 */
static __isl_give isl_set *parametrize(__isl_take isl_set *set,
	int first, int n, const char *prefix)
{
	int i;
	unsigned nparam;

	nparam = isl_set_dim(set, isl_dim_param);

	set = add_params(set, n, prefix);

	for (i = 0; i < n; ++i)
		set = isl_set_equate(set, isl_dim_param, nparam + i,
				     isl_dim_set, first + i);

	return set;
}
/* Given a parameter space "space", create a set of dimension "len"
 * of which the "n" dimensions starting at "first" are equated to
 * freshly created parameters named prefix%d.
 */
static __isl_give isl_set *parametrization(__isl_take isl_space *space,
	int len, int first, int n, const char *prefix)
{
	isl_set *set;

	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, len);
	set = isl_set_universe(space);

	return parametrize(set, first, n, prefix);
}
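/* For instance (illustrative values), parametrization(space, 4, 1, 2, "t")
 * returns a set of the form
 *
 *	[t0, t1] -> { [d0, d1, d2, d3] : d1 = t0 and d2 = t1 }
 *
 * with freshly created parameters t0 and t1.
 */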
/* Tile the B loops over the tile sizes and then tile/wrap
 * the T1 loops over the blocks.
 */
static __isl_give isl_union_map *tile_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_map *tiling, *block_tiling;

	dim = isl_union_map_get_space(sched);
	tiling = tile(isl_space_copy(dim), gen->untiled_len,
		      gen->tile_first, gen->tile_len, gen->tile_size);

	if (gen->options->wrap)
		block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);
	else
		block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);

	gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;

	tiling = isl_map_apply_range(tiling, block_tiling);

	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(tiling));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}
/* Equate the "T1P" iterators in the tiled schedule "sched"
 * to the block dimensions.
 */
static __isl_give isl_union_map *parametrize_tiled_schedule(
	struct gpu_gen *gen, __isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_set *par;

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->tiled_len,
		gen->tile_first + gen->n_grid, gen->n_grid, "b");
	sched = isl_union_map_intersect_range(sched,
					      isl_union_set_from_set(par));

	return sched;
}
/* Tile/wrap the P1 loops over the threads.
 */
static __isl_give isl_union_map *thread_tile_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_map *tiling;
	isl_set *par;

	dim = isl_union_map_get_space(sched);

	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), gen->tiled_len,
				gen->shared_len, gen->n_block, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim), gen->tiled_len,
				gen->shared_len, gen->n_block, gen->block_dim);
	gen->thread_tiled_len = gen->tiled_len + gen->n_block;

	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(tiling));

	par = parametrization(dim, gen->thread_tiled_len,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
		gen->n_block, "t");
	sched = isl_union_map_intersect_range(sched,
					      isl_union_set_from_set(par));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}
/* If the user asked for it, scale the shared memory tile loops
 * (T1T and T2) of "sched" by gen->tile_size[i].
 * If we are not performing "wrapping", then additionally scale the T1P
 * loops by gen->grid_dim[i].
 */
static __isl_give isl_union_map *scale_tile_loops(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, gen->tiled_len);
	dim = isl_space_add_dims(dim, isl_dim_out, gen->tiled_len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < gen->tiled_len; ++i) {
		int f = 1;

		if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
			f = gen->tile_size[i - gen->tile_first];
			if (!gen->options->wrap)
				f *= gen->grid_dim[i - gen->tile_first];
		} else if (i >= gen->tile_first + gen->n_grid &&
			   i < gen->tile_first + gen->n_grid + gen->tile_len) {
			f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
		}

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_thread_tile_loops(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, gen->thread_tiled_len);
	dim = isl_space_add_dims(dim, isl_dim_out, gen->thread_tiled_len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < gen->thread_tiled_len; ++i) {
		int f = 1;

		if (i >= gen->shared_len &&
		    i < gen->shared_len + gen->n_block)
			f = gen->block_dim[i - gen->shared_len];

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}
/* If we are not performing "wrapping" and if the user asked for it,
 * scale the "n_tile" loops starting at "first" of "sched"
 * by gen->kernel->block_dim[i].
 */
static __isl_give isl_union_map *scale_access_tile_loops(struct gpu_gen *gen,
	__isl_take isl_union_map *sched, int len, int first, int n_tile)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int f = 1;

		if (i >= first && i < first + n_tile)
			f = gen->kernel->block_dim[i - first];

		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}
/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i].
 */
__isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
	int len, int *size, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_space *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	isl_local_space *ls;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, len);

	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	dim = isl_set_get_space(set);
	bset = isl_basic_set_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		c = isl_inequality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_param,
						      nparam + i, 1);
		bset = isl_basic_set_add_constraint(bset, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_param,
						      nparam + i, -1);
		c = isl_constraint_set_constant_si(c, size[i] - 1);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_local_space_free(ls);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}
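/* For example (illustrative values), with len = 2, size = { 16, 8 } and
 * prefix "t", the parameters t0 and t1 are added to "set" together with
 * the constraints 0 <= t0 <= 15 and 0 <= t1 <= 7.
 */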
/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i], where the sizes are given
 * by the isl_multi_pw_aff "size".
 */
static __isl_give isl_set *add_bounded_parameters_dynamic(
	__isl_take isl_set *set, __isl_keep isl_multi_pw_aff *size,
	const char *prefix)
{
	int i, len;
	unsigned nparam;
	isl_space *space;
	isl_local_space *ls;
	char name[20];

	len = isl_multi_pw_aff_dim(size, isl_dim_out);
	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, len);

	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	space = isl_space_params(isl_set_get_space(set));
	ls = isl_local_space_from_space(space);
	for (i = 0; i < len; ++i) {
		isl_pw_aff *param, *size_i, *zero;
		isl_set *bound;

		param = isl_pw_aff_var_on_domain(isl_local_space_copy(ls),
						 isl_dim_param, nparam + i);

		size_i = isl_multi_pw_aff_get_pw_aff(size, i);
		bound = isl_pw_aff_lt_set(isl_pw_aff_copy(param), size_i);
		set = isl_set_intersect_params(set, bound);

		zero = isl_pw_aff_zero_on_domain(isl_local_space_copy(ls));
		bound = isl_pw_aff_ge_set(param, zero);
		set = isl_set_intersect_params(set, bound);
	}
	isl_local_space_free(ls);

	return set;
}
/* Construct a map from an access to group->array to the corresponding
 * shared/private memory tile.
 * The map is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the initial shared_len dimensions
 * of the computed schedule.
 */
static __isl_give isl_map *shift_access(struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;
	isl_multi_aff *tiling;

	tile = group->private_tile;
	if (!tile)
		tile = group->shared_tile;

	tiling = isl_multi_aff_copy(tile->tiling);

	return isl_map_from_multi_aff(tiling);
}
/* Does "map" have an obviously fixed value at variable "pos" of "type"?
 */
static int map_plain_is_fixed(isl_map *map, enum isl_dim_type type,
	unsigned pos)
{
	isl_val *v;
	int fixed;

	v = isl_map_plain_get_val_if_fixed(map, type, pos);
	if (!v)
		return -1;
	fixed = isl_val_is_int(v);
	isl_val_free(v);

	return fixed;
}
/* Given a schedule that iterates over all elements in a piece of an array,
 * perform tiling/wrapping over the threads.
 *
 * In particular, we tile the final iterators so that the final thread
 * dimension runs over the final array dimension.
 * However, if those final iterators have only a single iteration,
 * we try to tile earlier iterators instead.
 */
static __isl_give isl_map *tile_access_schedule(struct gpu_gen *gen,
	__isl_take isl_map *sched)
{
	isl_space *dim;
	isl_union_map *usched;
	isl_map *tiling;
	isl_set *par;
	unsigned nvar = isl_map_dim(sched, isl_dim_out);
	int n_tile;
	int first;

	n_tile = gen->kernel->n_block;
	if (n_tile > nvar) {
		int i;
		sched = isl_map_insert_dims(sched,
					    isl_dim_out, 0, n_tile - nvar);
		for (i = 0; i < n_tile - nvar; ++i)
			sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
		nvar = n_tile;
	}

	first = nvar - n_tile;

	for (; first > 0; first--)
		if (!map_plain_is_fixed(sched, isl_dim_out, first + n_tile - 1))
			break;

	dim = isl_map_get_space(sched);
	dim = isl_space_params(dim);
	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), nvar, first,
				n_tile, gen->kernel->block_dim);
	else
		tiling = tile(isl_space_copy(dim), nvar, first,
				n_tile, gen->kernel->block_dim);
	sched = isl_map_apply_range(sched, tiling);

	par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
	sched = isl_map_intersect_range(sched, par);

	usched = isl_union_map_from_map(sched);
	usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
					 first, n_tile);
	sched = isl_map_from_union_map(usched);

	return sched;
}
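/* As an illustration (hypothetical schedule): with n_block = 2 and a
 * schedule with four output dimensions of which the last has a fixed value,
 * the loop above moves "first" down past the fixed trailing dimension,
 * so that dimensions 1 and 2 (rather than 2 and 3) get tiled/wrapped
 * over the two thread identifiers.
 */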
/* Given an index expression "pa" into a tile of an array, adjust the expression
 * to a shift of the tile to the origin
 * (based on the lower bounds in "bound").
 * If the index is strided, then we first add
 * bound->shift and divide by bound->stride.
 * In the end, we compute the gist with respect to "domain".
 *
 * All of the input expression "pa", the set "domain" and
 * the output are expressed in terms of the AST schedule domain.
 * The expressions in "bound" are expressed
 * in terms of the first shared_len dimensions of the schedule computed by PPCG.
 * The mapping "sched2shared" maps the former domain to the latter domain.
 */
static __isl_give isl_pw_aff *shift_index(__isl_take isl_pw_aff *pa,
	struct gpu_array_info *array,
	struct gpu_array_bound *bound, __isl_take isl_set *domain,
	__isl_take isl_map *sched2shared)
{
	isl_map *map;
	isl_pw_aff *tmp;
	isl_pw_multi_aff *pma;

	if (bound->shift) {
		map = isl_map_from_aff(isl_aff_copy(bound->shift));
		map = isl_map_apply_range(isl_map_copy(sched2shared), map);
		pma = isl_pw_multi_aff_from_map(map);
		tmp = isl_pw_multi_aff_get_pw_aff(pma, 0);
		isl_pw_multi_aff_free(pma);
		pa = isl_pw_aff_add(pa, tmp);
		pa = isl_pw_aff_scale_down_val(pa, isl_val_copy(bound->stride));
	}

	map = isl_map_from_aff(isl_aff_copy(bound->lb));
	map = isl_map_apply_range(sched2shared, map);
	pma = isl_pw_multi_aff_from_map(map);
	tmp = isl_pw_multi_aff_get_pw_aff(pma, 0);
	isl_pw_multi_aff_free(pma);
	pa = isl_pw_aff_sub(pa, tmp);
	pa = isl_pw_aff_coalesce(pa);
	pa = isl_pw_aff_gist(pa, domain);

	return pa;
}
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
static __isl_give isl_union_map *group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		      (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					     isl_union_map_from_map(map_i));
	}

	return access;
}
/* Return the extent of "array", recomputed from the bounds.
 * The recomputed extent may be simpler than the original extent.
 */
static __isl_give isl_set *array_extent(struct gpu_array_info *array)
{
	int i;
	isl_id *id;
	isl_space *space;
	isl_local_space *ls;
	isl_set *extent;

	id = isl_set_get_tuple_id(array->extent);
	space = isl_set_get_space(array->extent);
	extent = isl_set_universe(isl_space_copy(space));
	ls = isl_local_space_from_space(space);
	for (i = 0; i < array->n_index; ++i) {
		isl_pw_aff *bound;
		isl_aff *aff;
		isl_pw_aff *index;
		isl_set *lt;

		extent = isl_set_lower_bound_si(extent, isl_dim_set, i, 0);

		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
					    isl_dim_set, i);
		index = isl_pw_aff_from_aff(aff);
		bound = isl_pw_aff_copy(array->bound[i]);
		bound = isl_pw_aff_from_range(bound);
		bound = isl_pw_aff_add_dims(bound, isl_dim_in, array->n_index);
		bound = isl_pw_aff_set_tuple_id(bound, isl_dim_in,
						isl_id_copy(id));
		lt = isl_pw_aff_lt_set(index, bound);
		extent = isl_set_intersect(extent, lt);
	}
	isl_local_space_free(ls);
	isl_id_free(id);

	return extent;
}
/* Return a map from the first shared_len dimensions of the computed
 * schedule to the array tile in
 * global memory that corresponds to the shared memory copy.
 *
 * In particular, return a map
 *
 *	{ D[i] -> A[a] }
 *
 * with constraints
 *
 *	tile_offset(i) <= a <= tile_offset(i) + tile_size - 1		(1)
 *
 * and
 *
 *	0 <= a <= array_size - 1					(2)
 *
 * Note that if some stride has been detected (i.e., when
 * group->shared_tile->bound[i].shift is set), then a in (1) refers
 * to the shifted and scaled down version.
 *
 * Constraints (1) are obtained by mapping the size constraints on the
 * shared/private memory tile back to the access relation.
 * Constraints (2) are obtained from the (recomputed) extent.
 */
static __isl_give isl_map *group_tile(struct gpu_array_ref_group *group)
{
	int i;
	int n_index = group->array->n_index;
	isl_map *tile;
	isl_space *space;
	isl_set *local;
	isl_set *extent;

	space = isl_multi_aff_get_space(group->shared_tile->tiling);
	space = isl_space_range(space);
	local = isl_set_universe(space);
	for (i = 0; i < n_index; ++i) {
		isl_val *bound;

		local = isl_set_lower_bound_si(local, isl_dim_set, i, 0);
		bound = isl_val_copy(group->shared_tile->bound[i].size);
		bound = isl_val_sub_ui(bound, 1);
		local = isl_set_upper_bound_val(local, isl_dim_set, i, bound);
	}
	local = isl_set_preimage_multi_aff(local,
		isl_multi_aff_copy(group->shared_tile->tiling));
	tile = isl_set_unwrap(local);
	extent = array_extent(group->array);
	tile = isl_map_intersect_range(tile, extent);

	return tile;
}
/* Given a mapping "sched" from the AST schedule to a domain,
 * return the corresponding mapping from the AST schedule to
 * the first shared_len dimensions of the schedule computed by PPCG.
 */
static __isl_give isl_map *compute_sched_to_shared(struct gpu_gen *gen,
	__isl_take isl_map *sched)
{
	isl_union_map *umap;
	isl_space *space;
	isl_map *map;

	space = isl_space_range(isl_map_get_space(sched));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, gen->shared_len);

	umap = isl_union_map_copy(gen->shared_sched);
	umap = isl_union_map_apply_range(umap,
			isl_union_map_copy(gen->shared_proj));
	map = isl_union_map_extract_map(umap, space);
	isl_union_map_free(umap);

	sched = isl_map_apply_range(sched, map);
	sched = isl_map_detect_equalities(sched);

	return sched;
}
/* Set unroll[j] if the input dimension j is involved in
 * the index expression represented by ma.
 */
static int check_unroll(__isl_take isl_set *set, __isl_take isl_multi_aff *ma,
	void *user)
{
	int i, j;
	int n_in = isl_multi_aff_dim(ma, isl_dim_in);
	int n_out = isl_multi_aff_dim(ma, isl_dim_out);
	int *unroll = user;

	for (i = 0; i < n_out; ++i) {
		isl_aff *aff;

		aff = isl_multi_aff_get_aff(ma, i);
		for (j = 0; j < n_in; ++j)
			if (isl_aff_involves_dims(aff, isl_dim_in, j, 1))
				unroll[j] = 1;
		isl_aff_free(aff);
	}

	isl_set_free(set);
	isl_multi_aff_free(ma);
	return 0;
}
/* Given an array pos mapping input dimensions to the corresponding
 * output dimension, construct the corresponding map.
 */
static __isl_give isl_map *permutation(__isl_take isl_space *dim,
	int *pos, int len)
{
	int i;
	isl_constraint *c;
	isl_basic_map *bmap;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		c = isl_constraint_set_coefficient_si(c, isl_dim_in, i,
						      -1);
		c = isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i],
						      1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}
	isl_local_space_free(ls);

	return isl_map_from_basic_map(bmap);
}
/* Find all loops involved in any of the index expressions for any of
 * the private accesses, move them innermost and then mark them as
 * requiring unrolling by setting gen->first_unroll.
 * The loops involved should all be parallel because of the checks
 * we performed in check_private_group_access.  Moving them innermost
 * is therefore a valid transformation.
 *
 * Loops up to gen->shared_len are generated before the mapping to
 * threads is applied.  They should therefore be ignored.
 *
 * We compute the hidden equalities of the schedule first
 * since we will need them in our calls to isl_pw_multi_aff_from_map
 * and because we want to make sure that the same equalities
 * are also available to the code generator.
 */
static __isl_give isl_union_map *interchange_for_unroll(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i, j;
	int unroll[gen->thread_tiled_len];
	int perm[gen->thread_tiled_len];
	isl_space *dim;
	isl_map *permute;
	int len = gen->shared_len + gen->n_parallel + gen->n_block;

	gen->first_unroll = -1;

	sched = isl_union_map_detect_equalities(sched);
	for (i = 0; i < gen->thread_tiled_len; ++i)
		unroll[i] = 0;
	for (i = 0; i < gen->prog->n_array; ++i) {
		struct gpu_array_info *array = &gen->prog->array[i];

		for (j = 0; j < array->n_group; ++j) {
			isl_union_map *access;
			isl_map *acc;
			isl_pw_multi_aff *pma;

			if (!array->groups[j]->private_tile)
				continue;

			access = group_access_relation(array->groups[j], 1, 1);
			access = isl_union_map_apply_domain(access,
						isl_union_map_copy(sched));

			acc = isl_map_from_union_map(access);
			pma = isl_pw_multi_aff_from_map(acc);
			isl_pw_multi_aff_foreach_piece(pma,
						       &check_unroll, unroll);

			isl_pw_multi_aff_free(pma);
		}
	}

	for (i = gen->shared_len; i < len; ++i)
		if (unroll[i])
			break;

	if (i >= len)
		return sched;

	for (i = len; i < gen->thread_tiled_len; ++i)
		if (unroll[i])
			return sched;

	j = 0;
	for (i = 0; i < gen->shared_len; ++i)
		perm[i] = j++;
	for (i = gen->shared_len; i < gen->thread_tiled_len; ++i)
		if (!unroll[i])
			perm[i] = j++;
	gen->first_unroll = j - gen->shared_len;
	for (i = gen->shared_len; i < len; ++i)
		if (unroll[i])
			perm[i] = j++;

	dim = isl_union_map_get_space(sched);
	permute = permutation(dim, perm, gen->thread_tiled_len);
	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(permute));

	return sched;
}
/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, __isl_keep isl_val *stride, int sign)
{
	int i;
	isl_val *v;
	isl_space *space;
	unsigned nparam;
	unsigned nvar;
	isl_aff *aff;

	isl_val_free(bound->stride);
	bound->stride = isl_val_copy(stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	v = isl_constraint_get_constant_val(c);
	if (sign < 0)
		v = isl_val_neg(v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant_val(aff, v);

	for (i = 0; i < nparam; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_param, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_param, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_in, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_in, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_in, i, v);
	}

	isl_aff_free(bound->shift);
	bound->shift = aff;
}
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
	int i;
	isl_ctx *ctx;
	isl_val *v;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	ctx = isl_constraint_get_ctx(c);
	n_div = isl_constraint_dim(c, isl_dim_div);
	v = isl_constraint_get_coefficient_val(c, isl_dim_out, 0);

	if (n_div && (isl_val_is_one(v) || isl_val_is_negone(v))) {
		int s = isl_val_sgn(v);
		isl_val *stride = isl_val_zero(ctx);

		isl_val_free(v);
		for (i = 0; i < n_div; ++i) {
			v = isl_constraint_get_coefficient_val(c,
							       isl_dim_div, i);
			stride = isl_val_gcd(stride, v);
		}
		if (!isl_val_is_zero(stride) &&
		    isl_val_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

		isl_val_free(stride);
	} else
		isl_val_free(v);

	isl_constraint_free(c);
	return 0;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * The mapping recorded in bound->shift_map is of the form
 *
 *	[D -> i] -> [D -> (i + S(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	bound->stride = NULL;

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (!bound->stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bound->shift_map = isl_basic_map_apply_range(shift, scale);
	bmap = isl_basic_map_copy(bound->shift_map);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}
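/* Worked example of the stride detection above (illustrative): if the
 * affine hull of "bounds" contains an equality stating that the index j
 * satisfies j = 2 e + c(D) for some existentially quantified e, then a
 * stride g = 2 is detected, a shift s(D) is recorded such that j + s(D)
 * is always a multiple of 2, and shift_map becomes
 * [D -> j] -> [D -> (j + s(D))/2].
 */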
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.  This variable
 * is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return 0;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return 0;
}
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					 &size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}
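/* For instance (illustrative), if the accesses in the considered direction
 * satisfy
 *
 *	t(x) <= a <= t(x) + 31
 *
 * for some expression t(x) in the parameters and input dimensions, then
 * compute_size_in_direction derives lb = t(x) and size = 32.
 */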
/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 */
static int can_tile(__isl_keep isl_map *access, struct gpu_array_tile *tile)
{
	int i;

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
					       1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return 0;
	}

	return 1;
}
/* Construct a map with input the shared tile loops and the loops that
 * will be wrapped around the threads that relates these latter loops
 * to the thread indices and then projects them out.
 */
static __isl_give isl_map *compute_privatization(struct gpu_gen *gen)
{
	isl_space *dim;
	isl_map *priv;
	isl_map *tiling;
	isl_map *proj;
	isl_set *par;

	dim = isl_union_map_get_space(gen->shared_sched);

	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim),
				gen->shared_len + gen->n_block,
				gen->shared_len, gen->n_block, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim),
				gen->shared_len + gen->n_block,
				gen->shared_len, gen->n_block, gen->block_dim);

	priv = tiling;

	par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
		gen->n_block, "t");

	priv = isl_map_align_params(priv, isl_set_get_space(par));
	priv = isl_map_intersect_range(priv, par);

	dim = isl_map_get_space(priv);
	dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
	dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
	proj = projection(dim, gen->shared_len + 2 * gen->n_block,
			  gen->shared_len);

	priv = isl_map_apply_range(priv, proj);

	return priv;
}
1962 * the dimension at position "pos" and leaves all other dimensions
1965 static __isl_give isl_map
*next(__isl_take isl_space
*domain_dim
, int pos
)
1968 int len
= isl_space_dim(domain_dim
, isl_dim_set
);
1970 isl_basic_map
*next
;
1971 isl_local_space
*ls
;
1973 dim
= isl_space_map_from_set(domain_dim
);
1974 next
= isl_basic_map_universe(isl_space_copy(dim
));
1975 ls
= isl_local_space_from_space(dim
);
1977 for (i
= 0; i
< len
; ++i
) {
1980 c
= isl_equality_alloc(isl_local_space_copy(ls
));
1981 c
= isl_constraint_set_coefficient_si(c
, isl_dim_in
, i
, 1);
1982 c
= isl_constraint_set_coefficient_si(c
, isl_dim_out
, i
, -1);
1984 c
= isl_constraint_set_constant_si(c
, 1);
1985 next
= isl_basic_map_add_constraint(next
, c
);
1988 isl_local_space_free(ls
);
1990 return isl_map_from_basic_map(next
);
/* Check if the given access is coalesced.
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * This function is only called for access relations without reuse.
 */
static int access_is_coalesced(struct gpu_gen *gen,
	__isl_keep isl_union_map *access)
{
	isl_space *dim;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(gen->tiled_sched));
	access_map = isl_map_from_union_map(access);

	dim = isl_map_get_space(access_map);
	dim = isl_space_domain(dim);
	next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);

	dim = isl_map_get_space(access_map);
	dim = isl_space_range(dim);
	next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
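/* As an illustration (hypothetical access patterns): for an access A[t][i]
 * where t is the dimension wrapped over the last thread index, incrementing
 * t moves to A[t+1][i], which does not increment the last array index, so
 * such an access is not coalesced; an access A[i][t], on the other hand, is.
 */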
/* Given an access relation in terms of the first gen->shared_len + gen->n_block
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first gen->shared_len dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
static int access_is_bijective(struct gpu_gen *gen, __isl_keep isl_map *access)
{
	int res;
	isl_set *par;
	isl_space *space;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	par = parametrization(space, gen->shared_len + gen->n_block,
				0, gen->shared_len, "s");
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}
/* Look for the last shared tile loop that affects the offset of "tile"
 * and return the result.
 * If there is no such loop, then return the index of the loop
 * before the first shared tile loop, in particular gen->tile_first - 1.
 */
static int compute_tile_last_shared(struct gpu_gen *gen,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = gen->shared_len - 1; j >= gen->tile_first; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return j;
}
/* Look for the last shared tile loop that affects the offset of the
 * shared or private tile and store the result in group->last_shared.
 * If there is no such loop, then group->last_shared is set to a value
 * before the first shared tile loop, in particular gen->tile_first - 1.
 * If there is no tile defined on the array reference group,
 * then set group->last_shared to gen->shared_len - 1.
 */
static void set_last_shared(struct gpu_gen *gen,
	struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	group->last_shared = gen->shared_len - 1;

	tile = group->private_tile;
	if (!tile)
		tile = group->shared_tile;
	if (!tile)
		return;

	group->last_shared = compute_tile_last_shared(gen, tile);
}
/* Compute a privatized copy of all access relations from reference groups that
 * are mapped to private memory and store the result in gen->private_access.
 */
static void compute_private_access(struct gpu_gen *gen)
{
	int i, j;
	isl_union_map *private;

	if (!gen->options->use_private_memory)
		return;

	private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));

	for (i = 0; i < gen->prog->n_array; ++i) {
		struct gpu_array_info *array = &gen->prog->array[i];

		if (gpu_array_is_read_only_scalar(array))
			continue;

		for (j = 0; j < array->n_group; ++j) {
			if (!array->groups[j]->private_tile)
				continue;

			private = isl_union_map_union(private,
				group_access_relation(array->groups[j], 1, 1));
		}
	}

	if (isl_union_map_is_empty(private))
		isl_union_map_free(private);
	else {
		isl_union_map *priv;

		private = isl_union_map_apply_domain(private,
					isl_union_map_copy(gen->shared_sched));
		priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
		private = isl_union_map_apply_domain(private, priv);
		gen->private_access = private;
	}
}

/* Compute the size of the tile specified by "tile"
 * in number of elements and return the result.
 */
static __isl_give isl_val *tile_size(isl_ctx *ctx, struct gpu_array_tile *tile)
{
        int i;
        isl_val *size;

        size = isl_val_one(ctx);

        for (i = 0; i < tile->n; ++i)
                size = isl_val_mul(size, isl_val_copy(tile->bound[i].size));

        return size;
}
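
/* For example, a two-dimensional tile with bound[0].size = 32 and
 * bound[1].size = 4 holds 32 * 4 = 128 elements. Note that this is an
 * element count; check_shared_memory_bound below multiplies it by
 * array->size to obtain the amount of memory that the tile occupies.
 */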

/* If max_shared_memory is not set to infinity (-1), then make
 * sure that the total amount of shared memory required by the
 * array reference groups mapped to shared memory is no larger
 * than this maximum.
 *
 * We apply a greedy approach and discard (keep in global memory)
 * those groups that would result in a total memory size that
 * is larger than the maximum.
 */
static void check_shared_memory_bound(struct gpu_gen *gen)
{
        int i, j;
        isl_val *left, *size;

        if (gen->options->max_shared_memory < 0)
                return;

        left = isl_val_int_from_si(gen->ctx, gen->options->max_shared_memory);

        for (i = 0; i < gen->prog->n_array; ++i) {
                struct gpu_array_info *array = &gen->prog->array[i];

                for (j = 0; j < array->n_group; ++j) {
                        struct gpu_array_ref_group *group;

                        group = array->groups[j];
                        if (!group->shared_tile)
                                continue;

                        size = tile_size(gen->ctx, group->shared_tile);
                        size = isl_val_mul_ui(size, array->size);

                        if (isl_val_le(size, left)) {
                                left = isl_val_sub(left, size);
                                continue;
                        }
                        isl_val_free(size);

                        group->shared_tile = free_tile(group->shared_tile);
                }
        }

        isl_val_free(left);
}
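
/* Numeric illustration: with max_shared_memory = 49152 (48 KB) and
 * three groups requiring 16384, 40960 and 8192 bytes, the first group
 * fits and leaves 32768 bytes, the second exceeds the remainder and is
 * kept in global memory, and the third fits in the remaining space.
 * Since the approach is greedy, the outcome depends on the order in
 * which the groups are visited.
 */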

/* Given a description of an array tile "tile" and the "space"
 *
 *      { D -> A }
 *
 * where D represents the first shared_len schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *      { [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile". In particular,
 *
 *      a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *      { [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
        struct gpu_array_tile *tile, __isl_keep isl_space *space,
        __isl_keep isl_multi_aff *insert_array)
{
        int i;
        isl_ctx *ctx;
        isl_multi_aff *shift;
        isl_multi_val *stride;
        isl_space *space2;
        isl_local_space *ls;
        isl_multi_aff *tiling;

        ctx = isl_space_get_ctx(space);
        space2 = isl_space_domain(isl_space_copy(space));
        ls = isl_local_space_from_space(space2);
        space2 = isl_space_range(isl_space_copy(space));
        stride = isl_multi_val_zero(space2);
        shift = isl_multi_aff_zero(isl_space_copy(space));

        for (i = 0; i < tile->n; ++i) {
                struct gpu_array_bound *bound = &tile->bound[i];
                isl_val *stride_i;
                isl_aff *shift_i;

                if (tile->bound[i].shift) {
                        stride_i = isl_val_copy(bound->stride);
                        shift_i = isl_aff_copy(bound->shift);
                } else {
                        stride_i = isl_val_one(ctx);
                        shift_i = isl_aff_zero_on_domain(
                                        isl_local_space_copy(ls));
                }

                stride = isl_multi_val_set_val(stride, i, stride_i);
                shift = isl_multi_aff_set_aff(shift, i, shift_i);
        }
        isl_local_space_free(ls);

        shift = isl_multi_aff_pullback_multi_aff(shift,
                                isl_multi_aff_copy(insert_array));

        tiling = isl_multi_aff_range_map(isl_space_copy(space));
        tiling = isl_multi_aff_add(tiling, shift);
        tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

        return tiling;
}
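
/* Worked example (hypothetical bounds): if bound[0] has stride 2 and
 * shift 1 while bound[1] is unstrided, then for a space
 * { D[i] -> A[a0, a1] } the constructed function is
 *
 *      { [D[i] -> A[a0, a1]] -> A'[(1 + a0)/2, a1] }
 *
 * i.e., strided dimensions are shifted and scaled down while the
 * remaining dimensions use stride 1 and shift 0 and are copied as is.
 */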

/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *      { [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first shared_len schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile. The name of T is the name of the local
 * memory variable.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *      t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *      t = a - lb(i)
 */
static void compute_group_tiling(struct gpu_array_ref_group *group)
{
        int i;
        struct gpu_array_tile *tile;
        struct gpu_array_info *array = group->array;
        isl_space *space;
        isl_multi_aff *tiling, *lb, *insert_array;
        isl_printer *p;
        char *local_name;

        tile = group->private_tile;
        if (!tile)
                tile = group->shared_tile;
        if (!tile)
                return;

        space = isl_map_get_space(group->access);
        insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

        for (i = 0; i < tile->n; ++i)
                if (tile->bound[i].shift)
                        break;

        if (i < tile->n)
                tiling = strided_tile(tile, space, insert_array);
        else
                tiling = isl_multi_aff_range_map(isl_space_copy(space));

        lb = isl_multi_aff_zero(space);
        for (i = 0; i < tile->n; ++i) {
                isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
                lb = isl_multi_aff_set_aff(lb, i, lb_i);
        }
        lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

        tiling = isl_multi_aff_sub(tiling, lb);

        p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
        p = print_array_name(p, group);
        local_name = isl_printer_get_str(p);
        isl_printer_free(p);
        tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
        free(local_name);

        tile->tiling = tiling;
}
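
/* For example (hypothetical access pattern), if for a given D[i] only
 * the elements A[i], ..., A[i + 31] are accessed, then lb(i) = i and
 * the tiling is
 *
 *      { [D[i] -> A[a]] -> T[a - i] }
 *
 * so that the local tile T only needs to hold 32 elements.
 */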

/* Compute a tiling for all the array reference groups.
 */
static void compute_group_tilings(struct gpu_gen *gen)
{
        int i, j;

        for (i = 0; i < gen->prog->n_array; ++i) {
                struct gpu_array_info *array = &gen->prog->array[i];

                for (j = 0; j < array->n_group; ++j)
                        compute_group_tiling(array->groups[j]);
        }
}

/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_array_info *array,
        __isl_keep isl_union_map *sched, struct gpu_array_ref_group **groups)
{
        int i;
        int n;
        isl_ctx *ctx = isl_union_map_get_ctx(sched);

        n = 0;
        for (i = 0; i < array->n_ref; ++i) {
                isl_union_map *umap;
                isl_map *map;
                struct gpu_array_ref_group *group;
                struct gpu_stmt_access *access = array->refs[i];

                map = isl_map_copy(access->access);
                umap = isl_union_map_from_map(map);
                umap = isl_union_map_apply_domain(umap,
                                isl_union_map_copy(sched));

                if (isl_union_map_is_empty(umap)) {
                        isl_union_map_free(umap);
                        continue;
                }

                map = isl_map_from_union_map(umap);
                map = isl_map_detect_equalities(map);

                group = isl_calloc_type(ctx, struct gpu_array_ref_group);
                assert(group);
                group->array = array;
                group->access = map;
                group->write = access->write;
                group->refs = &array->refs[i];
                group->n_ref = 1;

                groups[n++] = group;
        }

        return n;
}

/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
static void free_array_ref_group(struct gpu_array_ref_group *group)
{
        if (!group)
                return;
        free_tile(group->shared_tile);
        free_tile(group->private_tile);
        isl_map_free(group->access);
        if (group->n_ref > 1)
                free(group->refs);
        free(group);
}

/* Given a map where the input dimensions represent the tile loops,
 * eliminate the innermost of those that have a fixed value
 * until we reach one that does not (obviously) have a fixed value.
 */
static __isl_give isl_map *eliminate_fixed_inner_loops(
        __isl_take isl_map *access)
{
        int i, n;

        n = isl_map_dim(access, isl_dim_in);

        for (i = n - 1; i >= 0; --i) {
                if (!map_plain_is_fixed(access, isl_dim_in, i))
                        break;
                access = isl_map_eliminate(access, isl_dim_in, i, 1);
        }

        return access;
}
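
/* For example (illustration), in the scheduled access
 *
 *      { [i, j, k] -> A[i + j] : k = 0 }
 *
 * the innermost input dimension k is plainly fixed and gets eliminated,
 * while j is not, so the iteration stops there and only k is dropped.
 */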

/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop. In particular, ignore any inner dimension
 * with a fixed value.
 * The copying to and from shared memory will be performed within
 * the innermost actual loop so we are only allowed to consider
 * the dimensions up to that innermost loop while checking whether
 * two access relations overlap.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
        struct gpu_array_ref_group *group2)
{
        int empty;
        isl_map *access1, *access2;

        access1 = isl_map_copy(group1->access);
        access1 = eliminate_fixed_inner_loops(access1);
        access2 = isl_map_copy(group2->access);
        access2 = eliminate_fixed_inner_loops(access2);
        access1 = isl_map_intersect(access1, access2);
        empty = isl_map_is_empty(access1);
        isl_map_free(access1);

        return !empty;
}

/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
        struct gpu_array_ref_group *group1,
        struct gpu_array_ref_group *group2)
{
        int i;
        isl_ctx *ctx;
        struct gpu_array_ref_group *group;

        ctx = isl_map_get_ctx(group1->access);
        group = isl_calloc_type(ctx, struct gpu_array_ref_group);
        assert(group);
        group->array = group1->array;
        group->access = isl_map_union(isl_map_copy(group1->access),
                                        isl_map_copy(group2->access));
        group->write = group1->write || group2->write;
        group->n_ref = group1->n_ref + group2->n_ref;
        group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
                                        group->n_ref);
        assert(group->refs);
        for (i = 0; i < group1->n_ref; ++i)
                group->refs[i] = group1->refs[i];
        for (i = 0; i < group2->n_ref; ++i)
                group->refs[group1->n_ref + i] = group2->refs[i];

        return group;
}

/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
        struct gpu_array_ref_group *group1,
        struct gpu_array_ref_group *group2)
{
        struct gpu_array_ref_group *group;

        group = join_groups(group1, group2);
        free_array_ref_group(group1);
        free_array_ref_group(group2);
        return group;
}

/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse. Moreover, we require that the access is private
 * to the thread. That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the shared tile loop iterators
 * and the shared point loop iterators that will be wrapped over the
 * threads to the array elements.
 * We actually check that those iterators that will be wrapped
 * partition the array space. This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * We also check that the index expression only depends on parallel
 * loops. That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If it turns out we can use registers, we compute the private memory
 * tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static void compute_group_bounds_core(struct gpu_gen *gen,
        struct gpu_array_ref_group *group)
{
        isl_ctx *ctx = isl_space_get_ctx(group->array->dim);
        isl_union_map *access;
        int n_index = group->array->n_index;
        int no_reuse;
        isl_map *acc;
        int use_shared = gen->options->use_shared_memory;
        int use_private = gen->options->use_private_memory;

        if (!use_shared && !use_private)
                return;
        if (gpu_array_is_read_only_scalar(group->array))
                return;

        access = group_access_relation(group, 1, 1);
        no_reuse = isl_union_map_is_injective(access);

        if (use_shared && (!no_reuse || !access_is_coalesced(gen, access))) {
                group->shared_tile = create_tile(ctx, group->array->n_index);
                if (!can_tile(group->access, group->shared_tile))
                        group->shared_tile = free_tile(group->shared_tile);
        }

        if (!use_private || no_reuse) {
                isl_union_map_free(access);
                return;
        }

        access = isl_union_map_apply_domain(access,
                                isl_union_map_copy(gen->shared_sched));

        acc = isl_map_from_union_map(access);

        if (!access_is_bijective(gen, acc)) {
                isl_map_free(acc);
                return;
        }

        group->private_tile = create_tile(gen->ctx, n_index);
        acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
        if (!can_tile(acc, group->private_tile))
                group->private_tile = free_tile(group->private_tile);

        isl_map_free(acc);
}

/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set last_shared.
 */
static void compute_group_bounds(struct gpu_gen *gen,
        struct gpu_array_ref_group *group)
{
        compute_group_bounds_core(gen, group);
        set_last_shared(gen, group);
}

/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 *
 * Return the updated number of groups.
 */
static int group_writes(struct gpu_gen *gen,
        int n, struct gpu_array_ref_group **groups,
        int (*overlap)(struct gpu_array_ref_group *group1,
                struct gpu_array_ref_group *group2), int compute_bounds)
{
        int i, j;

        for (i = 0; i < n; ++i) {
                for (j = n - 1; j > i; --j) {
                        if (!groups[i]->write && !groups[j]->write)
                                continue;

                        if (!overlap(groups[i], groups[j]))
                                continue;

                        groups[i] = join_groups_and_free(groups[i], groups[j]);
                        if (compute_bounds)
                                compute_group_bounds(gen, groups[i]);

                        groups[j] = groups[n - 1];
                        n--;
                }
        }

        return n;
}

/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 */
static int group_overlapping_writes(struct gpu_gen *gen,
        int n, struct gpu_array_ref_group **groups)
{
        return group_writes(gen, n, groups, &accesses_overlap, 0);
}

/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->last_shared, group2->last_shared) loops.
 */
static int last_shared_accesses_overlap(struct gpu_array_ref_group *group1,
        struct gpu_array_ref_group *group2)
{
        int last_shared;
        int dim;
        int empty;
        isl_map *map_i, *map_j, *map;

        last_shared = group1->last_shared;
        if (group2->last_shared < last_shared)
                last_shared = group2->last_shared;
        map_i = isl_map_copy(group1->access);
        dim = isl_map_dim(map_i, isl_dim_in);
        map_i = isl_map_eliminate(map_i, isl_dim_in,
                        last_shared + 1, dim - (last_shared + 1));
        map_j = isl_map_copy(group2->access);
        map_j = isl_map_eliminate(map_j, isl_dim_in,
                        last_shared + 1, dim - (last_shared + 1));
        map = isl_map_intersect(map_i, map_j);
        empty = isl_map_is_empty(map);
        isl_map_free(map);

        return !empty;
}

/* If two groups have overlapping access relations (within the outer
 * last_shared loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 */
static int group_last_shared_overlapping_writes(struct gpu_gen *gen, int n,
        struct gpu_array_ref_group **groups)
{
        return group_writes(gen, n, groups, &last_shared_accesses_overlap, 1);
}

/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
static int smaller_tile(isl_ctx *ctx, struct gpu_array_tile *tile,
        struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
        int smaller;
        isl_val *size, *size1, *size2;

        size = tile_size(ctx, tile);
        size1 = tile_size(ctx, tile1);
        size2 = tile_size(ctx, tile2);

        size = isl_val_sub(size, size1);
        size = isl_val_sub(size, size2);
        smaller = isl_val_is_neg(size);

        isl_val_free(size);

        return smaller;
}
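
/* For example, merging a 64-element tile with a 32-element tile pays off
 * when the combined group can be tiled with, say, 80 elements
 * (80 < 64 + 32), but not when it needs 96 elements or more.
 */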

/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * If merging two groups decreases the "last_shared" dimension of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 */
static int group_common_shared_memory_tile(struct gpu_gen *gen,
        struct gpu_array_info *array, int n,
        struct gpu_array_ref_group **groups)
{
        int i, j;
        int recompute_overlap = 0;
        isl_ctx *ctx = isl_space_get_ctx(array->dim);

        for (i = 0; i < n; ++i) {
                if (!groups[i]->shared_tile)
                        continue;
                for (j = n - 1; j > i; --j) {
                        isl_map *map;
                        int empty;
                        struct gpu_array_ref_group *group;

                        if (!groups[j]->shared_tile)
                                continue;

                        map = isl_map_intersect(isl_map_copy(groups[i]->access),
                                        isl_map_copy(groups[j]->access));
                        empty = isl_map_is_empty(map);
                        isl_map_free(map);

                        if (empty)
                                continue;

                        group = join_groups(groups[i], groups[j]);
                        compute_group_bounds(gen, group);
                        if (!group->shared_tile ||
                            !smaller_tile(ctx, group->shared_tile,
                                        groups[i]->shared_tile,
                                        groups[j]->shared_tile)) {
                                free_array_ref_group(group);
                                continue;
                        }

                        if (group->last_shared < groups[i]->last_shared ||
                            group->last_shared < groups[j]->last_shared)
                                recompute_overlap = 1;
                        free_array_ref_group(groups[i]);
                        free_array_ref_group(groups[j]);
                        groups[i] = group;
                        groups[j] = groups[n - 1];
                        n--;
                }
        }

        if (recompute_overlap)
                n = group_last_shared_overlapping_writes(gen, n, groups);
        return n;
}

/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group
 * and the "group" field of each reference in each group.
 */
static void set_array_groups(struct gpu_array_info *array,
        int n, struct gpu_array_ref_group **groups)
{
        int i, j;

        array->n_group = n;
        array->groups = groups;

        for (i = 0; i < n; ++i) {
                groups[i]->nr = i;

                for (j = 0; j < groups[i]->n_ref; ++j)
                        groups[i]->refs[j]->group = i;
        }
}

/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the "last_shared" property.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 */
static void group_array_references(struct gpu_gen *gen,
        struct gpu_array_info *array, __isl_keep isl_union_map *sched)
{
        int i;
        int n;
        isl_ctx *ctx = isl_union_map_get_ctx(sched);
        struct gpu_array_ref_group **groups;

        groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
                                        array->n_ref);
        assert(groups);

        n = populate_array_references(array, sched, groups);

        n = group_overlapping_writes(gen, n, groups);

        for (i = 0; i < n; ++i)
                compute_group_bounds(gen, groups[i]);

        n = group_last_shared_overlapping_writes(gen, n, groups);

        n = group_common_shared_memory_tile(gen, array, n, groups);

        set_array_groups(array, n, groups);
}

/* Take tiled_sched, project it onto the shared tile loops and
 * the loops that will be wrapped over the threads and
 * store the result in gen->shared_sched.
 * Also compute a projection that projects out the loops that will be
 * wrapped over the threads and store this projection in gen->shared_proj.
 */
static void compute_shared_sched(struct gpu_gen *gen)
{
        isl_space *dim;
        isl_map *proj;
        isl_union_map *sched;

        sched = isl_union_map_copy(gen->tiled_sched);

        dim = isl_union_map_get_space(sched);
        proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
        sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));

        dim = isl_union_map_get_space(sched);
        proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);

        gen->shared_sched = sched;
        gen->shared_proj = isl_union_map_from_map(proj);
}

/* Group references of all arrays in the program.
 */
static void group_references(struct gpu_gen *gen)
{
        int i;
        isl_union_map *sched;

        sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
                                        isl_union_map_copy(gen->shared_proj));

        for (i = 0; i < gen->prog->n_array; ++i)
                group_array_references(gen, &gen->prog->array[i], sched);

        isl_union_map_free(sched);
}

/* Free all array information that is local to the current kernel.
 */
static void free_local_array_info(struct gpu_gen *gen)
{
        int i, j;

        for (i = 0; i < gen->prog->n_array; ++i) {
                struct gpu_array_info *array = &gen->prog->array[i];

                for (j = 0; j < array->n_group; ++j)
                        free_array_ref_group(array->groups[j]);
                free(array->groups);
        }
}

/* Compute the size of a bounding box around the origin and "set",
 * where "set" is assumed to contain only non-negative elements.
 * In particular, compute the maximal value of "set" in each direction
 * and add one.
 */
static __isl_give isl_multi_pw_aff *extract_size(__isl_take isl_set *set,
        __isl_keep isl_set *context)
{
        int i, n;
        isl_multi_pw_aff *mpa;

        n = isl_set_dim(set, isl_dim_set);
        mpa = isl_multi_pw_aff_zero(isl_set_get_space(set));
        for (i = 0; i < n; ++i) {
                isl_space *space;
                isl_aff *one;
                isl_pw_aff *bound;

                bound = isl_set_dim_max(isl_set_copy(set), i);
                bound = isl_pw_aff_coalesce(bound);
                bound = isl_pw_aff_gist(bound, isl_set_copy(context));

                space = isl_pw_aff_get_domain_space(bound);
                one = isl_aff_zero_on_domain(isl_local_space_from_space(space));
                one = isl_aff_add_constant_si(one, 1);
                bound = isl_pw_aff_add(bound, isl_pw_aff_from_aff(one));
                mpa = isl_multi_pw_aff_set_pw_aff(mpa, i, bound);
        }
        isl_set_free(set);

        return mpa;
}

/* Compute the effective grid size as a list of the sizes in each dimension.
 *
 * The grid size specified by the user or set by default
 * in read_grid_sizes() and applied in tile_schedule(),
 * may be too large for the given code in the sense that
 * it may contain blocks that don't need to execute anything.
 * We therefore don't return this grid size, but instead the
 * smallest grid size that ensures that all blocks that actually
 * execute code are included in the grid.
 *
 * We first extract a description of the grid, i.e., the possible values
 * of the block ids, from gen->tiled_sched.
 * The block ids are parameters in gen->tiled_sched.
 * We simply need to change them into set dimensions.
 *
 * Then, for each block dimension, we compute the maximal value of the block id
 * and add one.
 */
static __isl_give isl_multi_pw_aff *extract_grid_size(struct gpu_gen *gen,
        struct ppcg_kernel *kernel)
{
        int i;
        isl_set *grid;

        grid = isl_union_map_params(isl_union_map_copy(gen->tiled_sched));
        grid = isl_set_from_params(grid);
        grid = isl_set_add_dims(grid, isl_dim_set, gen->n_grid);
        for (i = 0; i < gen->n_grid; ++i) {
                int pos;
                char name[20];

                snprintf(name, sizeof(name), "b%d", i);
                pos = isl_set_find_dim_by_name(grid, isl_dim_param, name);
                assert(pos >= 0);
                grid = isl_set_equate(grid, isl_dim_param, pos, isl_dim_set, i);
                grid = isl_set_project_out(grid, isl_dim_param, pos, 1);
        }

        return extract_size(grid, kernel->context);
}

/* Compute the size of a fixed bounding box around the origin and "set",
 * where "set" is assumed to contain only non-negative elements,
 * and store the results in "size".
 * In particular, compute the maximal value of "set" in each direction
 * and add one.
 */
static void extract_fixed_size(__isl_take isl_set *set, int *size)
{
        int i, n;
        isl_local_space *ls;
        isl_aff *obj;

        n = isl_set_dim(set, isl_dim_set);
        ls = isl_local_space_from_space(isl_set_get_space(set));
        obj = isl_aff_zero_on_domain(ls);
        for (i = 0; i < n; ++i) {
                isl_val *max;

                obj = isl_aff_set_coefficient_si(obj, isl_dim_in, i, 1);
                max = isl_set_max_val(set, obj);
                size[i] = isl_val_get_num_si(max) + 1;
                isl_val_free(max);
                obj = isl_aff_set_coefficient_si(obj, isl_dim_in, i, 0);
        }
        isl_aff_free(obj);
        isl_set_free(set);
}

/* Compute the effective block size as a list of the sizes in each dimension
 * and store the sizes in kernel->block_dim.
 *
 * The block size specified by the user or set by default
 * in read_block_sizes() and applied in thread_tile_schedule(),
 * may be too large for the given code in the sense that
 * it may contain threads that don't need to execute anything.
 * We therefore don't store this block size in kernel->block_dim,
 * but instead the smallest block size that ensures that all threads
 * that actually execute code are included in the block.
 *
 * The current implementation eliminates all parameters, ensuring
 * that the size is a fixed constant in each dimension.
 * In principle we could also compute parametric sizes.
 * We would have to make sure to project out all b%d and t%d parameters,
 * however.
 */
static void extract_block_size(struct gpu_gen *gen, struct ppcg_kernel *kernel)
{
        int i;
        int nparam;
        isl_set *block;
        isl_multi_pw_aff *mpa;

        block = isl_union_map_params(isl_union_map_copy(gen->local_sched));
        block = isl_set_from_params(block);
        block = isl_set_add_dims(block, isl_dim_set, gen->n_block);
        kernel->n_block = gen->n_block;
        for (i = 0; i < gen->n_block; ++i) {
                int pos;
                char name[20];

                snprintf(name, sizeof(name), "t%d", i);
                pos = isl_set_find_dim_by_name(block, isl_dim_param, name);
                assert(pos >= 0);
                block = isl_set_equate(block, isl_dim_param, pos,
                                        isl_dim_set, i);
        }
        nparam = isl_set_dim(block, isl_dim_param);
        block = isl_set_project_out(block, isl_dim_param, 0, nparam);

        extract_fixed_size(block, kernel->block_dim);
}

void ppcg_kernel_free(void *user)
{
        struct ppcg_kernel *kernel = user;
        int i;

        if (!kernel)
                return;

        isl_multi_pw_aff_free(kernel->grid_size);
        isl_set_free(kernel->context);
        isl_union_set_free(kernel->arrays);
        isl_space_free(kernel->space);
        isl_ast_node_free(kernel->tree);

        for (i = 0; i < kernel->n_array; ++i)
                isl_pw_aff_list_free(kernel->array[i].bound);
        free(kernel->array);

        for (i = 0; i < kernel->n_var; ++i) {
                free(kernel->var[i].name);
                isl_vec_free(kernel->var[i].size);
        }
        free(kernel->var);

        free(kernel);
}

static void create_kernel_var(isl_ctx *ctx, struct gpu_array_ref_group *group,
        struct ppcg_kernel_var *var)
{
        int j;
        struct gpu_array_tile *tile;
        isl_printer *p;

        var->array = group->array;

        tile = group->private_tile;
        var->type = ppcg_access_private;
        if (!tile) {
                tile = group->shared_tile;
                var->type = ppcg_access_shared;
        }

        p = isl_printer_to_str(ctx);
        p = print_array_name(p, group);
        var->name = isl_printer_get_str(p);
        isl_printer_free(p);

        var->size = isl_vec_alloc(ctx, group->array->n_index);

        for (j = 0; j < group->array->n_index; ++j)
                var->size = isl_vec_set_element_val(var->size, j,
                                isl_val_copy(tile->bound[j].size));
}

static void create_kernel_vars(struct gpu_gen *gen, struct ppcg_kernel *kernel)
{
        int i, j, n;

        n = 0;
        for (i = 0; i < gen->prog->n_array; ++i) {
                struct gpu_array_info *array = &gen->prog->array[i];

                for (j = 0; j < array->n_group; ++j) {
                        struct gpu_array_ref_group *group = array->groups[j];
                        if (group->private_tile || group->shared_tile)
                                ++n;
                }
        }

        kernel->n_var = n;
        kernel->var = isl_calloc_array(gen->ctx, struct ppcg_kernel_var, n);
        assert(kernel->var);

        n = 0;
        for (i = 0; i < gen->prog->n_array; ++i) {
                struct gpu_array_info *array = &gen->prog->array[i];

                for (j = 0; j < array->n_group; ++j) {
                        struct gpu_array_ref_group *group = array->groups[j];
                        if (!group->private_tile && !group->shared_tile)
                                continue;
                        create_kernel_var(gen->ctx, group, &kernel->var[n]);
                        ++n;
                }
        }
}

/* The sizes of the arrays on the host that have been computed by
 * extract_array_info may depend on the parameters. Use the extra
 * constraints on the parameters that are valid at "host_domain"
 * to simplify these expressions and store the results in kernel->array.
 */
static void localize_bounds(struct gpu_gen *gen, struct ppcg_kernel *kernel,
        __isl_keep isl_set *host_domain)
{
        int i, j;
        isl_set *context;

        kernel->array = isl_calloc_array(gen->ctx,
                        struct gpu_local_array_info, gen->prog->n_array);
        assert(kernel->array);
        kernel->n_array = gen->prog->n_array;

        context = isl_set_copy(host_domain);
        context = isl_set_params(context);

        for (i = 0; i < gen->prog->n_array; ++i) {
                struct gpu_array_info *array = &gen->prog->array[i];
                isl_pw_aff_list *local;

                if (array->n_group == 0)
                        continue;

                local = isl_pw_aff_list_alloc(gen->ctx, array->n_index);

                for (j = 0; j < array->n_index; ++j) {
                        isl_pw_aff *pwaff;

                        pwaff = isl_pw_aff_copy(array->bound[j]);
                        pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
                        local = isl_pw_aff_list_add(local, pwaff);
                }

                kernel->array[i].bound = local;
        }
        isl_set_free(context);
}

/* Find the element in prog->stmts that has the given "id".
 * Return NULL if no such gpu_stmt can be found.
 */
static struct gpu_stmt *find_stmt(struct gpu_prog *prog, __isl_keep isl_id *id)
{
        int i;

        for (i = 0; i < prog->n_stmts; ++i) {
                if (id == prog->stmts[i].id)
                        break;
        }

        return i < prog->n_stmts ? &prog->stmts[i] : NULL;
}

/* Set gen->tile_len and gen->n_parallel to those of the statement
 * affected by the first map (part of the schedule)
 * on which this function is called.
 * Because of the way the schedule is constructed, the other statements
 * in the list, if any, should have the same values for these properties.
 */
static int extract_tile_len(__isl_take isl_map *map, void *user)
{
        struct gpu_gen *gen = (struct gpu_gen *) user;
        isl_id *id;
        struct gpu_stmt *stmt;

        id = isl_map_get_tuple_id(map, isl_dim_in);
        stmt = find_stmt(gen->prog, id);
        isl_id_free(id);

        isl_map_free(map);

        if (!stmt)
                isl_die(gen->ctx, isl_error_unknown,
                        "statement not found", return -1);

        gen->tile_len = stmt->tile_len;
        gen->n_parallel = stmt->n_parallel;

        return -1;
}

void ppcg_kernel_stmt_free(void *user)
{
        int i;
        struct ppcg_kernel_stmt *stmt = user;

        if (!stmt)
                return;

        switch (stmt->type) {
        case ppcg_kernel_copy:
                isl_ast_expr_free(stmt->u.c.index);
                isl_ast_expr_free(stmt->u.c.local_index);
                break;
        case ppcg_kernel_domain:
                for (i = 0; i < stmt->u.d.n_access; ++i) {
                        isl_ast_expr_list_free(stmt->u.d.access[i].index);
                        free(stmt->u.d.access[i].local_name);
                }
                free(stmt->u.d.access);
                break;
        case ppcg_kernel_sync:
                break;
        }

        free(stmt);
}

/* Set the options of "build" to
 *
 *      { space -> unroll[x] : x >= first }
 */
static __isl_give isl_ast_build *set_unroll(
        __isl_take isl_ast_build *build, __isl_take isl_space *space,
        int first)
{
        isl_ctx *ctx;
        isl_map *unroll;
        isl_union_map *opt;

        ctx = isl_ast_build_get_ctx(build);

        space = isl_space_from_domain(space);
        space = isl_space_add_dims(space, isl_dim_out, 1);
        space = isl_space_set_tuple_name(space, isl_dim_out, "unroll");
        unroll = isl_map_universe(space);
        unroll = isl_map_lower_bound_si(unroll, isl_dim_out, 0, first);
        opt = isl_union_map_from_map(unroll);

        build = isl_ast_build_set_options(build, opt);

        return build;
}
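
/* For example, for a two-dimensional schedule space and first = 1,
 * the options are
 *
 *      { [i0, i1] -> unroll[x] : x >= 1 }
 *
 * instructing the AST generator to unroll all schedule dimensions
 * starting at position 1.
 */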

/* Return a list of isl_ids of the form "prefix%d".
 */
static __isl_give isl_id_list *generate_names(isl_ctx *ctx,
        int n, const char *prefix)
{
        int i;
        char name[10];
        isl_id_list *names;

        names = isl_id_list_alloc(ctx, n);
        for (i = 0; i < n; ++i) {
                isl_id *id;

                snprintf(name, sizeof(name), "%s%d", prefix, i);
                id = isl_id_alloc(ctx, name, NULL);
                names = isl_id_list_add(names, id);
        }

        return names;
}

/* Extend the schedule "schedule" with the part of "extension"
 * starting at "first" up to "len".
 */
static __isl_give isl_union_map *extend_schedule(
        __isl_take isl_union_map *schedule,
        __isl_take isl_union_map *extension, int first, int len)
{
        isl_space *space;
        isl_map *proj;
        isl_union_map *umap;

        space = isl_union_map_get_space(schedule);
        space = isl_space_set_from_params(space);
        space = isl_space_add_dims(space, isl_dim_set, len);
        proj = isl_set_identity(isl_set_universe(space));
        proj = isl_map_project_out(proj, isl_dim_out, 0, first);
        extension = isl_union_map_apply_range(extension,
                                        isl_union_map_from_map(proj));

        schedule = isl_union_map_range_product(schedule, extension);

        return schedule;
}
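
/* Illustration (hypothetical spaces): extending a schedule D -> [a, b]
 * with an extension D -> [x, y, z, w] for first = 2 and len = 4 projects
 * the extension onto its last two dimensions and returns the range
 * product
 *
 *      D -> [[a, b] -> [z, w]]
 */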

/* This function is called for each access to an array in each instance
 * in the kernel of some statement in the original code.
 * Replace that access by an access to global, shared or private memory
 * and store the results in *kernel_access.
 *
 * Since the array in shared or private memory is just
 * a shifted copy of part of the original array, we simply need
 * to subtract the lower bound, which was computed in can_tile.
 * If any of the indices is strided, then we first add
 * shared_tile->bound[i].shift and divide by shared_tile->bound[i].stride.
 *
 * If the given array is accessed directly from global memory,
 * we don't need to perform any shifting and simply simplify
 * the expression in the context of the domain instead.
 *
 * If the array space (range of access) has no name, then we are
 * accessing an iterator in the original program.
 *
 * The input stmt_access->access relation maps the iteration domain
 * of the current statement to an array element.
 * The first step is to reformulate
 * this access relation in terms of the loop iterators of the generated
 * code through precomposition with gen->stmt_it.
 *
 * The expressions in "tile" are formulated in terms of the first
 * gen->shared_len dimensions of the computed schedule using the mapping
 * sched2shared which maps the loop iterators to these dimensions.
 */
static void compute_index_expression(struct gpu_gen *gen,
        struct ppcg_kernel_access *kernel_access,
        struct gpu_stmt_access *stmt_access, __isl_keep isl_map *stmt_it,
        __isl_keep isl_map *sched2shared, __isl_keep isl_ast_build *build)
{
        isl_map *access;
        isl_pw_multi_aff *pma;
        int i;
        int n_index;
        struct gpu_array_tile *tile = NULL;

        if (isl_map_has_tuple_name(stmt_access->access, isl_dim_out)) {
                const char *name;
                struct gpu_array_ref_group *group;
                isl_printer *p;

                name = isl_map_get_tuple_name(stmt_access->access,
                                                isl_dim_out);

                for (i = 0; i < gen->prog->n_array; ++i) {
                        if (strcmp(name, gen->prog->array[i].name))
                                continue;
                        kernel_access->array = &gen->prog->array[i];
                        kernel_access->local_array = &gen->kernel->array[i];
                }
                assert(kernel_access->array);
                group = kernel_access->array->groups[stmt_access->group];
                p = isl_printer_to_str(gen->ctx);
                p = print_array_name(p, group);
                kernel_access->local_name = isl_printer_get_str(p);
                isl_printer_free(p);
                tile = group->private_tile;
                kernel_access->type = ppcg_access_private;
                if (!tile) {
                        tile = group->shared_tile;
                        kernel_access->type = ppcg_access_shared;
                }
        }
        if (!tile)
                kernel_access->type = ppcg_access_global;

        n_index = isl_map_dim(stmt_access->access, isl_dim_out);
        kernel_access->index = isl_ast_expr_list_alloc(gen->ctx, n_index);

        if (n_index == 0)
                return;

        access = isl_map_copy(stmt_access->access);
        access = isl_map_apply_range(isl_map_copy(stmt_it), access);
        pma = isl_pw_multi_aff_from_map(access);
        pma = isl_pw_multi_aff_coalesce(pma);

        for (i = 0; i < n_index; ++i) {
                isl_set *domain;
                isl_pw_aff *index;
                isl_ast_expr *expr;

                index = isl_pw_multi_aff_get_pw_aff(pma, i);

                if (!kernel_access->array) {
                        domain = isl_map_domain(isl_map_copy(stmt_it));
                        index = isl_pw_aff_coalesce(index);
                        index = isl_pw_aff_gist(index, domain);
                } else {
                        domain = isl_map_domain(isl_map_copy(stmt_it));
                        index = shift_index(index, kernel_access->array,
                                &tile->bound[i], domain,
                                isl_map_copy(sched2shared));
                }

                expr = isl_ast_build_expr_from_pw_aff(build, index);

                kernel_access->index = isl_ast_expr_list_add(
                        kernel_access->index, expr);
        }

        isl_pw_multi_aff_free(pma);
}

/* This function is called for each instance of a user statement
 * in the kernel.
 *
 * We attach a struct ppcg_kernel_stmt to the "node", containing
 * local information about the accesses.
 * This information is computed from stmt_it, which expresses the domain
 * elements in terms of the generated loops, and sched2shared,
 * which expresses the first shared_len dimensions of the schedule
 * computed by PPCG in terms of the generated loops.
 */
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
        __isl_keep isl_ast_build *build, void *user)
{
        struct gpu_gen *gen = (struct gpu_gen *) user;
        struct ppcg_kernel_stmt *stmt;
        isl_id *id;
        isl_map *stmt_it, *sched2shared;
        isl_ast_expr *expr, *arg;
        isl_union_map *schedule;
        int i, n;
        struct gpu_stmt_access *access;

        stmt = isl_calloc_type(gen->ctx, struct ppcg_kernel_stmt);
        if (!stmt)
                return isl_ast_node_free(node);

        expr = isl_ast_node_user_get_expr(node);
        arg = isl_ast_expr_get_op_arg(expr, 0);
        id = isl_ast_expr_get_id(arg);

        schedule = isl_ast_build_get_schedule(build);
        stmt_it = isl_map_reverse(isl_map_from_union_map(schedule));
        sched2shared = compute_sched_to_shared(gen, isl_map_copy(stmt_it));

        stmt->type = ppcg_kernel_domain;
        stmt->u.d.stmt = find_stmt(gen->prog, id);
        if (!stmt->u.d.stmt)
                goto error;

        n = 0;
        for (access = stmt->u.d.stmt->accesses; access; access = access->next)
                ++n;

        stmt->u.d.access = isl_calloc_array(gen->ctx,
                struct ppcg_kernel_access, n);
        if (!stmt->u.d.access)
                goto error;

        stmt->u.d.n_access = n;

        access = stmt->u.d.stmt->accesses;
        for (i = 0; i < n; ++i, access = access->next) {
                compute_index_expression(gen, &stmt->u.d.access[i], access,
                                        stmt_it, sched2shared, build);
        }

        isl_id_free(id);
        isl_map_free(stmt_it);
        isl_map_free(sched2shared);
        isl_ast_expr_free(arg);
        isl_ast_expr_free(expr);

        id = isl_id_alloc(gen->ctx, NULL, stmt);
        id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
        return isl_ast_node_set_annotation(node, id);
error:
        isl_map_free(stmt_it);
        ppcg_kernel_stmt_free(stmt);
        isl_map_free(sched2shared);
        return isl_ast_node_free(node);
}

/* This function is called when code has been generated for the shared
 * tile loops. The "schedule" refers only to the original statements.
 *
 * We extend the schedule with that part of gen->local_sched that hasn't
 * been taken into account yet. This introduces parameters referring
 * to thread ids in the schedule, so we add them (with the appropriate
 * bounds) to the context as well.
 * Finally, we set the appropriate unrolling options
 * if gen->first_unroll is set.
 */
static __isl_give isl_ast_node *create_domain_leaf(
        __isl_take isl_union_map *schedule, __isl_take isl_ast_build *build,
        void *user)
{
        struct gpu_gen *gen = (struct gpu_gen *) user;
        isl_space *space;
        isl_union_map *sched;
        isl_ast_node *tree;
        isl_set *set;
        isl_id_list *iterators;
        int n;

        schedule = extend_schedule(schedule,
                        isl_union_map_copy(gen->local_sched),
                        gen->shared_len, gen->thread_tiled_len);

        space = isl_ast_build_get_schedule_space(build);
        set = isl_set_universe(space);
        set = add_bounded_parameters(set, gen->kernel->n_block,
                                        gen->kernel->block_dim, "t");
        build = isl_ast_build_restrict(build, set);

        n = gen->thread_tiled_len - gen->shared_len;

        if (gen->first_unroll >= 0) {
                space = isl_space_set_alloc(gen->ctx, 0, n);
                build = set_unroll(build, space, gen->first_unroll);
        }
        iterators = generate_names(gen->ctx, n, "c");
        build = isl_ast_build_set_iterators(build, iterators);
        build = isl_ast_build_set_at_each_domain(build, &at_each_domain, gen);
        tree = isl_ast_build_ast_from_schedule(build, schedule);
        isl_ast_build_free(build);

        return tree;
}

/* This function is called for each statement node in the AST of the code
 * for copying to or from shared/private memory.
 * Attach a pointer to a ppcg_kernel_stmt representing the copy
 * statement to the node.
 * The statement name is "read" or "write", depending on whether we are
 * reading from global memory or writing to global memory.
 * The name of the T space is {shared,private}_<array>.
 *
 * The schedule is of the form
 *
 *      type[A -> T] -> L
 *
 * where A refers to a piece of an array and T to the corresponding
 * shifted tile. We split this schedule into mappings L -> A and L -> T
 * and store the corresponding expressions in stmt->index and stmt->local_index,
 * where stmt points to the ppcg_kernel_stmt that is attached to the node.
 */
static __isl_give isl_ast_node *attach_copy_stmt(__isl_take isl_ast_node *node,
        __isl_keep isl_ast_build *build, void *user)
{
        struct gpu_gen *gen = (struct gpu_gen *) user;
        struct ppcg_kernel_stmt *stmt;
        isl_id *id;
        isl_ast_expr *expr;
        isl_space *space;
        isl_map *access, *local_access, *map;
        isl_pw_multi_aff *pma;
        const char *type;
        int array_index;

        stmt = isl_calloc_type(gen->ctx, struct ppcg_kernel_stmt);
        if (!stmt)
                return isl_ast_node_free(node);

        access = isl_map_from_union_map(isl_ast_build_get_schedule(build));
        type = isl_map_get_tuple_name(access, isl_dim_in);
        stmt->u.c.read = !strcmp(type, "read");
        access = isl_map_reverse(access);
        space = isl_space_unwrap(isl_space_range(isl_map_get_space(access)));
        local_access = isl_map_copy(access);

        map = isl_map_domain_map(isl_map_universe(isl_space_copy(space)));
        id = isl_map_get_tuple_id(access, isl_dim_out);
        map = isl_map_set_tuple_id(map, isl_dim_in, id);
        access = isl_map_apply_range(access, map);
        pma = isl_pw_multi_aff_from_map(access);
        expr = isl_ast_build_call_from_pw_multi_aff(build, pma);
        stmt->u.c.index = expr;

        map = isl_map_range_map(isl_map_universe(space));
        id = isl_map_get_tuple_id(local_access, isl_dim_out);
        map = isl_map_set_tuple_id(map, isl_dim_in, id);
        local_access = isl_map_apply_range(local_access, map);
        pma = isl_pw_multi_aff_from_map(local_access);
        expr = isl_ast_build_call_from_pw_multi_aff(build, pma);
        stmt->u.c.local_index = expr;

        stmt->u.c.array = gen->copy_group->array;
        array_index = stmt->u.c.array - gen->prog->array;
        stmt->u.c.local_array = &gen->kernel->array[array_index];
        stmt->type = ppcg_kernel_copy;

        id = isl_id_alloc(gen->ctx, NULL, stmt);
        id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
        return isl_ast_node_set_annotation(node, id);
}

/* Given a schedule of the form
 *
 *      [S -> A] -> L
 *
 * (with S the first shared_len dimensions of the computed schedule,
 * A the array and L the schedule corresponding to the generated loops),
 * indicating where to copy the array elements that need to be copied,
 * construct code for performing the copying.
 *
 * "group" is the array reference group that is being copied
 * "type" is either "read" or "write"
 * private is set if copying needs to be performed to/from registers
 *
 * We first construct a mapping to a shifted tile of the array,
 *
 *      [S -> A] -> T(S,A)                                      (1)
 *
 * If private is set, then we also use this mapping as a schedule
 * (which is already thread-specific and will be completely unrolled).
 * Otherwise, we wrap/tile the range over the threads.
 * The result is
 *
 *      [S -> A] -> T'(S,A)
 *
 * Combined with the given schedule, we have
 *
 *      [S -> A] -> [L -> T'(S,A)]                              (2)
 *
 * From the shifted tile mapping, we construct a mapping
 *
 *      [S -> A] -> [A -> T(S,A)]
 *
 * and apply it to the schedule (2), obtaining
 *
 *      [A -> T(S(L),A)] -> [L -> T'(S(L),A)]
 *
 * Note that we can project out S because it is uniquely defined by L.
 */
static __isl_give isl_ast_node *copy_access(struct gpu_gen *gen,
        __isl_take isl_map *sched,
        const char *type, struct gpu_array_ref_group *group,
        __isl_take isl_ast_build *build, int private)
{
        isl_space *space;
        isl_ast_node *tree;
        isl_map *schedule, *shift, *map;
        isl_set *set;
        isl_id_list *iterators;
        int n;

        shift = shift_access(group);

        schedule = isl_map_copy(shift);
        schedule = isl_map_reset_tuple_id(schedule, isl_dim_out);
        if (!private)
                schedule = tile_access_schedule(gen, schedule);

        n = isl_map_dim(schedule, isl_dim_out);
        set = isl_set_universe(isl_ast_build_get_schedule_space(build));
        set = add_bounded_parameters(set, gen->kernel->n_block,
                                        gen->kernel->block_dim, "t");

        schedule = isl_map_range_product(sched, schedule);

        space = isl_space_domain(isl_map_get_space(shift));
        map = isl_map_range_map(isl_map_universe(isl_space_unwrap(space)));
        map = isl_map_range_product(map, shift);

        schedule = isl_map_apply_domain(schedule, map);

        schedule = isl_map_set_tuple_name(schedule, isl_dim_in, type);

        build = isl_ast_build_restrict(build, set);

        gen->copy_group = group;

        if (private) {
                space = isl_space_range(isl_map_get_space(schedule));
                space = isl_space_range(isl_space_unwrap(space));
                build = set_unroll(build, space, 0);
        }
        iterators = generate_names(gen->ctx, n, "c");
        build = isl_ast_build_set_iterators(build, iterators);
        build = isl_ast_build_set_at_each_domain(build, &attach_copy_stmt, gen);
        tree = isl_ast_build_ast_from_schedule(build,
                                        isl_union_map_from_map(schedule));
        isl_ast_build_free(build);

        return tree;
}

/* Return code for reading into or writing from shared memory
 * the given array reference group.
 *
 * If we are performing a read from global memory to shared memory and
 * if the array involved is not a scalar, then we copy
 * the entire tile to shared memory. This may result in some extra
 * elements getting copied, but it should lead to simpler code
 * (which means that fewer registers may be needed) and less divergence.
 *
 * Otherwise, we only copy the elements that will be read or have been written
 * in the kernel.
 *
 * The input "sched" is of the form
 *
 *      type[S -> A] -> L
 *
 * with S the first shared_len dimensions of the computed schedule,
 * A the array and L the schedule corresponding to the generated loops.
 *
 * We first drop "type",
 *
 *      [S -> A] -> L
 *
 * If the above conditions are satisfied, we project out A,
 * resulting in
 *
 *      S -> L
 *
 * and then introduce the group tile [S -> T], resulting in
 *
 *      [S -> T] -> L
 */
static __isl_give isl_ast_node *copy_group_shared_accesses(
        struct gpu_gen *gen, struct gpu_array_ref_group *group,
        __isl_take isl_map *sched, __isl_take isl_ast_build *build)
{
        const char *type;
        int read;
        isl_union_map *access;

        type = isl_map_get_tuple_name(sched, isl_dim_in);
        read = !strcmp(type, "read");

        sched = isl_map_reset_tuple_id(sched, isl_dim_in);

        if (read && group->array->n_index > 0) {
                isl_space *space;
                isl_map *map;

                space = isl_space_domain(isl_map_get_space(sched));
                space = isl_space_unwrap(space);
                map = isl_map_domain_map(isl_map_universe(space));
                sched = isl_map_apply_domain(sched, map);

                map = group_tile(group);
                map = isl_map_reverse(isl_map_domain_map(map));
                sched = isl_map_apply_domain(sched, map);
        }

        return copy_access(gen, sched, type, group, build, 0);
}

/* Return code for reading into or writing from private memory
 * the given array reference group.
 *
 * Let S be the first shared_len dimensions of the computed schedule,
 * D the iteration domains, A the array and L the schedule corresponding
 * to the generated loops.
 * "sched" is of the form
 *
 *      type[S -> A] -> L
 *
 * where type is either "read" or "write".
 * We apply the privatization D -> S(t), with t the thread ids,
 * to the access relation D -> A to obtain the privatized access relation
 *
 *      S(t) -> A
 *
 * We drop the type from "sched" and intersect with the privatized access
 * relation to obtain
 *
 *      [S(t) -> A] -> L
 */
static __isl_give isl_ast_node *copy_group_private_accesses(
        struct gpu_gen *gen, struct gpu_array_ref_group *group,
        __isl_take isl_map *sched, __isl_take isl_ast_build *build)
{
        const char *type;
        int read;
        isl_union_map *priv;
        isl_union_map *access;
        isl_map *access_map;

        type = isl_map_get_tuple_name(sched, isl_dim_in);
        read = !strcmp(type, "read");

        priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
        priv = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
                                        priv);

        access = group_access_relation(group, read, !read);
        access = isl_union_map_apply_domain(access, priv);
        access_map = isl_map_from_union_map(access);

        sched = isl_map_reset_tuple_id(sched, isl_dim_in);
        sched = isl_map_intersect_domain(sched, isl_map_wrap(access_map));

        return copy_access(gen, sched, type, group, build, 1);
}

/* Return code for reading into or writing from shared or private memory.
 *
 * "schedule" is of the form
 *
 *      type[S -> A] -> L
 *
 * with S the first shared_len dimensions of the computed schedule,
 * A the array and L the schedule corresponding to the generated loops.
 * The array reference group is attached to "type".
 */
static __isl_give isl_ast_node *create_access_leaf(
        struct gpu_gen *gen, __isl_take isl_map *schedule,
        __isl_take isl_ast_build *build)
{
        struct gpu_array_ref_group *group;
        isl_id *id;

        id = isl_map_get_tuple_id(schedule, isl_dim_in);
        group = isl_id_get_user(id);
        isl_id_free(id);

        if (group->private_tile)
                return copy_group_private_accesses(gen, group, schedule,
                                                        build);
        return copy_group_shared_accesses(gen, group, schedule, build);
}

/* Create a domain node representing a synchronization.
 */
static __isl_give isl_ast_node *create_sync_leaf(
        struct gpu_gen *gen, __isl_take isl_map *schedule,
        __isl_take isl_ast_build *build)
{
        struct ppcg_kernel_stmt *stmt;
        isl_id *id;
        isl_space *space;
        isl_ast_node *node;
        isl_ast_expr *expr;

        isl_map_free(schedule);

        stmt = isl_calloc_type(gen->ctx, struct ppcg_kernel_stmt);
        assert(stmt);

        stmt->type = ppcg_kernel_sync;

        space = isl_ast_build_get_schedule_space(build);
        space = isl_space_from_domain(space);
        space = isl_space_set_tuple_name(space, isl_dim_out, "sync");
        expr = isl_ast_build_call_from_pw_multi_aff(build,
                isl_pw_multi_aff_from_multi_aff(isl_multi_aff_zero(space)));
        node = isl_ast_node_alloc_user(expr);
        isl_ast_build_free(build);

        id = isl_id_alloc(gen->ctx, NULL, stmt);
        id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
        return isl_ast_node_set_annotation(node, id);
}

/* This function is called during the code generation at the point
 * where the schedule domain element is completely determined by
 * the generated code. The input schedule contains the original
 * statements as well as synchronization and copy "statements".
 * The latter are scheduled at different points than any of the original
 * statements, so they will only arrive here in isolation.
 *
 * If the current schedule only refers to a single statement,
 * we check if it is a copy or synchronization statement and
 * call the appropriate functions.
 * Otherwise, we assume we are dealing with the original statements
 * and we call create_domain_leaf.
 */
static __isl_give isl_ast_node *create_kernel_leaf(
        __isl_take isl_ast_build *build, void *user)
{
        struct gpu_gen *gen = (struct gpu_gen *) user;
        isl_map *map;
        isl_union_map *schedule;
        const char *name;

        schedule = isl_ast_build_get_schedule(build);

        if (isl_union_map_n_map(schedule) != 1)
                return create_domain_leaf(schedule, build, user);

        map = isl_map_from_union_map(schedule);
        name = isl_map_get_tuple_name(map, isl_dim_in);
        if (!strcmp(name, "read") || !strcmp(name, "write"))
                return create_access_leaf(gen, map, build);
        if (!strcmp(name, "sync"))
                return create_sync_leaf(gen, map, build);

        return create_domain_leaf(isl_union_map_from_map(map), build, user);
}

/* Mark all odd schedule dimensions as "atomic" (when the even dimensions
 * have value 0) and all even schedule dimensions as "unroll".
 *
 * That is, the options look as follows
 *
 *      { [0, b, 0, d, ..., 0] -> atomic[i] : exists a : i = 2 a + 1;
 *        [a, b, c, d, ..., z] -> unroll[i] : exists a : i = 2 a }
 *
 * The even positions are used to be able to schedule copying blocks
 * and synchronization before or after each level of the shared memory
 * tile loops and we want to make sure that code for these is generated
 * separately (within each level).
 */
static __isl_give isl_ast_build *set_atomic_and_unroll(
        __isl_take isl_ast_build *build,
        __isl_take isl_space *space, int sched_len)
{
        isl_ctx *ctx;
        isl_map *map;
        isl_constraint *c;
        isl_union_map *opt;
        isl_local_space *ls;
        int i;

        ctx = isl_ast_build_get_ctx(build);

        space = isl_space_params(space);
        space = isl_space_add_dims(space, isl_dim_set, sched_len);
        space = isl_space_from_domain(space);
        space = isl_space_add_dims(space, isl_dim_out, 2);
        map = isl_map_universe(isl_space_copy(space));
        for (i = 0; i < sched_len; i += 2)
                map = isl_map_fix_si(map, isl_dim_in, i, 0);
        ls = isl_local_space_from_space(isl_map_get_space(map));
        c = isl_equality_alloc(ls);
        c = isl_constraint_set_coefficient_si(c, isl_dim_out, 0, 1);
        c = isl_constraint_set_coefficient_si(c, isl_dim_out, 1, 2);
        c = isl_constraint_set_constant_si(c, 1);
        map = isl_map_add_constraint(map, c);
        map = isl_map_project_out(map, isl_dim_out, 1, 1);
        map = isl_map_set_tuple_name(map, isl_dim_out, "atomic");
        opt = isl_union_map_from_map(map);

        map = isl_map_universe(space);
        ls = isl_local_space_from_space(isl_map_get_space(map));
        c = isl_equality_alloc(ls);
        c = isl_constraint_set_coefficient_si(c, isl_dim_out, 0, 1);
        c = isl_constraint_set_coefficient_si(c, isl_dim_out, 1, 2);
        map = isl_map_add_constraint(map, c);
        map = isl_map_project_out(map, isl_dim_out, 1, 1);
        map = isl_map_set_tuple_name(map, isl_dim_out, "unroll");
        opt = isl_union_map_add_map(opt, map);

        build = isl_ast_build_set_options(build, opt);

        return build;
}
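
/* For example, with sched_len = 4, the constructed options contain
 *
 *      { [0, b, 0, d] -> atomic[i] : exists a : i = 2 a + 1;
 *        [a, b, c, d] -> unroll[i] : exists a : i = 2 a }
 *
 * (an instance of the shape described above): the odd positions are
 * atomic whenever the even schedule dimensions are zero, and the even
 * positions are always unrolled.
 */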

/* Return a map that maps a space of dimension gen->shared_len
 * to its last dimensions starting at gen->tile_first.
 * The range is of dimension
 *
 *      2 * (gen->shared_len - gen->tile_first) + 1
 *
 * The input dimensions are mapped to the odd dimensions in the output,
 * while the even dimensions (except 2*pos) are fixed to 0.
 * Output dimension 2*pos (if pos >= 0) is fixed to "val".
 * If pos >= 0, then only the pos first dimensions starting at gen->tile_first
 * are mapped to the output. The remaining input dimensions are projected
 * out and the corresponding output dimensions are fixed to 0.
 */
static __isl_give isl_map *insert_even(struct gpu_gen *gen,
        __isl_take isl_space *space, int pos, int val)
{
        int i, n;
        isl_map *proj;

        space = isl_space_set_from_params(space);
        space = isl_space_add_dims(space, isl_dim_set, gen->shared_len);
        space = isl_space_map_from_set(space);
        proj = isl_map_identity(space);
        proj = isl_map_project_out(proj, isl_dim_out, 0, gen->tile_first);
        n = gen->shared_len - gen->tile_first;
        for (i = 0; i <= n; ++i) {
                proj = isl_map_insert_dims(proj, isl_dim_out, 2 * i, 1);
                if (i == pos)
                        proj = isl_map_fix_si(proj, isl_dim_out, 2 * i, val);
                else
                        proj = isl_map_fix_si(proj, isl_dim_out, 2 * i, 0);
        }

        if (pos < 0)
                return proj;

        proj = isl_map_eliminate(proj, isl_dim_in, gen->tile_first + pos,
                        gen->shared_len - (gen->tile_first + pos));
        for (i = pos; i < n; ++i)
                proj = isl_map_fix_si(proj, isl_dim_out, 2 * i + 1, 0);

        return proj;
}
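
/* Worked example (hypothetical sizes): for tile_first = 1,
 * shared_len = 3, pos = 1 and val = 2, the input [s0, s1, s2] is first
 * projected onto [s1, s2], zeros are interleaved at the even positions,
 * output dimension 2 * pos is fixed to val, and the dimensions after
 * position pos are projected out and zeroed, giving
 *
 *      [s0, s1, s2] -> [0, s1, 2, 0, 0]
 */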
/* Given the AST context schedule "schedule" and the mapping from
 * domains to the shared tile loops "shared_sched", add a schedule
 * for a synchronization operation at position "val" of loop level "pos".
 *
 * schedule is of the form
 *
 *	D -> L
 *
 * (with D the iteration domains and L the already generated loops),
 * while shared_sched is of the form
 *
 *	D -> S
 *
 * We combine them into
 *
 *	L -> S
 *
 * and then apply a mapping
 *
 *	[s_0,...] -> [0,s_{tile_first},0,..., val, 0, 0, ... 0]
 *
 * and use the result as a schedule for "sync".
 */
static __isl_give isl_union_map *add_sync_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *res, __isl_keep isl_union_map *schedule,
	__isl_keep isl_union_map *shared_sched, int pos, int val)
{
	isl_space *space;
	isl_map *proj, *map;

	shared_sched = isl_union_map_copy(shared_sched);
	schedule = isl_union_map_copy(schedule);

	space = isl_union_map_get_space(shared_sched);
	schedule = isl_union_map_apply_domain(shared_sched, schedule);
	map = isl_map_from_union_map(schedule);

	proj = insert_even(gen, space, pos, val);
	map = isl_map_apply_range(map, proj);
	map = isl_map_from_range(isl_map_wrap(map));
	map = isl_map_set_tuple_name(map, isl_dim_in, "sync");

	res = isl_union_map_add_map(res, map);

	return res;
}

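/* Illustrative example (added, not from the original source): with
 * tile_first = 1, shared_len = 3, pos = 2 and val = -1, the sync schedule
 * added above has the shape
 *
 *	{ sync[] -> [L -> [0, s_1, 0, s_2, -1]] }
 *
 * scheduling the synchronization at statement-level position -1
 * of the innermost shared tile loop.
 */
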
/* Given the AST context schedule "schedule" and the mapping from
 * domains to the shared tile loops "shared_sched", add a schedule
 * for copying an array reference group to/from shared/private memory.
 * "read" is set if data should be copied from global memory
 * to shared/private memory.
 * "k" represents the current group
 * "s" is the total number of groups
 *
 * We schedule an operation before or after the innermost loop
 * of "shared_sched" that affects the tile of the array reference group.
 *
 * schedule is of the form
 *
 *	D -> L
 *
 * (with D the iteration domains and L the already generated loops),
 * while shared_sched is of the form
 *
 *	D -> S
 *
 * We first compute the access relation for the reference group
 *
 *	D -> A
 *
 * and combine it with shared_sched into
 *
 *	D -> [S -> A]
 *
 * If this results in an empty relation, no copying needs to be performed
 * at this point.
 * Otherwise, we invert the relation and combine it with "schedule" into
 *
 *	[S -> A] -> L
 *
 * The actual additional piece of the schedule is obtained from combining
 *
 *	[S -> A] -> S
 *
 * with a mapping
 *
 *	[s_0,...] -> [0,s_{tile_first},0,..., val, 0, 0, ... 0]
 *
 * The position of "val" corresponds to the innermost loop that affects
 * the tile and the value indicates where the copying is scheduled
 * with respect to the actual kernel code (at value 0).
 * Reads are scheduled before the code, writes to global memory from
 * private memory are scheduled at values 1 to s, writes to global
 * memory from shared memory are scheduled at values s + 2 to 2 * s + 1.
 *
 * If we are scheduling a read from global memory to shared memory,
 * we insert a synchronization before the kernel code (at the innermost
 * level).
 * If we are scheduling a write to global memory, then we add
 * a synchronization after all writes (at value 2 * s + 2).
 * However, there is no need for a synchronization after the outermost loop.
 * A write to global memory from private memory at the innermost level
 * does not require a synchronization, because it is covered by
 * the synchronization after the kernel inserted by body_schedule.
 */
static __isl_give isl_union_map *add_group_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *res, __isl_keep isl_union_map *schedule,
	__isl_keep isl_union_map *shared_sched,
	struct gpu_array_ref_group *group, int read, int k, int s)
{
	int n;
	int pos, val;
	isl_space *space;
	isl_union_map *access;
	isl_map *map, *proj, *access_map;
	isl_id *id;

	access = group_access_relation(group, read, !read);
	access = isl_union_map_range_product(isl_union_map_copy(shared_sched),
						access);

	if (isl_union_map_is_empty(access)) {
		isl_union_map_free(access);
		return res;
	}

	access = isl_union_map_reverse(access);
	access = isl_union_map_apply_range(access,
					isl_union_map_copy(schedule));
	access_map = isl_map_from_union_map(access);

	space = isl_space_copy(group->array->dim);
	space = isl_space_from_range(space);
	space = isl_space_add_dims(space, isl_dim_in, gen->shared_len);
	map = isl_map_domain_map(isl_map_universe(space));

	space = isl_union_map_get_space(schedule);
	pos = group->last_shared + 1 - gen->tile_first;
	assert(pos >= 0);
	if (read)
		val = -2 - k;
	else if (group->private_tile)
		val = 1 + k;
	else
		val = 1 + s + 1 + k;
	proj = insert_even(gen, space, pos, val);
	map = isl_map_apply_range(map, proj);

	access_map = isl_map_range_product(access_map, map);

	id = isl_id_alloc(gen->ctx, read ? "read" : "write", group);
	access_map = isl_map_set_tuple_id(access_map, isl_dim_in, id);

	res = isl_union_map_add_map(res, access_map);

	n = gen->shared_len - gen->tile_first;
	if (read) {
		if (!group->private_tile)
			res = add_sync_schedule(gen, res, schedule,
						shared_sched, n, -1);
	} else {
		if (pos == 0)
			return res;
		if (pos == n && group->private_tile)
			return res;
		res = add_sync_schedule(gen, res, schedule, shared_sched,
					pos, 2 * s + 2);
	}

	return res;
}

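/* Summary of the statement-level values used above (illustrative comment
 * added in this edit; the values follow from the code above): at a given
 * loop level, with s groups and k the current group,
 *
 *	reads		-2 - k		(before the sync at -1)
 *	sync		-1
 *	kernel code	0
 *	private writes	1 + k		(values 1 to s)
 *	sync		1 + s		(inserted by body_schedule)
 *	shared writes	s + 2 + k	(values s + 2 to 2 * s + 1)
 *	sync		2 * s + 2
 */
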
/* Return a schedule for the shared tile loops based on the current
 * AST context schedule.
 *
 * We create a "shared_sched" that maps the domains to the first
 * shared_len dimensions of the computed schedule, project out the
 * first tile_first dimensions (as these are already covered by
 * the host code) and insert "statement-level" dimensions at even
 * positions so that we can schedule copy blocks and synchronization
 * before/after each level.
 *
 * In particular, copy blocks are inserted inside the innermost
 * level that affects the tile.  For the copying to global memory,
 * those from private memory are scheduled before those from shared
 * memory such that synchronization can be inserted between the two
 * at the innermost level.
 * Synchronization is inserted at the innermost level before the
 * actual kernel code if there is any copying from global memory
 * to shared memory.  It is inserted unconditionally at the innermost
 * level after the actual kernel code and the copying to global memory
 * from private memory (if any).  Finally, it is inserted after
 * any copying to global memory, except at the outermost level
 * and at the innermost level if there is no copying from shared
 * memory.  The copying from private memory is covered by the unconditional
 * synchronization at the innermost level.
 */
static __isl_give isl_union_map *body_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *schedule)
{
	isl_space *space;
	isl_union_map *res;
	isl_union_map *shared_sched;
	isl_union_map *sched;
	isl_map *proj, *map;
	int i, j, k, s;

	shared_sched = isl_union_map_copy(gen->tiled_sched);
	proj = projection(isl_union_map_get_space(shared_sched),
				gen->tiled_len, gen->shared_len);
	shared_sched = isl_union_map_apply_range(shared_sched,
				isl_union_map_from_map(proj));
	space = isl_union_map_get_space(shared_sched);
	proj = insert_even(gen, space, -1, 0);
	sched = isl_union_map_apply_range(isl_union_map_copy(shared_sched),
				isl_union_map_from_map(proj));

	res = isl_union_map_range_product(isl_union_map_copy(schedule), sched);

	s = 0;
	for (i = 0; i < gen->prog->n_array; ++i)
		s += gen->prog->array[i].n_group;

	k = 0;
	for (i = 0; i < gen->prog->n_array; ++i) {
		struct gpu_array_info *array = &gen->prog->array[i];

		for (j = 0; j < array->n_group; ++j) {
			struct gpu_array_ref_group *group;

			group = array->groups[j];
			if (!group->private_tile && !group->shared_tile)
				continue;
			res = add_group_schedule(gen, res, schedule,
						shared_sched, group, 0, k, s);
			res = add_group_schedule(gen, res, schedule,
						shared_sched, group, 1, k, s);
			++k;
		}
	}

	res = add_sync_schedule(gen, res, schedule, shared_sched,
			    gen->shared_len - gen->tile_first, 1 + s);

	isl_union_map_free(shared_sched);
	isl_union_map_free(schedule);

	return res;
}

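/* The result of body_schedule thus has the shape (illustrative note,
 * added in this edit)
 *
 *	D -> [L -> [0, s_{tile_first}, 0, ..., 0]]
 *
 * for the kernel code itself, together with extra "read", "write" and
 * "sync" statements at the non-zero even positions computed above.
 */
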
/* Generate code for "kernel" in the given "context".
 *
 * We first generate code for the shared tile loops (T1T, T1P and T2)
 * in a context that includes the block ids.
 * Within each iteration of these loops an additional code generation
 * is performed (within create_kernel_leaf) for the rest of the schedule
 * in a context that includes the thread ids.
 */
static __isl_give isl_ast_node *generate_kernel(struct gpu_gen *gen,
	__isl_keep isl_ast_build *build, __isl_keep isl_set *host_domain,
	__isl_keep isl_multi_pw_aff *grid_size)
{
	isl_space *space;
	isl_set *set;
	isl_id_list *iterators;
	isl_union_map *schedule;
	isl_ast_node *tree;
	int sched_len;

	schedule = isl_ast_build_get_schedule(build);

	build = isl_ast_build_copy(build);
	build = isl_ast_build_restrict(build, isl_set_copy(host_domain));
	space = isl_ast_build_get_schedule_space(build);
	set = isl_set_universe(isl_space_copy(space));
	set = add_bounded_parameters_dynamic(set, grid_size, "b");
	build = isl_ast_build_restrict(build, set);

	schedule = body_schedule(gen, schedule);

	sched_len = 2 * (gen->shared_len - gen->tile_first) + 1;

	build = set_atomic_and_unroll(build, space, sched_len);
	iterators = generate_names(gen->ctx, sched_len, "g");
	build = isl_ast_build_set_iterators(build, iterators);
	build = isl_ast_build_set_create_leaf(build, &create_kernel_leaf, gen);
	tree = isl_ast_build_ast_from_schedule(build, schedule);
	isl_ast_build_free(build);

	return tree;
}

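/* For example (added note): with shared_len - tile_first = 2, the code
 * generation above uses sched_len = 5 schedule dimensions, with iterators
 * named g0 ... g4, alternating statement-level and loop dimensions.
 */
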
/* Attach "id" to the given node.
 */
static __isl_give isl_ast_node *attach_id(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build, void *user)
{
	isl_id *id = user;

	node = isl_ast_node_set_annotation(node, id);

	return node;
}

/* Construct an AST node for performing a kernel launch and attach
 * the information about the kernel to that node.
 *
 * The kernel AST has been constructed in the context of the range
 * of "schedule".  In particular, the grid size has been computed
 * in the context.  We therefore still need to make sure that these
 * constraints are expressed in the code.  We do this by creating a schedule
 *
 *	kernel[] -> [S -> []]
 *
 * where S is the schedule domain, i.e., the range of "schedule".
 * The AST generation will then create a single call surrounded by
 * all the conditions in "S" that have not been expressed yet.
 *
 * The kernel information is attached to this node in attach_id.
 */
static __isl_give isl_ast_node *construct_launch(
	__isl_take isl_ast_build *build, __isl_take isl_union_map *schedule,
	__isl_take struct ppcg_kernel *kernel)
{
	isl_id *id;
	isl_ctx *ctx;
	isl_union_set *domain;
	isl_set *set;
	isl_map *map;
	isl_ast_node *node;

	ctx = isl_ast_build_get_ctx(build);

	id = isl_id_alloc(ctx, NULL, kernel);
	id = isl_id_set_free_user(id, &ppcg_kernel_free);

	domain = isl_union_map_range(schedule);
	set = isl_set_from_union_set(domain);
	map = isl_map_from_domain(set);
	map = isl_map_from_range(isl_map_wrap(map));
	map = isl_map_set_tuple_name(map, isl_dim_in, "kernel");
	schedule = isl_union_map_from_map(map);

	build = isl_ast_build_set_at_each_domain(build, &attach_id, id);
	node = isl_ast_build_ast_from_schedule(build, schedule);
	isl_ast_build_free(build);

	return node;
}

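/* Illustrative shape of the launch schedule built above (added example):
 * if the range of "schedule" is S = [h0, h1], then the schedule is
 *
 *	{ kernel[] -> [[h0, h1] -> []] }
 *
 * so the AST generator emits a single kernel[] call guarded by any
 * constraints on h0 and h1 that have not been generated yet.
 */
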
/* This function is called for each leaf in the AST of the host code.
 * We first specialize the schedule to the site of the leaf, compute
 * the size of shared memory and then construct the body of the host code
 * and the associated kernel.
 *
 * The necessary information for printing the kernel launch is
 * stored in a struct ppcg_kernel and attached to the leaf node
 * created to represent the launch.
 */
static __isl_give isl_ast_node *create_host_leaf(
	__isl_take isl_ast_build *build, void *user)
{
	struct gpu_gen *gen = (struct gpu_gen *) user;
	isl_id *id;
	isl_ast_node *node;
	struct ppcg_kernel *kernel;
	isl_set *host_domain;
	isl_union_map *schedule;
	isl_union_map *local_sched;
	isl_union_map *access;
	isl_union_set *domain;
	int i;

	schedule = isl_ast_build_get_schedule(build);

	isl_union_map_foreach_map(schedule, &extract_tile_len, gen);
	read_sizes(gen);

	domain = isl_union_map_domain(isl_union_map_copy(schedule));

	local_sched = isl_union_map_copy(gen->sched);
	local_sched = isl_union_map_intersect_domain(local_sched, domain);
	access = isl_union_map_union(isl_union_map_copy(gen->prog->read),
					isl_union_map_copy(gen->prog->write));
	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(local_sched));

	gen->tiled_sched = tile_schedule(gen, local_sched);
	gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
	gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);

	gen->local_sched = isl_union_map_copy(gen->tiled_sched);
	gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
	gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);

	kernel = gen->kernel = isl_calloc_type(gen->ctx, struct ppcg_kernel);
	if (!kernel)
		goto error;

	kernel->id = gen->kernel_id++;
	kernel->context = isl_union_map_params(isl_union_map_copy(schedule));
	kernel->grid_size = extract_grid_size(gen, kernel);
	extract_block_size(gen, kernel);
	kernel->arrays = isl_union_map_range(access);
	kernel->space = isl_ast_build_get_schedule_space(build);

	gen->private_access = NULL;
	compute_shared_sched(gen);
	gen->privatization = compute_privatization(gen);
	group_references(gen);
	compute_private_access(gen);
	check_shared_memory_bound(gen);
	compute_group_tilings(gen);
	host_domain = isl_set_from_union_set(isl_union_map_range(
						isl_union_map_copy(schedule)));
	localize_bounds(gen, kernel, host_domain);

	gen->local_sched = interchange_for_unroll(gen, gen->local_sched);

	kernel->tree = generate_kernel(gen, build, host_domain,
					kernel->grid_size);
	create_kernel_vars(gen, kernel);

	free_local_array_info(gen);
	isl_map_free(gen->privatization);
	isl_union_map_free(gen->private_access);
	isl_union_map_free(gen->local_sched);
	isl_union_map_free(gen->tiled_sched);
	isl_union_map_free(gen->shared_sched);
	isl_union_map_free(gen->shared_proj);
	isl_set_free(host_domain);
	free(gen->tile_size);

	node = construct_launch(build, schedule, kernel);

	return node;
error:
	isl_union_map_free(schedule);
	return NULL;
}

/* Use isl to generate code for the outer gen->tile_first loops
 * of the global schedule in gen->sched, resulting in the host code.
 * Within each iteration of this partial schedule, i.e., for each kernel
 * launch, create_host_leaf takes care of generating the kernel code.
 */
static __isl_give isl_ast_node *generate_host_code(struct gpu_gen *gen)
{
	isl_ast_build *build;
	isl_ast_node *tree;
	isl_union_map *sched;
	isl_map *proj;
	isl_id_list *iterators;

	sched = isl_union_map_copy(gen->sched);
	proj = projection(isl_union_map_get_space(sched),
				gen->untiled_len, gen->tile_first);
	sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));

	isl_options_set_ast_build_group_coscheduled(gen->ctx, 1);
	build = isl_ast_build_from_context(isl_set_copy(gen->prog->context));
	iterators = generate_names(gen->ctx, gen->tile_first, "h");
	build = isl_ast_build_set_iterators(build, iterators);
	build = isl_ast_build_set_create_leaf(build, &create_host_leaf, gen);
	tree = isl_ast_build_ast_from_schedule(build, sched);
	isl_ast_build_free(build);

	return tree;
}

__isl_give isl_union_map *extract_sizes_from_str(isl_ctx *ctx, const char *str)
{
	if (!str)
		return NULL;
	return isl_union_map_read_from_str(ctx, str);
}

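/* Example of a sizes string accepted above (added illustration; the exact
 * option syntax is documented with ppcg's --sizes option):
 *
 *	{ kernel[0] -> tile[32, 32]; kernel[0] -> grid[256];
 *	  kernel[0] -> block[16, 16] }
 */
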
/* Information about the outermost tilable bands in the forest of bands.
 *
 * tile_len and n_parallel are only set on band_info structures
 * that correspond to outermost bands.  For other bands (in particular,
 * ancestors of the outermost bands), n_parallel is set to 0.
 *
 * prefix is the (padded) schedule leading up to the outermost tilable bands.
 *
 * tile_first is the number of schedule dimensions in prefix.
 *
 * suffix is the schedule of the outermost tilable bands and their descendants.
 */
struct band_info {
	struct gpu_gen *gen;
	int tile_first;
	int tile_len;
	int n_parallel;
	isl_union_map *prefix;
	isl_union_map *suffix;
};

/* Set tile_len and n_parallel of the statement to that of
 * their outermost band, recorded in the band_info.
 */
static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
{
	struct band_info *info = user;
	struct gpu_stmt *stmt;
	isl_id *id;

	id = isl_map_get_tuple_id(map, isl_dim_in);
	stmt = find_stmt(info->gen->prog, id);
	isl_id_free(id);

	stmt->tile_len = info->tile_len;
	stmt->n_parallel = info->n_parallel;

	isl_map_free(map);

	return 0;
}

static void list_select_outer_band(struct gpu_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info);

/* Check if this band has any parallel loops.  If so, take it as
 * the outermost tilable band.  If not, continue looking for the
 * outermost tilable band in the children of the current band.
 */
static void band_select_outer_band(struct gpu_gen *gen,
	__isl_take isl_band *band, int pos, struct band_info *info)
{
	int n = isl_band_n_member(band);
	int n_parallel;

	for (n_parallel = 0; n_parallel < n; ++n_parallel)
		if (!isl_band_member_is_zero_distance(band, n_parallel))
			break;

	info->n_parallel = n_parallel;
	if (n_parallel) {
		gen->any_parallelism = 1;
		info->gen = gen;
		info->tile_first = pos;
		info->tile_len = n;
		info->prefix = isl_band_get_prefix_schedule(band);
		info->suffix = isl_union_map_flat_range_product(
				isl_band_get_partial_schedule(band),
				isl_band_get_suffix_schedule(band));
		isl_union_map_foreach_map(info->prefix,
						&set_stmt_tile_len, info);
	} else if (isl_band_has_children(band)) {
		isl_band_list *children;
		children = isl_band_get_children(band);
		list_select_outer_band(gen, children, pos + n, info);
	} else {
		info->gen = gen;
		info->tile_first = pos + n;
		info->tile_len = 0;
		info->prefix = isl_union_map_flat_range_product(
				isl_band_get_prefix_schedule(band),
				isl_band_get_partial_schedule(band));
		info->suffix = isl_band_get_suffix_schedule(band);
		isl_union_map_foreach_map(info->prefix,
						&set_stmt_tile_len, info);
	}

	isl_band_free(band);
}

/* Comparison function that returns a non-zero value for band_infos
 * with different tile_len fields or different n_parallel fields.
 */
static int cmp_band(const void *p1, const void *p2)
{
	const struct band_info *info1 = p1;
	const struct band_info *info2 = p2;

	if (info1->tile_len != info2->tile_len)
		return info1->tile_len - info2->tile_len;

	return info1->n_parallel - info2->n_parallel;
}

/* Extend "umap" with coordinates with fixed value "val"
 * to a total length of "dst_len", assuming the original dimension is "src_len".
 */
static __isl_give isl_union_map *extend_range(
	__isl_take isl_union_map *umap, int src_len, int dst_len, int val)
{
	isl_space *dim;
	isl_map *map;
	int i;

	dim = isl_union_map_get_space(umap);
	map = isl_map_reverse(projection(dim, dst_len, src_len));
	for (i = src_len; i < dst_len; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, val);

	umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));

	return umap;
}

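/* For example (added note): extend_range(umap, 2, 4, 7) turns each map
 * { [i] -> [a, b] } in umap into { [i] -> [a, b, 7, 7] }.
 */
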
/* Group bands with the same values for tile_len and n_parallel.
 * The prefix schedule is then extended with a fixed coordinate that
 * is different for each such group.
 * Note that the actual values for this coordinate are not important.
 * The bands have already been effectively separated at a higher level
 * or they are independent and may be executed in parallel.
 * The list of band_info has been sorted before this function is called.
 */
static void separate_bands(struct band_info *info, int n)
{
	int i;
	int j = 0;

	for (i = 0; i < n; ++i) {
		int l = info[i].tile_first;

		if (i &&
		    (info[i].tile_len != info[i - 1].tile_len ||
		     info[i].n_parallel != info[i - 1].n_parallel))
			j++;

		info[i].prefix = extend_range(info[i].prefix,
						l, l + 1, j);
		info[i].tile_first = l + 1;
	}
}

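/* Worked example (added): for sorted bands with (tile_len, n_parallel)
 * equal to (2, 1), (2, 1) and (3, 2), the prefixes are extended with the
 * coordinates 0, 0 and 1 respectively, separating the third band from
 * the first two.
 */
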
/* Select the outermost bands in the elements of the list, align
 * their prefix schedules, separate bands with different values
 * for tile_len and/or n_parallel and then combine the resulting
 * prefix and suffix schedules into a single pair of prefix and
 * suffix schedules for the entire list.
 */
static void list_select_outer_band(struct gpu_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info)
{
	isl_band *band;
	int i;
	int n = isl_band_list_n_band(list);
	isl_ctx *ctx = isl_band_list_get_ctx(list);
	struct band_info *info;
	int max_tile_first;
	isl_union_map *prefix;
	isl_union_map *suffix;

	assert(n >= 1);
	info = isl_calloc_array(ctx, struct band_info, n);
	assert(info);

	max_tile_first = 0;
	for (i = 0; i < n; ++i) {
		band = isl_band_list_get_band(list, i);
		band_select_outer_band(gen, band, pos, &info[i]);
		if (info[i].tile_first > max_tile_first)
			max_tile_first = info[i].tile_first;
	}

	for (i = 0; i < n; ++i) {
		if (info[i].tile_first == max_tile_first)
			continue;
		info[i].prefix = extend_range(info[i].prefix,
					info[i].tile_first, max_tile_first, 0);
		info[i].tile_first = max_tile_first;
	}

	qsort(info, n, sizeof(struct band_info), &cmp_band);

	for (i = 0; i < n - 1; ++i)
		if (info[i].tile_len != info[i + 1].tile_len ||
		    info[i].n_parallel != info[i + 1].n_parallel)
			break;

	if (i < n - 1)
		separate_bands(info, n);

	prefix = info[0].prefix;
	suffix = info[0].suffix;

	for (i = 1; i < n; ++i) {
		prefix = isl_union_map_union(prefix, info[i].prefix);
		suffix = isl_union_map_union(suffix, info[i].suffix);
	}

	list_info->tile_first = info[0].tile_first;
	list_info->tile_len = -1;
	list_info->prefix = prefix;
	list_info->suffix = suffix;

	isl_band_list_free(list);
	free(info);
}

/* Select the outermost tilable band that (by construction)
 * has at least one parallel loop.
 * The starting position of the aligned band is stored in gen->tile_first.
 * The sizes and number of parallel loops may be different in different
 * parts of the band forest and are therefore stored in the gpu_stmts.
 *
 * Return the complete schedule, with the tilable bands aligned
 * at gen->tile_first and padded with zero, if needed.
 */
static __isl_give isl_union_map *select_outer_tilable_band(struct gpu_gen *gen,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *list;
	struct band_info info;

	gen->n_parallel = 0;
	gen->tile_len = -1;

	list = isl_schedule_get_band_forest(schedule);

	list_select_outer_band(gen, list, 0, &info);

	gen->tile_first = info.tile_first;
	info.suffix = align_range(info.suffix);

	return isl_union_map_flat_range_product(info.prefix, info.suffix);
}

/* Set gen->untiled_len to the number of scheduling dimensions
 * for the schedule of the first domain.
 * We assume here that this number is the same for all domains.
 */
static int set_untiled_len(__isl_take isl_map *map, void *user)
{
	unsigned *untiled_len = user;

	*untiled_len = isl_map_dim(map, isl_dim_out);

	isl_map_free(map);
	return -1;
}

/* Compute an appropriate schedule based on the accesses in
 * gen->read and gen->write.
 *
 * We use the dependences in gen->prog->scop to compute
 * a schedule that has a parallel loop in each tilable band.
 * Finally, we select the outermost tilable band.
 */
static void compute_schedule(struct gpu_gen *gen)
{
	isl_union_set *domain;
	isl_union_map *dep_raw, *dep;
	isl_union_map *sched;
	isl_schedule *schedule;

	dep_raw = isl_union_map_copy(gen->prog->scop->dep_flow);

	dep = isl_union_map_copy(gen->prog->scop->dep_false);
	dep = isl_union_map_union(dep, dep_raw);
	dep = isl_union_map_coalesce(dep);

	domain = isl_union_set_copy(gen->prog->scop->domain);
	domain = isl_union_set_intersect_params(domain,
				isl_set_copy(gen->prog->scop->context));
	schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
				isl_union_map_copy(dep), dep);
	if (gen->options->debug->dump_schedule)
		isl_schedule_dump(schedule);

	sched = select_outer_tilable_band(gen, schedule);

	isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
	sched = isl_union_map_intersect_domain(sched, domain);
	gen->sched = sched;

	isl_schedule_free(schedule);
}

/* Compute the sets of array elements that need to be copied in and out.
 *
 * In particular, for each array that is written anywhere in gen->prog and
 * that is visible outside the corresponding scop, we copy out its entire
 * extent.
 *
 * Any array element that is read without first being written needs
 * to be copied in.  Furthermore, if there are any array elements that
 * are copied out, but that are not written inside gen->prog, then
 * they also need to be copied in to ensure that the value after execution
 * is the same as the value before execution.
 * While computing the set of array elements that
 * are copied out but not written, we intersect both sets with the context.
 * This helps in those cases where the arrays are declared with a fixed size,
 * while the accesses are parametric and the context assigns a fixed value
 * to the parameters.
 */
static void compute_copy_in_and_out(struct gpu_gen *gen)
{
	int i;
	isl_union_set *write;
	isl_union_set *copy_in, *copy_out;
	isl_union_set *not_written;
	isl_union_map *uninitialized;

	write = isl_union_map_range(isl_union_map_copy(gen->prog->write));
	write = isl_union_set_intersect_params(write,
					isl_set_copy(gen->prog->context));
	copy_out = isl_union_set_empty(isl_union_set_get_space(write));

	for (i = 0; i < gen->prog->n_array; ++i) {
		isl_space *space;
		isl_set *write_i;
		int empty;

		if (gen->prog->array[i].local)
			continue;

		space = isl_space_copy(gen->prog->array[i].dim);
		write_i = isl_union_set_extract_set(write, space);
		empty = isl_set_fast_is_empty(write_i);
		isl_set_free(write_i);

		if (empty)
			continue;

		write_i = isl_set_copy(gen->prog->array[i].extent);
		copy_out = isl_union_set_add_set(copy_out, write_i);
	}

	copy_out = isl_union_set_intersect_params(copy_out,
					isl_set_copy(gen->prog->context));

	gen->prog->copy_out = isl_union_set_copy(copy_out);

	uninitialized = isl_union_map_copy(gen->prog->scop->live_in);
	copy_in = isl_union_map_range(uninitialized);

	not_written = isl_union_set_subtract(copy_out, write);
	copy_in = isl_union_set_union(copy_in, not_written);
	gen->prog->copy_in = copy_in;
}

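/* Worked example (added): for an array A[10] that is visible outside the
 * scop, with write set { A[i] : 0 <= i <= 4 } and live-in set { A[5] },
 * the entire extent { A[i] : 0 <= i <= 9 } is copied out, and
 *
 *	copy_in = { A[5] } union ({ A[i] : 0 <= i <= 9 } - write)
 *	        = { A[i] : 5 <= i <= 9 }
 */
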
static struct gpu_stmt_access **expr_extract_access(struct pet_expr *expr,
	struct gpu_stmt_access **next_access)
{
	struct gpu_stmt_access *access;
	isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);

	access = isl_alloc_type(ctx, struct gpu_stmt_access);
	assert(access);
	access->next = NULL;
	access->read = expr->acc.read;
	access->write = expr->acc.write;
	access->access = isl_map_copy(expr->acc.access);

	*next_access = access;
	next_access = &(*next_access)->next;
	return next_access;
}

static struct gpu_stmt_access **expr_extract_accesses(struct pet_expr *expr,
	struct gpu_stmt_access **next_access)
{
	int i;

	for (i = 0; i < expr->n_arg; ++i)
		next_access = expr_extract_accesses(expr->args[i],
							next_access);

	if (expr->type == pet_expr_access)
		next_access = expr_extract_access(expr, next_access);

	return next_access;
}

static void pet_stmt_extract_accesses(struct gpu_stmt *stmt)
{
	struct gpu_stmt_access **next_access = &stmt->accesses;

	stmt->accesses = NULL;
	expr_extract_accesses(stmt->body, next_access);
}

/* Return an array of gpu_stmt representing the statements in "scop".
 */
static struct gpu_stmt *extract_stmts(isl_ctx *ctx, struct ppcg_scop *scop,
	__isl_keep isl_set *context)
{
	int i;
	struct gpu_stmt *stmts;

	stmts = isl_calloc_array(ctx, struct gpu_stmt, scop->n_stmt);
	if (!stmts)
		return NULL;

	for (i = 0; i < scop->n_stmt; ++i) {
		struct gpu_stmt *s = &stmts[i];

		s->id = isl_set_get_tuple_id(scop->stmts[i]->domain);
		s->body = scop->stmts[i]->body;
		pet_stmt_extract_accesses(s);
	}

	return stmts;
}

/* Callback for ppcg_print_guarded that calls the callback for generate_gpu.
 */
static __isl_give isl_printer *print_gpu(__isl_take isl_printer *p, void *user)
{
	struct gpu_gen *gen = user;

	return gen->print(p, gen->prog, gen->tree, gen->print_user);
}

/* Replace the scop in the "input" file by equivalent code
 * that uses the GPU and print the result to "out".
 * "scop" is assumed to correspond to this scop.
 * The code before the scop is first copied to "out",
 * then the transformed scop is printed and finally
 * the code after the scop is copied to "out".
 * After generating an AST for the transformed scop as explained below,
 * we call "print" to print the AST in the desired output format
 * to a printer hooked up to "out".
 *
 * If it turns out that it does not make sense to generate GPU code,
 * then we generate CPU code instead.
 *
 * The GPU code is generated in a context where at least one
 * statement instance is executed.  The corresponding guard (if any) is printed
 * around the entire generated GPU code, except for the declaration
 * of the arrays that are visible outside of the scop and that therefore
 * cannot be declared inside the body of any possible guard.
 *
 * We first compute a schedule that respects the dependences
 * of the original program and select the outermost band
 * of tilable dimensions that has at least one parallel loop.
 * We then have three blocks of dimensions
 *
 *	H		B		G
 *
 * The tilable band "B" is first tiled according to "tile" sizes, resulting
 * in
 *
 *	H	T		P		G
 *
 * For each iteration of the T loop and for each array, we compute
 * the array elements accessed by that iteration, construct a rectangular
 * box around it and shift it to the origin.  The result is used
 * as shared memory for the array.
 *
 * We then split off at most 2 parallel loops from the T loops and
 * at most 3 parallel loops from the P loops
 *
 *	H	T1	T2	P1	P2	G
 *
 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
 * according to "grid"/"block" sizes.
 *
 *	H	T1T	T1P	T2	P1T	P1P	P2	G
 *
 * Finally, the T1P and P1P iterators are equated to the block and
 * thread dimensions respectively and so are effectively removed.
 * The H loops are run on the host.  The T1T, T2, P1T, P2 and G loops
 * are run on the GPU.
 *
 * Code is generated in three stages.  We first generate code for the
 * host (the H loops), with iterators h%d.  Then, for each leaf node
 * of the resulting AST, we generate code for the shared loops (up to
 * and including T2), with iterators g%d and after equating the H loops
 * to h%d parameters and the T1P loops to the block dimensions.
 * Finally, we generate code for the remaining loops in a similar fashion.
 */
int generate_gpu(isl_ctx *ctx, const char *input, FILE *out,
	struct ppcg_scop *scop, struct ppcg_options *options,
	__isl_give isl_printer *(*print)(__isl_take isl_printer *p,
		struct gpu_prog *prog, __isl_keep isl_ast_node *tree,
		void *user), void *user)
{
	struct gpu_gen gen;
	struct gpu_prog *prog;
	isl_set *context, *guard;
	isl_printer *p;
	FILE *in;

	if (!scop)
		return -1;

	in = fopen(input, "r");
	copy(in, out, 0, scop->start);

	prog = gpu_prog_alloc(ctx, scop);
	if (!prog)
		return -1;

	p = isl_printer_to_file(ctx, out);
	p = isl_printer_set_output_format(p, ISL_FORMAT_C);

	context = isl_set_copy(prog->context);
	guard = isl_union_set_params(isl_union_set_copy(prog->scop->domain));
	prog->context = isl_set_intersect(prog->context, isl_set_copy(guard));

	gen.ctx = ctx;
	gen.prog = prog;
	gen.sizes = extract_sizes_from_str(ctx, options->sizes);
	gen.options = options;
	gen.kernel_id = 0;
	gen.any_parallelism = 0;
	compute_schedule(&gen);

	if (!gen.any_parallelism) {
		isl_set_free(context);
		isl_set_free(guard);
		p = print_cpu(p, scop, options);
	} else {
		compute_copy_in_and_out(&gen);

		gen.print = print;
		gen.print_user = user;
		gen.tree = generate_host_code(&gen);
		p = ppcg_print_exposed_declarations(p, prog->scop);
		p = ppcg_print_guarded(p, guard, context, &print_gpu, &gen);
		isl_ast_node_free(gen.tree);
	}

	clear_gpu_gen(&gen);

	isl_printer_free(p);

	gpu_prog_free(prog);

	copy(in, out, scop->end, -1);
	fclose(in);

	return 0;
}

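/* Hypothetical usage sketch (added; print_ast, input.c and the option
 * set-up are illustrative and not part of ppcg):
 *
 *	static __isl_give isl_printer *print_ast(__isl_take isl_printer *p,
 *		struct gpu_prog *prog, __isl_keep isl_ast_node *tree,
 *		void *user)
 *	{
 *		return isl_ast_node_print(tree, p,
 *			isl_ast_print_options_alloc(isl_printer_get_ctx(p)));
 *	}
 *
 *	generate_gpu(ctx, "input.c", stdout, scop, options, &print_ast, NULL);
 */
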
struct gpu_prog *gpu_prog_alloc(isl_ctx *ctx, struct ppcg_scop *scop)
{
	struct gpu_prog *prog;

	if (!scop)
		return NULL;

	prog = isl_calloc_type(ctx, struct gpu_prog);
	assert(prog);

	prog->ctx = ctx;
	prog->scop = scop;
	prog->context = isl_set_copy(scop->context);
	prog->n_stmts = scop->n_stmt;
	prog->stmts = extract_stmts(ctx, scop, prog->context);
	prog->read = isl_union_map_copy(scop->reads);
	prog->write = isl_union_map_copy(scop->writes);

	if (!prog->stmts)
		return gpu_prog_free(prog);

	collect_array_info(prog);

	return prog;
}

void *gpu_prog_free(struct gpu_prog *prog)
{
	if (!prog)
		return NULL;
	free_array_info(prog);
	free_stmts(prog->stmts, prog->n_stmts);
	isl_union_set_free(prog->copy_in);
	isl_union_set_free(prog->copy_out);
	isl_union_map_free(prog->read);
	isl_union_map_free(prog->write);
	isl_set_free(prog->context);
	free(prog);
	return NULL;
}