/*
 * Copyright 2010-2011 INRIA Saclay
 * Copyright 2012 Ecole Normale Superieure
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * 91893 Orsay, France
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <assert.h>
#include <stdlib.h>

#include <isl/polynomial.h>
#include <isl/union_set.h>
#include <isl/aff.h>
#include <isl/ilp.h>
#include <isl/flow.h>
#include <isl/band.h>
#include <isl/schedule.h>
#include <isl/options.h>
#include <isl/ast_build.h>

#include "gpu.h"
#include "schedule.h"
#include "ppcg_options.h"

/* The fields stride, shift and shift_map only contain valid information
 * if shift != NULL.
 * If so, they express that the current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * shift_map contains the mapping
 *
 *	i -> (i + shift)/stride
 *
 * Let D represent the initial shared_len dimensions of the computed schedule.
 * The spaces of "lb" and "shift" are of the form
 *
 *	D -> [b]
 *
 * "shift_map" is of the form
 *
 *	[D -> i] -> [D -> (i + shift(D))/stride]
 */
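/* For example, if a given index only takes odd values 2k + 1
 * (a purely illustrative case, not taken from any particular input),
 * then "stride" is 2 and "shift" is -1, since adding -1 to an odd value
 * always yields a multiple of 2, and shift_map maps
 * [D -> i] to [D -> (i - 1)/2].
 */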
struct gpu_array_bound {
	isl_int size;
	isl_aff *lb;

	isl_int stride;
	isl_aff *shift;
	isl_basic_map *shift_map;
};

struct gpu_array_info;

/* A group of array references in a kernel that should be handled together.
 * If private_bound is not NULL, then it is mapped to registers.
 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
 * Otherwise, it is accessed from global memory.
 */
struct gpu_array_ref_group {
	/* The references in this group access this array. */
	struct gpu_array_info *array;
	/* Position of this group in the list of reference groups of array. */
	int nr;

	/* The following fields are used during the construction of the groups.
	 * access is the combined access relation relative to the shared
	 * memory tiling. In particular, the domain of the map corresponds
	 * to the first shared_len dimensions of the computed schedule.
	 * write is set if any access in the group is a write.
	 */
	isl_map *access;
	int write;

	/* For each index, size and offset of piece in shared memory. */
	struct gpu_array_bound *shared_bound;

	/* For each index, size and offset of piece in private memory. */
	struct gpu_array_bound *private_bound;

	/* References in this group; point to elements of a linked list. */
	int n_ref;
	struct gpu_stmt_access **refs;

	/* Last shared memory tile dimension that affects tile of this group. */
	int last_shared;
};

struct gpu_gen {
	isl_ctx *ctx;
	struct ppcg_options *options;

	struct gpu_prog *prog;

	/* tile, grid and block sizes for each kernel */
	isl_union_map *sizes;

	/* Identifier of current kernel. */
	int kernel_id;
	/* Pointer to the current kernel. */
	struct ppcg_kernel *kernel;

	/* First tile dimension. */
	int tile_first;
	/* Number of tile dimensions. */
	int tile_len;
	/* Number of initial parallel loops among tile dimensions. */
	int n_parallel;

	/* Number of dimensions determining shared memory. */
	int shared_len;

	/* Number of rows in the untiled schedule. */
	int untiled_len;
	/* Number of rows in the tiled schedule. */
	int tiled_len;
	/* Number of rows in schedule after tiling/wrapping over threads. */
	int thread_tiled_len;

	/* Global untiled schedule. */
	isl_union_map *sched;
	/* Local (per kernel launch) tiled schedule. */
	isl_union_map *tiled_sched;
	/* Local schedule per shared memory tile loop iteration. */
	isl_union_map *local_sched;

	/* Local tiled schedule projected onto the shared tile loops and
	 * the loops that will be wrapped over the threads,
	 * with all shared tile loops parametrized.
	 */
	isl_union_map *shared_sched;
	/* Projects out the loops that will be wrapped over the threads
	 * from shared_sched.
	 */
	isl_union_map *shared_proj;

	/* A map that takes the range of shared_sched as input,
	 * wraps the appropriate loops over the threads and then projects
	 * out these loops.
	 */
	isl_map *privatization;

	/* A map from the shared memory tile loops and the thread indices
	 * (as parameters) to the set of accessed memory elements that
	 * will be accessed through private copies.
	 */
	isl_union_map *private_access;

	/* The schedule for the current private/shared access
	 * (within print_private_access or print_shared_access).
	 */
	isl_map *copy_sched;
	/* The array reference group corresponding to copy_sched. */
	struct gpu_array_ref_group *copy_group;
	/* copy_group->private_bound or copy_group->shared_bound */
	struct gpu_array_bound *copy_bound;

	/* First loop to unroll (or -1 if none) in the current part of the
	 * schedule.
	 */
	int first_unroll;

	int n_grid;
	int n_block;
	/* Note: in the input file, the sizes of the grid and the blocks
	 * are specified in the order x, y, z, but internally, the sizes
	 * are stored in reverse order, so that the last element always
	 * refers to the x dimension.
	 */
	int grid_dim[2];
	int block_dim[3];
	int *tile_size;
};

/* Print the name of the local copy of a given group of array references.
 */
static __isl_give isl_printer *print_array_name(__isl_take isl_printer *p,
	struct gpu_array_ref_group *group)
{
	int global = 0;

	if (group->private_bound)
		p = isl_printer_print_str(p, "private_");
	else if (group->shared_bound)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}

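/* For example, for an array called "A" with more than one reference group,
 * a group mapped to registers prints as "private_A_0", a group mapped to
 * shared memory as "shared_A_0" and a group accessed from global memory
 * simply as "A". The names here are illustrative.
 */
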
/* Collect all references to the given array and store pointers to them
 * in array->refs.
 */
static void collect_references(struct gpu_prog *prog,
	struct gpu_array_info *array)
{
	int i;
	int n;

	n = 0;
	for (i = 0; i < prog->n_stmts; ++i) {
		struct gpu_stmt *stmt = &prog->stmts[i];
		struct gpu_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (name && !strcmp(array->name, name))
				n++;
		}
	}

	array->n_ref = n;
	array->refs = isl_alloc_array(prog->ctx, struct gpu_stmt_access *, n);
	assert(array->refs);

	n = 0;
	for (i = 0; i < prog->n_stmts; ++i) {
		struct gpu_stmt *stmt = &prog->stmts[i];
		struct gpu_stmt_access *access;

		for (access = stmt->accesses; access; access = access->next) {
			const char *name;
			name = isl_map_get_tuple_name(access->access,
							isl_dim_out);
			if (!name || strcmp(array->name, name))
				continue;

			array->refs[n++] = access;
		}
	}
}

static struct gpu_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
{
	int i;
	struct gpu_array_bound *bound;

	bound = isl_alloc_array(ctx, struct gpu_array_bound, n_index);
	assert(bound);

	for (i = 0; i < n_index; ++i) {
		isl_int_init(bound[i].size);
		bound[i].lb = NULL;
		isl_int_init(bound[i].stride);
		bound[i].shift = NULL;
		bound[i].shift_map = NULL;
	}

	return bound;
}

static void free_bound_list(struct gpu_array_bound *bound, int n_index)
{
	int j;

	if (!bound)
		return;

	for (j = 0; j < n_index; ++j) {
		isl_int_clear(bound[j].size);
		isl_int_clear(bound[j].stride);
		isl_aff_free(bound[j].lb);
		isl_aff_free(bound[j].shift);
		isl_basic_map_free(bound[j].shift_map);
	}
	free(bound);
}

static struct pet_array *find_array(struct ppcg_scop *scop,
	__isl_keep isl_set *accessed)
{
	int i;
	isl_id *id;

	id = isl_set_get_tuple_id(accessed);

	for (i = 0; i < scop->n_array; ++i) {
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
		isl_id_free(id_i);
		if (id == id_i)
			break;
	}
	isl_id_free(id);

	return i < scop->n_array ? scop->arrays[i] : NULL;
}

/* Compute bounds on the host arrays based on the accessed elements
 * and collect all references to the array.
 *
 * If the array is zero-dimensional, i.e., a scalar, we check
 * whether it is read-only.
 */
static int extract_array_info(__isl_take isl_set *array, void *user)
{
	int i;
	struct gpu_prog *prog = (struct gpu_prog *)user;
	const char *name;
	int n_index;
	isl_pw_aff **bounds;
	struct pet_array *pa;

	n_index = isl_set_dim(array, isl_dim_set);
	name = isl_set_get_tuple_name(array);
	bounds = isl_alloc_array(isl_set_get_ctx(array),
				 isl_pw_aff *, n_index);
	assert(bounds);
	prog->array[prog->n_array].dim = isl_set_get_space(array);
	prog->array[prog->n_array].name = strdup(name);
	prog->array[prog->n_array].n_index = n_index;
	prog->array[prog->n_array].bound = bounds;

	pa = find_array(prog->scop, array);
	assert(pa);

	prog->array[prog->n_array].type = strdup(pa->element_type);
	prog->array[prog->n_array].size = pa->element_size;
	prog->array[prog->n_array].local = pa->declared && !pa->exposed;

	if (n_index == 0) {
		isl_set *space;
		isl_union_map *write;
		int empty;

		write = isl_union_map_copy(prog->write);
		space = isl_set_universe(isl_set_get_space(array));
		write = isl_union_map_intersect_range(write,
				isl_union_set_from_set(space));
		empty = isl_union_map_is_empty(write);
		isl_union_map_free(write);

		prog->array[prog->n_array].read_only = empty;
	}

	for (i = 0; i < n_index; ++i) {
		isl_set *dom;
		isl_local_space *ls;
		isl_aff *one;
		isl_pw_aff *bound;
		isl_set *size = i == 0 ? array : pa->extent;

		bound = isl_set_dim_max(isl_set_copy(size), i);
		assert(bound);
		dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
		ls = isl_local_space_from_space(isl_set_get_space(dom));
		one = isl_aff_zero_on_domain(ls);
		one = isl_aff_add_constant_si(one, 1);
		bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
		bound = isl_pw_aff_gist(bound, isl_set_copy(prog->context));

		bounds[i] = bound;
	}

	collect_references(prog, &prog->array[prog->n_array]);

	prog->n_array++;

	isl_set_free(array);
	return 0;
}

void collect_array_info(struct gpu_prog *prog)
{
	isl_union_set *arrays;

	arrays = isl_union_map_range(isl_union_map_copy(prog->read));
	arrays = isl_union_set_union(arrays,
		isl_union_map_range(isl_union_map_copy(prog->write)));
	arrays = isl_union_set_coalesce(arrays);

	prog->n_array = isl_union_set_n_set(arrays);
	prog->array = isl_alloc_array(prog->ctx,
				      struct gpu_array_info, prog->n_array);
	assert(prog->array);
	prog->n_array = 0;
	isl_union_set_foreach_set(arrays, &extract_array_info, prog);
	isl_union_set_free(arrays);
}

static void free_array_info(struct gpu_prog *prog)
{
	int i, j;

	for (i = 0; i < prog->n_array; ++i) {
		int n_index = prog->array[i].n_index;
		free(prog->array[i].type);
		free(prog->array[i].name);
		for (j = 0; j < n_index; ++j)
			isl_pw_aff_free(prog->array[i].bound[j]);
		isl_space_free(prog->array[i].dim);
		free(prog->array[i].bound);
		free(prog->array[i].refs);
	}
	free(prog->array);
}

/* Check if a gpu array is a scalar. A scalar is a value that is not stored
 * as an array or through a pointer reference, but as a single data element.
 * At the moment, scalars are represented as zero-dimensional arrays.
 */
int gpu_array_is_scalar(struct gpu_array_info *array)
{
	return (array->n_index == 0);
}

/* Is "array" a read-only scalar?
 */
int gpu_array_is_read_only_scalar(struct gpu_array_info *array)
{
	return gpu_array_is_scalar(array) && array->read_only;
}

/* Internal data structure for extract_size_of_type.
 * "type" specifies the name of the space that we want to extract.
 * "res" is used to store the subset of that space.
 */
struct ppcg_extract_size_data {
	const char *type;
	isl_set *res;
};

/* This function is called for each set in a union_set.
 * If the name of the set matches data->type, we store the
 * set in data->res.
 */
static int extract_size_of_type(__isl_take isl_set *size, void *user)
{
	struct ppcg_extract_size_data *data = user;
	const char *name;

	name = isl_set_get_tuple_name(size);
	if (name && !strcmp(name, data->type)) {
		data->res = size;
		return -1;
	}

	isl_set_free(size);
	return 0;
}

/* Given a union map { kernel[i] -> *[...] },
 * return the range in the space called "type" for the kernel with
 * sequence number "id".
 */
static __isl_give isl_set *extract_sizes(__isl_keep isl_union_map *sizes,
	const char *type, int id)
{
	isl_space *space;
	isl_set *dom;
	isl_union_set *local_sizes;
	struct ppcg_extract_size_data data = { type, NULL };

	if (!sizes)
		return NULL;

	space = isl_union_map_get_space(sizes);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, 1);
	space = isl_space_set_tuple_name(space, isl_dim_set, "kernel");
	dom = isl_set_universe(space);
	dom = isl_set_fix_si(dom, isl_dim_set, 0, id);

	local_sizes = isl_union_set_apply(isl_union_set_from_set(dom),
					isl_union_map_copy(sizes));
	isl_union_set_foreach_set(local_sizes, &extract_size_of_type, &data);
	isl_union_set_free(local_sizes);
	return data.res;
}

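/* For example, assuming the "sizes" option holds the union map
 *
 *	{ kernel[0] -> tile[32, 32]; kernel[0] -> block[16, 16] }
 *
 * then extract_sizes(sizes, "tile", 0) returns the set { tile[32, 32] },
 * from which read_sizes_from_set below extracts the integer values.
 */
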
/* Given a singleton set, extract the first (at most *len) elements
 * of the single integer tuple into *sizes and update *len if needed.
 */
static void read_sizes_from_set(__isl_take isl_set *set, int *sizes, int *len)
{
	int i;
	int dim;
	isl_int v;

	if (!set)
		return;

	dim = isl_set_dim(set, isl_dim_set);
	if (dim < *len)
		*len = dim;

	isl_int_init(v);

	for (i = 0; i < *len; ++i) {
		int ok;

		ok = isl_set_plain_is_fixed(set, isl_dim_set, i, &v);
		assert(ok);

		sizes[i] = isl_int_get_si(v);
	}

	isl_int_clear(v);

	isl_set_free(set);
}

/* Extract user specified "tile" sizes from the "sizes" command line option,
 * defaulting to option->tile_size in each dimension.
 */
static void read_tile_sizes(struct gpu_gen *gen)
{
	int n;
	isl_set *size;

	gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
	assert(gen->tile_size);
	for (n = 0; n < gen->tile_len; ++n)
		gen->tile_size[n] = gen->options->tile_size;

	size = extract_sizes(gen->sizes, "tile", gen->kernel_id);
	read_sizes_from_set(size, gen->tile_size, &gen->tile_len);

	if (gen->n_parallel > gen->tile_len)
		gen->n_parallel = gen->tile_len;
}

/* Extract user specified "block" sizes from the "sizes" command line option,
 * after filling in some potentially useful defaults.
 */
static void read_block_sizes(struct gpu_gen *gen)
{
	int n;
	isl_set *size;

	n = gen->n_parallel;
	gen->n_block = (n <= 3) ? n : 3;
	switch (gen->n_block) {
	case 1:
		gen->block_dim[0] = 512;
		break;
	case 2:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 16;
		break;
	default:
		gen->block_dim[0] = 32;
		gen->block_dim[1] = 4;
		gen->block_dim[2] = 4;
		break;
	}

	size = extract_sizes(gen->sizes, "block", gen->kernel_id);
	read_sizes_from_set(size, gen->block_dim, &gen->n_block);
}

/* Extract user specified "grid" sizes from the "sizes" command line option,
 * after filling in some potentially useful defaults.
 */
static void read_grid_sizes(struct gpu_gen *gen)
{
	int n = gen->n_parallel;
	isl_set *size;

	gen->n_grid = (n <= 2) ? n : 2;
	switch (gen->n_grid) {
	case 1:
		gen->grid_dim[0] = 32768;
		break;
	default:
		gen->grid_dim[0] = 256;
		gen->grid_dim[1] = 256;
		break;
	}

	size = extract_sizes(gen->sizes, "grid", gen->kernel_id);
	read_sizes_from_set(size, gen->grid_dim, &gen->n_grid);
}

/* Extract user specified sizes from the "sizes" command line option
 * after filling in some potentially useful defaults.
 */
static void read_sizes(struct gpu_gen *gen)
{
	read_tile_sizes(gen);
	read_block_sizes(gen);
	read_grid_sizes(gen);
}

static void free_stmts(struct gpu_stmt *stmts, int n)
{
	int i;

	for (i = 0; i < n; ++i) {
		struct gpu_stmt_access *access, *next;

		for (access = stmts[i].accesses; access; access = next) {
			next = access->next;
			isl_map_free(access->access);
			free(access);
		}

		isl_id_free(stmts[i].id);
	}
	free(stmts);
}

void clear_gpu_gen(struct gpu_gen *gen)
{
	isl_union_map_free(gen->sizes);
	isl_union_map_free(gen->sched);
}

/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "tile_len" that tiles
 * the "tile_len" coordinates starting at "first".
 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *tile(__isl_take isl_space *dim, int len,
	int first, int tile_len, int *tile_size)
{
	int i;
	isl_int v;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	isl_int_init(v);

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + tile_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len - tile_len; ++i) {
		int j = i < first ? i : i + tile_len;
		int k = i < first ? i : i + 2 * tile_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, j, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out, k, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < tile_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
		isl_int_set_si(v, tile_size[i]);
		isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_out,
						first + i + tile_len, v);
		isl_int_set_si(v, tile_size[i] - 1);
		isl_constraint_set_constant(c, v);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);
	isl_int_clear(v);

	return isl_map_from_basic_map(bmap);
}

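/* For example, with len = 1, first = 0, tile_len = 1 and
 * tile_size = { 32 }, tile() constructs the map
 *
 *	{ [i] -> [floor(i/32), i mod 32] }
 *
 * where the second output dimension is constrained by
 * 0 <= i mod 32 <= 31.
 */
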
/* Construct a map from a domain of dimensionality "len"
 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
 * that are projected out at the end.
 * "dim" prescribes the parameters.
 */
static __isl_give isl_map *wrap(__isl_take isl_space *dim, int len,
	int first, int wrap_len, int *wrap_size)
{
	int i;
	isl_basic_map *bmap;
	isl_constraint *c;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len + 2 * wrap_len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int k = i < first + wrap_len ? i : i + 2 * wrap_len;

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	for (i = 0; i < wrap_len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, 1);
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + 2 * wrap_len + i,
						wrap_size[i]);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, 1);
		bmap = isl_basic_map_add_constraint(bmap, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_out,
						first + wrap_len + i, -1);
		isl_constraint_set_constant_si(c, wrap_size[i] - 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}

	isl_local_space_free(ls);

	bmap = isl_basic_map_project_out(bmap, isl_dim_out,
					first + 2 * wrap_len, wrap_len);

	return isl_map_from_basic_map(bmap);
}

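/* For example, with len = 1, first = 0, wrap_len = 1 and
 * wrap_size = { 32 }, wrap() constructs the map
 *
 *	{ [i] -> [i, i mod 32] }
 *
 * after the auxiliary dimension holding floor(i/32) has been
 * projected out.
 */
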
747 /* Add "n" parameters named prefix%d.
749 static __isl_give isl_set *add_params( __isl_take isl_set *set,
750 int n, const char *prefix)
752 int i;
753 unsigned nparam;
754 char name[20];
756 nparam = isl_set_dim(set, isl_dim_param);
757 set = isl_set_add_dims(set, isl_dim_param, n);
759 for (i = 0; i < n; ++i) {
760 snprintf(name, sizeof(name), "%s%d", prefix, i);
761 set = isl_set_set_dim_name(set, isl_dim_param,
762 nparam + i, name);
765 return set;
/* Equate the "n" dimensions of "set" starting at "first" to
 * freshly created parameters named prefix%d.
 */
static __isl_give isl_set *parametrize(__isl_take isl_set *set,
	int first, int n, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_space *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	isl_local_space *ls;

	nparam = isl_set_dim(set, isl_dim_param);

	set = add_params(set, n, prefix);

	dim = isl_set_get_space(set);
	bset = isl_basic_set_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	isl_int_init(v);

	for (i = 0; i < n; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_local_space_free(ls);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}

/* Given a parameter space "space", create a set of dimension "len"
 * of which the "n" dimensions starting at "first" are equated to
 * freshly created parameters named prefix%d.
 */
static __isl_give isl_set *parametrization(__isl_take isl_space *space,
	int len, int first, int n, const char *prefix)
{
	isl_set *set;

	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, len);
	set = isl_set_universe(space);

	return parametrize(set, first, n, prefix);
}

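/* For example, parametrization(space, 3, 1, 2, "t") produces the set
 *
 *	[t0, t1] -> { [s0, s1, s2] : s1 = t0 and s2 = t1 }
 *
 * with t0 and t1 freshly created parameters.
 */
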
/* Tile the B loops over the tile sizes and then tile/wrap
 * the T1 loops over the blocks.
 */
static __isl_give isl_union_map *tile_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_map *tiling, *block_tiling;

	dim = isl_union_map_get_space(sched);
	tiling = tile(isl_space_copy(dim), gen->untiled_len,
		      gen->tile_first, gen->tile_len, gen->tile_size);

	if (gen->options->wrap)
		block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);
	else
		block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
				gen->tile_first, gen->n_grid, gen->grid_dim);

	gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;

	tiling = isl_map_apply_range(tiling, block_tiling);

	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(tiling));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}

/* Equate the "T1P" iterators in the tiled schedule "sched"
 * to the block dimensions.
 */
static __isl_give isl_union_map *parametrize_tiled_schedule(
	struct gpu_gen *gen, __isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_set *par;

	dim = isl_union_map_get_space(sched);
	par = parametrization(dim, gen->tiled_len,
		gen->tile_first + gen->n_grid, gen->n_grid, "b");
	sched = isl_union_map_intersect_range(sched,
					      isl_union_set_from_set(par));

	return sched;
}

/* Tile/wrap the P1 loops over the threads.
 */
static __isl_give isl_union_map *thread_tile_schedule(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_space *dim;
	isl_map *tiling;
	isl_set *par;

	dim = isl_union_map_get_space(sched);

	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), gen->tiled_len,
			      gen->shared_len, gen->n_block, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim), gen->tiled_len,
			      gen->shared_len, gen->n_block, gen->block_dim);
	gen->thread_tiled_len = gen->tiled_len + gen->n_block;

	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(tiling));

	par = parametrization(dim, gen->thread_tiled_len,
		gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
		gen->n_block, "t");
	sched = isl_union_map_intersect_range(sched,
					      isl_union_set_from_set(par));

	gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;

	return sched;
}

/* If the user asked for it, scale the shared memory tile loops
 * (T1T and T2) of "sched" by gen->tile_size[i].
 * If we are not performing "wrapping", then additionally scale the T1P
 * loops by gen->grid_dim[i].
 */
static __isl_give isl_union_map *scale_tile_loops(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, gen->tiled_len);
	dim = isl_space_add_dims(dim, isl_dim_out, gen->tiled_len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < gen->tiled_len; ++i) {
		int f = 1;

		if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
			f = gen->tile_size[i - gen->tile_first];
			if (!gen->options->wrap)
				f *= gen->grid_dim[i - gen->tile_first];
		} else if (i >= gen->tile_first + gen->n_grid &&
			   i < gen->tile_first + gen->n_grid + gen->tile_len) {
			f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
		}

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}

/* If we are not performing "wrapping" and if the user asked for it,
 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_thread_tile_loops(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, gen->thread_tiled_len);
	dim = isl_space_add_dims(dim, isl_dim_out, gen->thread_tiled_len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < gen->thread_tiled_len; ++i) {
		int f = 1;

		if (i >= gen->shared_len &&
		    i < gen->shared_len + gen->n_block)
			f = gen->block_dim[i - gen->shared_len];

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}

/* If we are not performing "wrapping" and if the user asked for it,
 * scale the "n_tile" loops starting at "first" of "sched"
 * by gen->block_dim[i].
 */
static __isl_give isl_union_map *scale_access_tile_loops(struct gpu_gen *gen,
	__isl_take isl_union_map *sched, int len, int first, int n_tile)
{
	int i;
	isl_space *dim;
	isl_basic_map *scale;
	isl_constraint *c;
	isl_local_space *ls;

	if (gen->options->wrap)
		return sched;
	if (!gen->options->scale_tile_loops)
		return sched;

	dim = isl_union_map_get_space(sched);
	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	scale = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		int f = 1;

		if (i >= first && i < first + n_tile)
			f = gen->block_dim[i - first];

		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
		isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
		scale = isl_basic_map_add_constraint(scale, c);
	}

	isl_local_space_free(ls);

	sched = isl_union_map_apply_range(sched,
		isl_union_map_from_map(isl_map_from_basic_map(scale)));

	return sched;
}

/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i].
 */
__isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
	int len, int *size, const char *prefix)
{
	int i;
	unsigned nparam;
	isl_int v;
	isl_space *dim;
	isl_basic_set *bset;
	isl_constraint *c;
	isl_local_space *ls;
	char name[20];

	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, len);

	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	dim = isl_set_get_space(set);
	bset = isl_basic_set_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	isl_int_init(v);

	for (i = 0; i < len; ++i) {
		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, 1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		bset = isl_basic_set_add_constraint(bset, c);

		c = isl_inequality_alloc(isl_local_space_copy(ls));
		isl_int_set_si(v, -1);
		isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
		isl_int_set_si(v, size[i] - 1);
		isl_constraint_set_constant(c, v);
		bset = isl_basic_set_add_constraint(bset, c);
	}

	isl_int_clear(v);
	isl_local_space_free(ls);

	return isl_set_intersect(set, isl_set_from_basic_set(bset));
}

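/* For example, a call such as
 *
 *	add_bounded_parameters(set, 2, (int[]){ 32, 16 }, "t")
 *
 * adds parameters t0 and t1 to "set" and constrains them by
 *
 *	0 <= t0 <= 31 and 0 <= t1 <= 15
 */
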
/* Add "len" parameters p[i] called prefix%d,
 * with bounds 0 <= p[i] < size[i], where "len" and the (possibly
 * non-constant) sizes are taken from the isl_multi_pw_aff "size".
 */
static __isl_give isl_set *add_bounded_parameters_dynamic(
	__isl_take isl_set *set, __isl_keep isl_multi_pw_aff *size,
	const char *prefix)
{
	int i, len;
	unsigned nparam;
	isl_space *space;
	isl_local_space *ls;
	char name[20];

	len = isl_multi_pw_aff_dim(size, isl_dim_out);
	nparam = isl_set_dim(set, isl_dim_param);
	set = isl_set_add_dims(set, isl_dim_param, len);

	for (i = 0; i < len; ++i) {
		snprintf(name, sizeof(name), "%s%d", prefix, i);
		set = isl_set_set_dim_name(set, isl_dim_param,
					   nparam + i, name);
	}

	space = isl_space_params(isl_set_get_space(set));
	ls = isl_local_space_from_space(space);
	for (i = 0; i < len; ++i) {
		isl_pw_aff *param, *size_i, *zero;
		isl_set *bound;

		param = isl_pw_aff_var_on_domain(isl_local_space_copy(ls),
						 isl_dim_param, nparam + i);

		size_i = isl_multi_pw_aff_get_pw_aff(size, i);
		bound = isl_pw_aff_lt_set(isl_pw_aff_copy(param), size_i);
		set = isl_set_intersect_params(set, bound);

		zero = isl_pw_aff_zero_on_domain(isl_local_space_copy(ls));
		bound = isl_pw_aff_ge_set(param, zero);
		set = isl_set_intersect_params(set, bound);
	}
	isl_local_space_free(ls);

	return set;
}

/* Given a mapping "sched" of the form
 *
 *	[D -> A] -> [D -> T(A)]
 *
 * apply the mapping encoded in bounds[i].shift_map to the range of "sched".
 * The mappings in bounds[i].shift_map are of the form
 *
 *	[D -> a] -> [D -> s(D,a)]
 *
 * We first compose them with a mapping
 *
 *	[D -> v] -> v
 *
 * (If bounds[i].shift_map is not set, then it is assumed to be
 * an identity mapping and then we use this second mapping instead.)
 * This results in
 *
 *	[D -> a] -> s(D,a)
 *
 * We precompose them with a projection on the i-th dimension to obtain
 *
 *	[D -> T] -> s(D,T)
 *
 * and collect these into
 *
 *	[D -> T] -> S(D,T)
 *
 * Introducing D in the range yields
 *
 *	[D -> T] -> [D -> S(D,T)]
 *
 * and application to "sched" yields
 *
 *	[D -> A] -> [D -> S(D,T(A))]
 */
static __isl_give isl_map *pre_shift(__isl_take isl_map *sched,
	int n_index, struct gpu_array_bound *bounds)
{
	int i;
	isl_ctx *ctx = isl_map_get_ctx(sched);
	isl_space *space, *space2;
	isl_basic_map *def;
	isl_map *map, *id, *pre_shift;

	space = isl_space_range(isl_map_get_space(sched));
	space2 = isl_space_from_domain(isl_space_copy(space));
	pre_shift = isl_map_universe(space2);
	space = isl_space_domain(isl_space_unwrap(space));
	id = isl_map_identity(isl_space_map_from_set(isl_space_copy(space)));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, 1);
	def = isl_basic_map_range_map(isl_basic_map_universe(space));

	for (i = 0; i < n_index; ++i) {
		isl_basic_map *bmap, *drop;
		isl_map *proj;

		space = isl_space_alloc(ctx, 0, n_index, n_index);
		proj = isl_map_identity(space);
		proj = isl_map_project_out(proj, isl_dim_out,
					   i + 1, n_index - (i + 1));
		proj = isl_map_project_out(proj, isl_dim_out, 0, i);
		proj = isl_map_product(isl_map_copy(id), proj);

		if (!bounds[i].shift_map)
			bmap = isl_basic_map_copy(def);
		else {
			bmap = isl_basic_map_copy(bounds[i].shift_map);
			bmap = isl_basic_map_apply_range(bmap,
						isl_basic_map_copy(def));
		}

		map = isl_map_from_basic_map(bmap);
		map = isl_map_apply_range(proj, map);
		pre_shift = isl_map_flat_range_product(pre_shift, map);
	}

	isl_map_free(id);
	isl_basic_map_free(def);

	space = isl_space_domain(isl_map_get_space(pre_shift));
	map = isl_map_domain_map(isl_map_universe(isl_space_unwrap(space)));
	pre_shift = isl_map_range_product(map, pre_shift);

	sched = isl_map_apply_range(sched, pre_shift);

	return sched;
}

/* Given an access relation to a tile of an array, construct a map that
 * maps each element in the space of the access relation
 * to a copy of the tile shifted to the origin
 * (based on the lower bounds in group->private_bound or group->shared_bound).
 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
 * is applied to the index first.
 * The domain space of the resulting map is that of access "access",
 * while the range space is anonymous.
 * The resulting map only encodes the mapping to the shifted tile and
 * not the constraints of "access".
 *
 * Let the space of the access relation be
 *
 *	D -> A
 *
 * We first construct an identity relation on a wrapped copy of this space,
 * except that it strips off the name of the array
 *
 *	[D -> A] -> [D -> T(A)]	(1)
 *
 * The bounds in bounds[i].lb are of the form
 *
 *	D -> b(D)
 *
 * We collect them into
 *
 *	D -> B(D)
 *
 * and then transform them into
 *
 *	[D -> T] -> T - B(D)	(2)
 *
 * Combining those two mappings (1) and (2) yields
 *
 *	[D -> A] -> T(A) - B(D)
 *
 * If there are any strides, then (1) is first transformed into (1')
 *
 *	[D -> A] -> [D -> T'(A)]	(1')
 *
 * by a call to pre_shift.
 */
static __isl_give isl_map *shift_access(__isl_take isl_map *access,
	struct gpu_array_ref_group *group)
{
	int i;
	isl_space *space;
	isl_map *id1, *id2;
	isl_map *map;
	isl_map *shift;
	isl_map *sched;
	struct gpu_array_bound *bounds;
	int n_index = group->array->n_index;

	bounds = group->private_bound;
	if (!bounds)
		bounds = group->shared_bound;

	space = isl_space_domain(isl_map_get_space(access));
	space = isl_space_map_from_set(space);
	id1 = isl_map_identity(space);
	space = isl_space_range(isl_map_get_space(access));
	space = isl_space_map_from_set(space);
	space = isl_space_set_tuple_name(space, isl_dim_out, NULL);
	id2 = isl_map_identity(space);
	sched = isl_map_product(id1, id2);

	space = isl_space_unwrap(isl_space_range(isl_map_get_space(sched)));
	space = isl_space_from_domain(isl_space_domain(space));
	shift = isl_map_universe(space);
	for (i = 0; i < n_index; ++i) {
		map = isl_map_from_aff(isl_aff_copy(bounds[i].lb));
		shift = isl_map_flat_range_product(shift, map);
	}

	space = isl_space_unwrap(isl_space_range(isl_map_get_space(sched)));
	map = isl_map_universe(space);
	id1 = isl_map_range_map(isl_map_copy(map));
	map = isl_map_domain_map(map);
	shift = isl_map_neg(shift);
	shift = isl_map_apply_range(map, shift);
	shift = isl_map_sum(id1, shift);

	for (i = 0; i < n_index; ++i)
		if (bounds[i].shift_map)
			break;

	if (i < n_index)
		sched = pre_shift(sched, n_index, bounds);

	sched = isl_map_apply_range(sched, shift);

	isl_map_free(access);

	return sched;
}

/* Given a schedule that iterates over all elements in a piece of an array,
 * perform tiling/wrapping over the threads.
 *
 * In particular, we tile the final iterators so that the final thread
 * dimension runs over the final array dimension.
 * However, if those final iterators have only a single iteration,
 * we try to tile earlier iterators instead.
 */
static __isl_give isl_map *tile_access_schedule(struct gpu_gen *gen,
	__isl_take isl_map *sched)
{
	isl_space *dim;
	isl_union_map *usched;
	isl_map *tiling;
	isl_set *par;
	unsigned nvar = isl_map_dim(sched, isl_dim_out);
	int n_tile;
	int first;

	n_tile = gen->n_block;
	if (n_tile > nvar) {
		int i;
		sched = isl_map_insert_dims(sched,
					    isl_dim_out, 0, n_tile - nvar);
		for (i = 0; i < n_tile - nvar; ++i)
			sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
		nvar = n_tile;
	}

	first = nvar - n_tile;

	for (; first > 0; first--)
		if (!isl_map_plain_is_fixed(sched, isl_dim_out,
					    first + n_tile - 1, NULL))
			break;

	dim = isl_map_get_space(sched);
	dim = isl_space_params(dim);
	if (gen->options->wrap)
		tiling = wrap(isl_space_copy(dim), nvar, first,
			      n_tile, gen->block_dim);
	else
		tiling = tile(isl_space_copy(dim), nvar, first,
			      n_tile, gen->block_dim);
	sched = isl_map_apply_range(sched, tiling);

	par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
	sched = isl_map_intersect_range(sched, par);

	usched = isl_union_map_from_map(sched);
	usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
					 first, n_tile);
	sched = isl_map_from_union_map(usched);

	return sched;
}

/* Given an index expression "pa" into a tile of an array, adjust the
 * expression to a shift of the tile to the origin
 * (based on the lower bounds in "bound").
 * If the index is strided, then we first add
 * bound->shift and divide by bound->stride.
 * In the end, we compute the gist with respect to "domain".
 *
 * All of the input expression "pa", the set "domain" and
 * the output are expressed in terms of the AST schedule domain.
 * The expressions in "bound" are expressed in terms of the first
 * shared_len dimensions of the schedule computed by PPCG.
 * The mapping "sched2shared" maps the former domain to the latter domain.
 */
static __isl_give isl_pw_aff *shift_index(__isl_take isl_pw_aff *pa,
	struct gpu_array_info *array,
	struct gpu_array_bound *bound, __isl_take isl_set *domain,
	__isl_take isl_map *sched2shared)
{
	isl_map *map;
	isl_pw_aff *tmp;
	isl_pw_multi_aff *pma;

	if (bound->shift) {
		map = isl_map_from_aff(isl_aff_copy(bound->shift));
		map = isl_map_apply_range(isl_map_copy(sched2shared), map);
		pma = isl_pw_multi_aff_from_map(map);
		tmp = isl_pw_multi_aff_get_pw_aff(pma, 0);
		isl_pw_multi_aff_free(pma);
		pa = isl_pw_aff_add(pa, tmp);
		pa = isl_pw_aff_scale_down(pa, bound->stride);
	}

	map = isl_map_from_aff(isl_aff_copy(bound->lb));
	map = isl_map_apply_range(sched2shared, map);
	pma = isl_pw_multi_aff_from_map(map);
	tmp = isl_pw_multi_aff_get_pw_aff(pma, 0);
	isl_pw_multi_aff_free(pma);
	pa = isl_pw_aff_sub(pa, tmp);
	pa = isl_pw_aff_coalesce(pa);
	pa = isl_pw_aff_gist(pa, domain);

	return pa;
}

/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
static __isl_give isl_union_map *group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		      (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					     isl_union_map_from_map(map_i));
	}

	return access;
}

/* Return a map from the first shared_len dimensions of the computed
 * schedule to the values of the given index "i"
 * of the elements in the array tile in global memory that corresponds
 * to the shared memory copy.
 * In particular, if a is the index, then the range of the map
 *
 *	{ D -> [a] }
 *
 * is constrained as follows
 *
 *	tile_offset(D) <= a <= tile_offset(D) + tile_size - 1	(1)
 *
 * and
 *
 *	0 <= a <= array_size - 1				(2)
 *
 * Note that if some stride has been detected (i.e., when
 * group->shared_bound[i].shift is set), then offset and size (i.e.,
 * constraints (1)) apply to the shifted and scaled down copy of the tile.
 * These constraints therefore have to be mapped back to the original
 * array space using the inverse of the shift_map.
 */
static __isl_give isl_map *group_tile_dim(struct gpu_array_ref_group *group,
	int i)
{
	isl_aff *aff;
	isl_space *space;
	isl_map *map, *tile, *gt;
	isl_set *bound;

	map = isl_map_from_aff(isl_aff_copy(group->shared_bound[i].lb));
	space = isl_space_range(isl_map_get_space(map));
	map = isl_map_apply_range(map, isl_map_lex_le(isl_space_copy(space)));
	tile = map;

	aff = isl_aff_copy(group->shared_bound[i].lb);
	aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
	map = isl_map_from_aff(aff);
	gt = isl_map_lex_gt(space);
	map = isl_map_apply_range(map, isl_map_copy(gt));
	tile = isl_map_intersect(tile, map);

	if (group->shared_bound[i].shift) {
		isl_basic_map *shift;
		shift = isl_basic_map_copy(group->shared_bound[i].shift_map);
		shift = isl_basic_map_reverse(shift);
		tile = isl_set_unwrap(isl_set_apply(isl_map_wrap(tile),
					isl_map_from_basic_map(shift)));
	}

	tile = isl_map_lower_bound_si(tile, isl_dim_out, 0, 0);

	bound = isl_set_from_pw_aff(isl_pw_aff_copy(group->array->bound[i]));
	bound = isl_set_apply(bound, gt);
	tile = isl_map_intersect_range(tile, bound);

	return tile;
}

/* Return a map from the first shared_len dimensions of the computed
 * schedule to the array tile in global memory that corresponds
 * to the shared memory copy.
 */
static __isl_give isl_map *group_tile(struct gpu_array_ref_group *group)
{
	int i;
	int n_index = group->array->n_index;
	isl_map *tile;

	tile = group_tile_dim(group, 0);
	for (i = 1; i < n_index; ++i) {
		isl_map *tile_i;

		tile_i = group_tile_dim(group, i);
		tile = isl_map_flat_range_product(tile, tile_i);
	}

	tile = isl_map_set_tuple_name(tile, isl_dim_out, group->array->name);

	return tile;
}

/* Given a mapping "sched" from the AST schedule to a domain,
 * return the corresponding mapping from the AST schedule
 * to the first shared_len dimensions of the schedule computed by PPCG.
 */
static __isl_give isl_map *compute_sched_to_shared(struct gpu_gen *gen,
	__isl_take isl_map *sched)
{
	isl_union_map *umap;
	isl_space *space;
	isl_map *map;

	space = isl_space_range(isl_map_get_space(sched));
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, gen->shared_len);

	umap = isl_union_map_copy(gen->shared_sched);
	umap = isl_union_map_apply_range(umap,
					 isl_union_map_copy(gen->shared_proj));
	map = isl_union_map_extract_map(umap, space);
	isl_union_map_free(umap);

	sched = isl_map_apply_range(sched, map);
	sched = isl_map_detect_equalities(sched);

	return sched;
}

/* Set unroll[j] if the input dimension j is involved in
 * the index expression represented by ma.
 */
static int check_unroll(__isl_take isl_set *set, __isl_take isl_multi_aff *ma,
	void *user)
{
	int i, j;
	int n_in = isl_multi_aff_dim(ma, isl_dim_in);
	int n_out = isl_multi_aff_dim(ma, isl_dim_out);
	int *unroll = user;

	for (i = 0; i < n_out; ++i) {
		isl_aff *aff;

		aff = isl_multi_aff_get_aff(ma, i);
		for (j = 0; j < n_in; ++j)
			if (isl_aff_involves_dims(aff, isl_dim_in, j, 1))
				unroll[j] = 1;
		isl_aff_free(aff);
	}

	isl_set_free(set);
	isl_multi_aff_free(ma);
	return 0;
}

/* Given an array pos mapping input dimensions to the corresponding
 * output dimension, construct the corresponding map.
 */
static __isl_give isl_map *permutation(__isl_take isl_space *dim,
	int *pos, int len)
{
	int i;
	isl_constraint *c;
	isl_basic_map *bmap;
	isl_local_space *ls;

	dim = isl_space_add_dims(dim, isl_dim_in, len);
	dim = isl_space_add_dims(dim, isl_dim_out, len);
	bmap = isl_basic_map_universe(isl_space_copy(dim));
	ls = isl_local_space_from_space(dim);

	for (i = 0; i < len; ++i) {
		c = isl_equality_alloc(isl_local_space_copy(ls));
		isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
		isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
		bmap = isl_basic_map_add_constraint(bmap, c);
	}
	isl_local_space_free(ls);

	return isl_map_from_basic_map(bmap);
}

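/* For example, with len = 3 and pos = { 1, 2, 0 }, permutation()
 * constructs the map
 *
 *	{ [i0, i1, i2] -> [i2, i0, i1] }
 *
 * since each input dimension i is equated to output dimension pos[i].
 */
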
/* Find all loops involved in any of the index expressions for any of
 * the private accesses, move them innermost and then mark them as
 * requiring unrolling by setting gen->first_unroll.
 * The loops involved should all be parallel because of the checks
 * we performed in check_private_group_access. Moving them innermost
 * is therefore a valid transformation.
 *
 * Loops up to gen->shared_len are generated before the mapping to
 * threads is applied. They should therefore be ignored.
 *
 * We compute the hidden equalities of the schedule first
 * since we will need them in our calls to isl_pw_multi_aff_from_map
 * and because we want to make sure that the same equalities
 * are also available to the code generator.
 */
static __isl_give isl_union_map *interchange_for_unroll(struct gpu_gen *gen,
	__isl_take isl_union_map *sched)
{
	int i, j;
	int unroll[gen->thread_tiled_len];
	int perm[gen->thread_tiled_len];
	isl_space *dim;
	isl_map *permute;
	int len = gen->shared_len + gen->n_parallel + gen->n_block;

	gen->first_unroll = -1;

	sched = isl_union_map_detect_equalities(sched);
	for (i = 0; i < gen->thread_tiled_len; ++i)
		unroll[i] = 0;
	for (i = 0; i < gen->prog->n_array; ++i) {
		struct gpu_array_info *array = &gen->prog->array[i];

		for (j = 0; j < array->n_group; ++j) {
			isl_union_map *access;
			isl_map *acc;
			isl_pw_multi_aff *pma;

			if (!array->groups[j]->private_bound)
				continue;

			access = group_access_relation(array->groups[j], 1, 1);
			access = isl_union_map_apply_domain(access,
						isl_union_map_copy(sched));

			acc = isl_map_from_union_map(access);
			pma = isl_pw_multi_aff_from_map(acc);
			isl_pw_multi_aff_foreach_piece(pma,
							&check_unroll, unroll);

			isl_pw_multi_aff_free(pma);
		}
	}

	for (i = gen->shared_len; i < len; ++i)
		if (unroll[i])
			break;

	if (i >= len)
		return sched;

	for (i = len; i < gen->thread_tiled_len; ++i)
		if (unroll[i])
			return sched;

	j = 0;
	for (i = 0; i < gen->shared_len; ++i)
		perm[i] = j++;
	for (i = gen->shared_len; i < gen->thread_tiled_len; ++i)
		if (!unroll[i])
			perm[i] = j++;
	gen->first_unroll = j - gen->shared_len;
	for (i = gen->shared_len; i < len; ++i)
		if (unroll[i])
			perm[i] = j++;

	dim = isl_union_map_get_space(sched);
	permute = permutation(dim, perm, gen->thread_tiled_len);
	sched = isl_union_map_apply_range(sched,
					  isl_union_map_from_map(permute));

	return sched;
}

/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, isl_int stride, int sign)
{
	int i;
	isl_int v;
	isl_space *space;
	unsigned nparam;
	unsigned nvar;
	isl_aff *aff;

	isl_int_set(bound->stride, stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	isl_int_init(v);

	isl_constraint_get_constant(c, &v);
	if (sign < 0)
		isl_int_neg(v, v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant(aff, v);

	for (i = 0; i < nparam; ++i) {
		isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
		if (isl_int_is_zero(v))
			continue;
		if (sign < 0)
			isl_int_neg(v, v);
		aff = isl_aff_add_coefficient(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		isl_constraint_get_coefficient(c, isl_dim_in, i, &v);
		if (isl_int_is_zero(v))
			continue;
		if (sign < 0)
			isl_int_neg(v, v);
		aff = isl_aff_add_coefficient(aff, isl_dim_in, i, v);
	}

	isl_int_clear(v);

	bound->shift = aff;
}

/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
{
	int i;
	isl_int v, stride;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	isl_int_init(v);
	isl_int_init(stride);

	n_div = isl_constraint_dim(c, isl_dim_div);
	isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);

	if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
		int s = isl_int_sgn(v);
		isl_int_set_si(stride, 0);
		for (i = 0; i < n_div; ++i) {
			isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
			isl_int_gcd(stride, stride, v);
		}
		if (!isl_int_is_zero(stride) &&
		    isl_int_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);
	}

	isl_int_clear(stride);
	isl_int_clear(v);

	isl_constraint_free(c);
	return 0;
}

/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * The mapping recorded in bound->shift_map is of the form
 *
 *	[D -> i] -> [D -> (i + S(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	isl_int_set_si(bound->stride, -1);

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (isl_int_is_neg(bound->stride))
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down(aff, bound->stride);
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bound->shift_map = isl_basic_map_apply_range(shift, scale);
	bmap = isl_basic_map_copy(bound->shift_map);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}

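/* For example, if the accessed index is constrained by
 *
 *	D -> { [i] : exists e : i = 2e + 1 }
 *
 * (a purely illustrative case), then stride 2 and shift -1 are
 * recorded, and shift_map maps [D -> i] to [D -> (i - 1)/2],
 * shrinking the index range by the stride.
 */
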
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.
 * This variable is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};

1896 /* Given a constraint from the basic set describing the bounds on
1897 * an array index, check if it is a lower bound, say m i >= b(x), and,
1898 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
1899 * upper bound. If so, and if this bound is smaller than any bound
1900 * derived from earlier constraints, set the size to this bound on
1901 * the expression and the lower bound to ceil(b(x)/m).
1903 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
1905 struct gpu_size_info *size = user;
1906 unsigned nparam;
1907 unsigned n_div;
1908 isl_int v;
1910 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
1911 n_div = isl_constraint_dim(c, isl_dim_div);
1913 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
1914 isl_constraint_free(c);
1915 return 0;
1918 isl_int_init(v);
1920 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
1922 if (isl_int_is_pos(v)) {
1923 isl_aff *aff;
1924 isl_aff *lb;
1925 enum isl_lp_result res;
1927 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
1928 aff = isl_aff_ceil(aff);
1930 lb = isl_aff_copy(aff);
1932 aff = isl_aff_neg(aff);
1933 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);
1935 res = isl_basic_set_max(size->bset, aff, &v);
1936 isl_aff_free(aff);
1938 if (res == isl_lp_ok) {
1939 isl_int_add_ui(v, v, 1);
1940 if (isl_int_is_neg(size->bound->size) ||
1941 isl_int_lt(v, size->bound->size)) {
1942 isl_int_set(size->bound->size, v);
1943 lb = isl_aff_drop_dims(lb, isl_dim_in,
1944 size->pos, 1);
1945 isl_aff_free(size->bound->lb);
1946 size->bound->lb = isl_aff_copy(lb);
1949 isl_aff_free(lb);
1952 isl_int_clear(v);
1953 isl_constraint_free(c);
1955 return 0;
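/* A worked example (the set is an assumption for illustration): for
 *
 * { [x, i] : x <= 2 i <= x + 6 }
 *
 * the constraint 2 i >= x is a lower bound with lb = ceil(x/2), and the
 * expression i - ceil(x/2) + 1 has constant maximum 4, so the size in
 * this direction is 4 with offset ceil(x/2).
 */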
1958 /* Given a basic map "bounds" that maps parameters and input dimensions
1959 * to a single output dimension, look for an expression in the parameters
1960 * and input dimensions such that the range of the output dimension shifted
1961 * by this expression is a constant.
1963 * In particular, we currently only consider lower bounds on the output
1964 * dimension as candidate expressions.
1966 static int compute_array_dim_size(struct gpu_array_bound *bound,
1967 __isl_take isl_basic_map *bounds)
1969 struct gpu_size_info size;
1971 bounds = isl_basic_map_detect_equalities(bounds);
1972 bounds = check_stride(bound, bounds);
1974 isl_int_set_si(bound->size, -1);
1975 bound->lb = NULL;
1977 size.bound = bound;
1978 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
1979 size.bset = isl_basic_map_wrap(bounds);
1980 size.bset = isl_basic_set_flatten(size.bset);
1981 size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
1982 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
1983 &size);
1984 isl_basic_set_free(size.bset);
1986 return isl_int_is_nonneg(bound->size) ? 0 : -1;
1989 /* Check if we can find a shared memory tile for the given array
1990 * based on the given accesses, and if so, put the results
1991 * in array->shared_bound.
1993 * We project the accesses on each index in turn and look for a parametric
1994 * offset such that the size is constant.
1996 static int can_tile_for_shared_memory(struct gpu_array_info *array,
1997 __isl_keep isl_map *access, struct gpu_array_bound *bounds)
1999 int i;
2001 for (i = 0; i < array->n_index; ++i) {
2002 isl_map *access_i;
2003 isl_basic_map *hull;
2005 access_i = isl_map_copy(access);
2006 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2007 access_i = isl_map_project_out(access_i, isl_dim_out,
2008 1, array->n_index - (i + 1));
2009 access_i = isl_map_compute_divs(access_i);
2010 hull = isl_map_simple_hull(access_i);
2011 if (compute_array_dim_size(&bounds[i], hull) < 0)
2012 return 0;
2015 return 1;
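/* For instance (a hypothetical access pattern): if a group accesses
 *
 * A[16 s + t] with 0 <= t <= 15
 *
 * for fixed values s of the shared schedule dimensions, then the single
 * index can be tiled with lb(s) = 16 s and size 16, i.e., a tile of 16
 * elements suffices independently of s.
 */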
2018 /* Construct a map whose input consists of the shared tile loops and
2019 * the loops that will be wrapped around the threads; the map relates
2020 * these latter loops to the thread indices and then projects them out.
2022 static __isl_give isl_map *compute_privatization(struct gpu_gen *gen)
2024 isl_map *priv;
2025 isl_map *tiling;
2026 isl_map *proj;
2027 isl_set *par;
2028 isl_space *dim;
2030 dim = isl_union_map_get_space(gen->shared_sched);
2032 if (gen->options->wrap)
2033 tiling = wrap(isl_space_copy(dim), gen->shared_len + gen->n_block,
2034 gen->shared_len, gen->n_block, gen->block_dim);
2035 else
2036 tiling = tile(isl_space_copy(dim), gen->shared_len + gen->n_block,
2037 gen->shared_len, gen->n_block, gen->block_dim);
2039 priv = tiling;
2041 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2042 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2043 gen->n_block, "t");
2045 priv = isl_map_align_params(priv, isl_set_get_space(par));
2046 priv = isl_map_intersect_range(priv, par);
2048 dim = isl_map_get_space(priv);
2049 dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
2050 dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
2051 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2052 gen->shared_len);
2054 priv = isl_map_apply_range(priv, proj);
2056 return priv;
2059 /* Construct a map from domain_dim to domain_dim that increments
2060 * the dimension at position "pos" and leaves all other dimensions
2061 * constant.
2063 static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
2065 int i;
2066 int len = isl_space_dim(domain_dim, isl_dim_set);
2067 isl_space *dim;
2068 isl_basic_map *next;
2069 isl_local_space *ls;
2071 dim = isl_space_map_from_set(domain_dim);
2072 next = isl_basic_map_universe(isl_space_copy(dim));
2073 ls = isl_local_space_from_space(dim);
2075 for (i = 0; i < len; ++i) {
2076 isl_constraint *c;
2078 c = isl_equality_alloc(isl_local_space_copy(ls));
2079 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2080 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2081 if (i == pos)
2082 isl_constraint_set_constant_si(c, 1);
2083 next = isl_basic_map_add_constraint(next, c);
2086 isl_local_space_free(ls);
2088 return isl_map_from_basic_map(next);
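/* For example, on a 3-dimensional space, next(space, 1) constructs
 *
 * { [i0, i1, i2] -> [i0, i1 + 1, i2] }
 */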
2091 /* Check if the given access is coalesced.
2092 * That is, check whether incrementing the dimension that will get
2093 * wrapped over the last thread index results in incrementing
2094 * the last array index.
2096 * This function is only called for access relations without reuse.
2098 static int access_is_coalesced(struct gpu_gen *gen,
2099 __isl_keep isl_union_map *access)
2101 isl_space *dim;
2102 isl_map *access_map;
2103 isl_map *next_thread_x;
2104 isl_map *next_element;
2105 isl_map *map;
2106 int coalesced;
2108 access = isl_union_map_copy(access);
2109 access = isl_union_map_apply_domain(access,
2110 isl_union_map_copy(gen->tiled_sched));
2111 access_map = isl_map_from_union_map(access);
2113 dim = isl_map_get_space(access_map);
2114 dim = isl_space_domain(dim);
2115 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
2117 dim = isl_map_get_space(access_map);
2118 dim = isl_space_range(dim);
2119 next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);
2121 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
2122 map = isl_map_apply_range(map, access_map);
2124 coalesced = isl_map_is_subset(map, next_element);
2126 isl_map_free(next_element);
2127 isl_map_free(map);
2129 return coalesced;
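/* Illustration (assumed access patterns): with t the last thread index,
 * an access of the form A[f(...) + t] is coalesced, since incrementing t
 * increments the last array index by one, whereas A[32 t] is not,
 * because consecutive threads then touch elements 32 positions apart.
 */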
2132 /* Given an access relation in terms of the first gen->shared_len + gen->n_block
2133 * dimensions of the computed schedule, check if it is bijective for
2134 * fixed values of the first gen->shared_len dimensions.
2135 * We perform this check by equating these dimensions to parameters.
2137 static int access_is_bijective(struct gpu_gen *gen, __isl_keep isl_map *access)
2139 int res;
2140 isl_set *par;
2141 isl_space *space;
2143 access = isl_map_copy(access);
2144 space = isl_space_params(isl_map_get_space(access));
2145 par = parametrization(space, gen->shared_len + gen->n_block,
2146 0, gen->shared_len, "s");
2147 access = isl_map_intersect_domain(access, par);
2148 res = isl_map_is_bijective(access);
2149 isl_map_free(access);
2151 return res;
2154 /* For the given array reference group, check whether the access is private
2155 * to the thread. That is, check that any given array element
2156 * is only accessed by a single thread.
2157 * We compute an access relation that maps the shared tile loop iterators
2158 * and the shared point loop iterators that will be wrapped over the
2159 * threads to the array elements.
2160 * We actually check that those iterators that will be wrapped
2161 * partition the array space. This check is stricter than necessary
2162 * since several iterations may be mapped onto the same thread
2163 * and then they could be allowed to access the same memory elements,
2164 * but our check does not allow this situation.
2166 * We also check that the index expression only depends on parallel
2167 * loops. That way, we can move those loops innermost and unroll them.
2168 * Again, we use a test that is stricter than necessary.
2169 * We actually check whether the index expression only depends
2170 * on the iterators that are wrapped over the threads.
2171 * These are necessarily parallel, but there may be more parallel loops.
2173 * Combining the injectivity of the first test with the single-valuedness
2174 * of the second test, we simply test for bijectivity.
2176 * If it turns out we can use registers, we compute the private memory
2177 * tile size using can_tile_for_shared_memory, after introducing a dependence
2178 * on the thread indices.
2180 * Before performing any of the above computations, we first check
2181 * if there is any reuse on the reference group. If not, we simply
2182 * return. If, moreover, the access is coalesced then we also remove
2183 * the shared memory tiling since we should just use global memory instead.
2185 static void check_private_group_access(struct gpu_gen *gen,
2186 struct gpu_array_ref_group *group)
2188 isl_map *acc;
2189 isl_union_map *access;
2190 int n_index = group->array->n_index;
2192 access = group_access_relation(group, 1, 1);
2193 if (isl_union_map_is_injective(access)) {
2194 if (group->shared_bound && access_is_coalesced(gen, access)) {
2195 free_bound_list(group->shared_bound, n_index);
2196 group->shared_bound = NULL;
2198 isl_union_map_free(access);
2199 return;
2201 access = isl_union_map_apply_domain(access,
2202 isl_union_map_copy(gen->shared_sched));
2204 acc = isl_map_from_union_map(access);
2206 if (!access_is_bijective(gen, acc)) {
2207 isl_map_free(acc);
2208 return;
2211 group->private_bound = create_bound_list(gen->ctx, n_index);
2212 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
2213 if (!can_tile_for_shared_memory(group->array, acc,
2214 group->private_bound)) {
2215 free_bound_list(group->private_bound, n_index);
2216 group->private_bound = NULL;
2219 isl_map_free(acc);
2222 /* Look for the last shared tile loop that affects the offset of the
2223 * shared or private tile and store the result in array->last_shared.
2224 * If there is no such loop, then array->last_shared is set to a value
2225 * before the first shared tile loop, in particular gen->tile_first - 1.
2227 static void set_last_shared(struct gpu_gen *gen,
2228 struct gpu_array_ref_group *group)
2230 int i, j;
2231 struct gpu_array_bound *bounds;
2232 int n_index = group->array->n_index;
2234 bounds = group->private_bound;
2235 if (!bounds)
2236 bounds = group->shared_bound;
2237 if (!bounds)
2238 return;
2240 for (j = gen->shared_len - 1; j >= gen->tile_first; --j) {
2241 for (i = 0; i < n_index; ++i) {
2242 isl_aff *lb;
2243 isl_aff *shift;
2245 lb = bounds[i].lb;
2246 if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
2247 break;
2249 shift = bounds[i].shift;
2250 if (!shift)
2251 continue;
2252 if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
2253 break;
2255 if (i < n_index)
2256 break;
2258 group->last_shared = j;
2261 /* Compute the sizes of all private arrays for the current kernel,
2262 * as well as the offsets of the private pieces in the original arrays.
2263 * If we cannot or don't want to privatize a given array group,
2264 * we use the shared memory tile sizes computed in
2265 * compute_group_shared_bound instead.
2267 * If we have been able to find a private or shared tile,
2268 * we also look for the last shared tile loop that affects the offset
2269 * (and therefore the group tile) and store the result in group->last_shared.
2271 * A privatized copy of all access relations from reference groups that
2272 * are mapped to private memory is stored in gen->privatization.
2274 static void compute_private_size(struct gpu_gen *gen)
2276 int i, j;
2277 isl_union_map *private;
2279 if (!gen->options->use_private_memory)
2280 return;
2282 private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));
2284 for (i = 0; i < gen->prog->n_array; ++i) {
2285 struct gpu_array_info *array = &gen->prog->array[i];
2287 if (gpu_array_is_read_only_scalar(array))
2288 continue;
2290 for (j = 0; j < array->n_group; ++j) {
2291 check_private_group_access(gen, array->groups[j]);
2293 if (!array->groups[j]->private_bound)
2294 continue;
2296 private = isl_union_map_union(private,
2297 group_access_relation(array->groups[j], 1, 1));
2300 for (j = 0; j < array->n_group; ++j) {
2301 array->groups[j]->last_shared = gen->shared_len - 1;
2302 set_last_shared(gen, array->groups[j]);
2306 if (isl_union_map_is_empty(private))
2307 isl_union_map_free(private);
2308 else {
2309 isl_union_map *priv;
2311 private = isl_union_map_apply_domain(private,
2312 isl_union_map_copy(gen->shared_sched));
2313 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
2314 private = isl_union_map_apply_domain(private, priv);
2315 gen->private_access = private;
2319 /* Compute the size of the tile specified by the list "bound" of n_index
2320 * gpu_array_bounds in number of elements and put the result in *size.
2322 static void tile_size(unsigned n_index, struct gpu_array_bound *bound,
2323 isl_int *size)
2325 int i;
2327 isl_int_set_si(*size, 1);
2329 for (i = 0; i < n_index; ++i)
2330 isl_int_mul(*size, *size, bound[i].size);
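/* For example, bounds with sizes 32 and 4 describe a tile of
 * 32 * 4 = 128 elements; check_shared_memory_bound below multiplies
 * this count by the element size in bytes to obtain the actual
 * shared memory footprint, e.g., 512 bytes for 4-byte elements.
 */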
2333 /* If max_shared_memory is not set to infinity (-1), then make
2334 * sure that the total amount of shared memory required by the
2335 * array reference groups mapped to shared memory is no larger
2336 * than this maximum.
2338 * We apply a greedy approach and discard (keep in global memory)
2339 * those groups that would result in a total memory size that
2340 * is larger than the maximum.
2342 static void check_shared_memory_bound(struct gpu_gen *gen)
2344 int i, j;
2345 isl_int left, size;
2347 if (gen->options->max_shared_memory < 0)
2348 return;
2350 isl_int_init(left);
2351 isl_int_init(size);
2352 isl_int_set_si(left, gen->options->max_shared_memory);
2354 for (i = 0; i < gen->prog->n_array; ++i) {
2355 struct gpu_array_info *array = &gen->prog->array[i];
2357 for (j = 0; j < array->n_group; ++j) {
2358 struct gpu_array_ref_group *group;
2360 group = array->groups[j];
2361 if (!group->shared_bound)
2362 continue;
2364 tile_size(array->n_index, group->shared_bound, &size);
2365 isl_int_mul_ui(size, size, array->size);
2367 if (isl_int_le(size, left)) {
2368 isl_int_sub(left, left, size);
2369 continue;
2372 free_bound_list(group->shared_bound, array->n_index);
2373 group->shared_bound = NULL;
2377 isl_int_clear(size);
2378 isl_int_clear(left);
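/* A hypothetical illustration of the greedy strategy above: with a
 * 48KB limit and groups requiring 40KB, 12KB and 4KB (in the order
 * visited), the first group fits (8KB left), the second is discarded
 * since 12KB > 8KB, and the third fits (4KB left). The result thus
 * depends on the traversal order; no optimal subset is sought.
 */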
2381 /* Fill up the groups array with singleton groups, i.e., one group
2382 * per reference, initializing the array, access, write and refs fields.
2383 * In particular the access field is initialized to the scheduled
2384 * access relation of the array reference.
2386 * Return the number of elements initialized, i.e., the number of
2387 * active references in the current kernel.
2389 static int populate_array_references(struct gpu_array_info *array,
2390 __isl_keep isl_union_map *sched, struct gpu_array_ref_group **groups)
2392 int i;
2393 int n;
2394 isl_ctx *ctx = isl_union_map_get_ctx(sched);
2396 n = 0;
2397 for (i = 0; i < array->n_ref; ++i) {
2398 isl_union_map *umap;
2399 isl_map *map;
2400 struct gpu_array_ref_group *group;
2401 struct gpu_stmt_access *access = array->refs[i];
2403 map = isl_map_copy(access->access);
2404 umap = isl_union_map_from_map(map);
2405 umap = isl_union_map_apply_domain(umap,
2406 isl_union_map_copy(sched));
2408 if (isl_union_map_is_empty(umap)) {
2409 isl_union_map_free(umap);
2410 continue;
2413 map = isl_map_from_union_map(umap);
2414 map = isl_map_detect_equalities(map);
2416 group = isl_calloc_type(ctx, struct gpu_array_ref_group);
2417 assert(group);
2418 group->array = array;
2419 group->access = map;
2420 group->write = access->write;
2421 group->refs = &array->refs[i];
2423 groups[n++] = group;
2426 return n;
2429 static void free_array_ref_group(struct gpu_array_ref_group *group,
2430 int n_index)
2432 if (!group)
2433 return;
2434 free_bound_list(group->shared_bound, n_index);
2435 free_bound_list(group->private_bound, n_index);
2436 isl_map_free(group->access);
2437 free(group->refs);
2438 free(group);
2441 /* Given a map where the input dimensions represent the tile loops,
2442 * eliminate the innermost of those that have a fixed value
2443 * until we reach one that does not (obviously) have a fixed value.
2445 static __isl_give isl_map *eliminate_fixed_inner_loops(
2446 __isl_take isl_map *access)
2448 int i, n;
2450 n = isl_map_dim(access, isl_dim_in);
2452 for (i = n - 1; i >= 0; --i) {
2453 if (!isl_map_plain_is_fixed(access, isl_dim_in, i, NULL))
2454 break;
2455 access = isl_map_eliminate(access, isl_dim_in, i, 1);
2457 return access;
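/* For example (hypothetical input), { [i, 0, 1] -> A[i] } becomes
 * { [i, j, k] -> A[i] }: the two innermost input dimensions have fixed
 * values and are eliminated (left unconstrained), so only the outer
 * dimensions take part in the subsequent overlap test.
 */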
2460 /* Check if the access relations of group1 and group2 overlap within
2461 * the innermost loop. In particular, ignore any inner dimension
2462 * with a fixed value.
2463 * The copying to and from shared memory will be performed within
2464 * the innermost actual loop so we are only allowed to consider
2465 * the dimensions up to that innermost loop while checking whether
2466 * two access relations overlap.
2468 static int accesses_overlap(struct gpu_array_ref_group *group1,
2469 struct gpu_array_ref_group *group2)
2471 int empty;
2472 isl_map *access1, *access2;
2474 access1 = isl_map_copy(group1->access);
2475 access1 = eliminate_fixed_inner_loops(access1);
2476 access2 = isl_map_copy(group2->access);
2477 access2 = eliminate_fixed_inner_loops(access2);
2478 access1 = isl_map_intersect(access1, access2);
2479 empty = isl_map_is_empty(access1);
2480 isl_map_free(access1);
2482 return !empty;
2485 /* If two groups have overlapping access relations (within the innermost
2486 * loop) and if one of them involves a write, then merge the two groups
2487 * into one.
2489 * We keep track of the grouping in "leader". leader[j] points to
2490 * an earlier group array element that belongs to the same group,
2491 * or the array element j itself if this element is the first in the group.
2493 * Return the number of group leaders.
2495 static int group_overlapping_writes(int n,
2496 struct gpu_array_ref_group **groups, int *leader)
2498 int i, j;
2499 int n_group = n;
2501 for (i = 0; i < n; ++i) {
2502 int l = i;
2503 groups[l]->n_ref = 1;
2504 for (j = i - 1; j >= 0; --j) {
2505 if (leader[j] != j)
2506 continue;
2507 if (!groups[l]->write && !groups[j]->write)
2508 continue;
2510 if (!accesses_overlap(groups[l], groups[j]))
2511 continue;
2513 groups[j]->access = isl_map_union(groups[j]->access,
2514 groups[l]->access);
2515 groups[j]->write = 1;
2516 groups[l]->access = NULL;
2517 groups[j]->n_ref += groups[l]->n_ref;
2518 l = leader[l] = j;
2519 n_group--;
2521 leader[i] = l;
2524 return n_group;
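/* Illustration (assumed input): for three references where reference 2
 * overlaps reference 0 and at least one of the two is a write, while
 * reference 1 overlaps neither, the leader array becomes [0, 1, 0] and
 * two group leaders (0 and 1) remain.
 */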
2527 /* Compute the size of the shared array corresponding to the given
2528 * array reference group, based on the accesses from the current kernel,
2529 * as well as the offset of the shared piece in the original array.
2531 static void compute_group_shared_bound(struct gpu_gen *gen,
2532 struct gpu_array_info *array, struct gpu_array_ref_group *group)
2534 isl_ctx *ctx = isl_space_get_ctx(array->dim);
2536 if (!gen->options->use_shared_memory)
2537 return;
2538 if (gpu_array_is_read_only_scalar(array))
2539 return;
2541 group->shared_bound = create_bound_list(ctx, array->n_index);
2542 if (!can_tile_for_shared_memory(array, group->access,
2543 group->shared_bound)) {
2544 free_bound_list(group->shared_bound, array->n_index);
2545 group->shared_bound = NULL;
2549 /* Is the size of the tile specified by "bound" smaller than the sum of
2550 * the sizes of the tiles specified by "bound1" and "bound2"?
2552 static int smaller_tile(unsigned n_index, struct gpu_array_bound *bound,
2553 struct gpu_array_bound *bound1, struct gpu_array_bound *bound2)
2555 int smaller;
2556 isl_int size, size1, size2;
2558 isl_int_init(size);
2559 isl_int_init(size1);
2560 isl_int_init(size2);
2562 tile_size(n_index, bound, &size);
2563 tile_size(n_index, bound1, &size1);
2564 tile_size(n_index, bound2, &size2);
2566 isl_int_sub(size, size, size1);
2567 isl_int_sub(size, size, size2);
2568 smaller = isl_int_is_neg(size);
2570 isl_int_clear(size2);
2571 isl_int_clear(size1);
2572 isl_int_clear(size);
2574 return smaller;
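/* For example, if the merged group would need a tile of 40 elements
 * while the individual groups need tiles of 32 and 16 elements
 * (48 in total), then merging is considered beneficial since 40 < 48.
 */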
2577 /* Given an initial grouping of array references and shared memory tiles
2578 * for each group that allows for a shared memory tile, merge two groups
2579 * if both have a shared memory tile, the merged group also has
2580 * a shared memory tile and the size of the tile for the merge group
2581 * is smaller than the sum of the tile sizes of the individual groups.
2583 * Return the number of group leaders after merging.
2585 static int group_common_shared_memory_tile(struct gpu_array_info *array, int n,
2586 struct gpu_array_ref_group **groups, int *leader, int n_group)
2588 int i, j;
2589 isl_ctx *ctx = isl_space_get_ctx(array->dim);
2591 for (i = 0; n_group > 1 && i < n; ++i) {
2592 int l = i;
2593 if (leader[i] != i)
2594 continue;
2595 if (!groups[i]->shared_bound)
2596 continue;
2597 for (j = i - 1; j >= 0; --j) {
2598 isl_map *map;
2599 int empty;
2600 struct gpu_array_bound *shared_bound;
2602 if (leader[j] != j)
2603 continue;
2604 if (!groups[j]->shared_bound)
2605 continue;
2607 map = isl_map_intersect(isl_map_copy(groups[l]->access),
2608 isl_map_copy(groups[j]->access));
2609 empty = isl_map_is_empty(map);
2610 isl_map_free(map);
2612 if (empty)
2613 continue;
2615 map = isl_map_union(isl_map_copy(groups[l]->access),
2616 isl_map_copy(groups[j]->access));
2617 shared_bound = create_bound_list(ctx, array->n_index);
2618 if (!can_tile_for_shared_memory(array, map,
2619 shared_bound) ||
2620 !smaller_tile(array->n_index, shared_bound,
2621 groups[l]->shared_bound,
2622 groups[j]->shared_bound)) {
2623 isl_map_free(map);
2624 free_bound_list(shared_bound, array->n_index);
2625 continue;
2628 free_bound_list(groups[j]->shared_bound,
2629 array->n_index);
2630 groups[j]->shared_bound = shared_bound;
2631 isl_map_free(groups[j]->access);
2632 groups[j]->access = map;
2633 groups[j]->n_ref += groups[l]->n_ref;
2634 l = leader[l] = j;
2635 n_group--;
2639 return n_group;
2642 /* Extract an array of array reference groups from the array of references
2643 * and the grouping information in "leader".
2645 * Store the results in array->n_group and array->groups.
2647 static void extract_array_groups(isl_ctx *ctx, struct gpu_array_info *array,
2648 int n, struct gpu_array_ref_group **groups, int *leader, int n_group)
2650 int i, j;
2652 for (i = 2; i < n; ++i)
2653 leader[i] = leader[leader[i]];
2655 array->n_group = n_group;
2656 array->groups = isl_alloc_array(ctx, struct gpu_array_ref_group *,
2657 n_group);
2658 assert(array->groups);
2660 j = 0;
2661 for (i = 0; i < n; ++i) {
2662 int k, l;
2663 struct gpu_stmt_access **refs;
2665 if (leader[i] != i) {
2666 groups[i]->refs = NULL;
2667 free_array_ref_group(groups[i], array->n_index);
2668 continue;
2671 refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
2672 groups[i]->n_ref);
2673 assert(refs);
2674 l = 0;
2675 for (k = i; k < n; ++k)
2676 if (leader[k] == i) {
2677 refs[l++] = *groups[k]->refs;
2678 (*groups[k]->refs)->group = j;
2681 groups[i]->refs = refs;
2682 groups[i]->nr = j;
2683 array->groups[j++] = groups[i];
2687 /* Group array references that should be considered together when
2688 * deciding whether to access them from private, shared or global memory.
2690 * In particular, if two array references overlap and if one of them
2691 * is a write, then the two references are grouped together.
2692 * Furthermore, if two groups admit a shared memory tile and if the
2693 * combination of the two also admits a shared memory tile, we merge
2694 * the two groups.
2696 * During the construction the group->refs field points to a single
2697 * array reference inside the array of array references, while
2698 * group->n_ref contains the number of elements in leader that
2699 * (directly or indirectly) point to this group, provided the group
2700 * is a leader.
2702 static void group_array_references(struct gpu_gen *gen,
2703 struct gpu_array_info *array, __isl_keep isl_union_map *sched)
2705 int i;
2706 int n, n_group;
2707 isl_ctx *ctx = isl_union_map_get_ctx(sched);
2708 struct gpu_array_ref_group **groups;
2709 int *leader;
2711 groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
2712 array->n_ref);
2713 assert(groups);
2715 n = populate_array_references(array, sched, groups);
2717 leader = isl_alloc_array(ctx, int, n);
2718 assert(leader);
2720 n_group = group_overlapping_writes(n, groups, leader);
2722 for (i = 0; i < n; ++i)
2723 if (leader[i] == i)
2724 compute_group_shared_bound(gen, array, groups[i]);
2726 n_group = group_common_shared_memory_tile(array, n, groups,
2727 leader, n_group);
2729 extract_array_groups(ctx, array, n, groups, leader, n_group);
2731 free(leader);
2732 free(groups);
2735 /* Take tiled_sched, project it onto the shared tile loops and
2736 * the loops that will be wrapped over the threads and
2737 * store the result in gen->shared_sched.
2738 * Also compute a projection that projects out the loops that will be
2739 * wrapped over the threads and store this projection in gen->shared_proj.
2741 static void compute_shared_sched(struct gpu_gen *gen)
2743 isl_space *dim;
2744 isl_map *proj;
2745 isl_set *par;
2746 isl_union_map *sched;
2748 sched = isl_union_map_copy(gen->tiled_sched);
2750 dim = isl_union_map_get_space(sched);
2751 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
2752 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
2754 dim = isl_union_map_get_space(sched);
2755 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
2757 gen->shared_sched = sched;
2758 gen->shared_proj = isl_union_map_from_map(proj);
2761 /* Group references of all arrays in the program.
2763 static void group_references(struct gpu_gen *gen)
2765 int i;
2766 isl_union_map *sched;
2768 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
2769 isl_union_map_copy(gen->shared_proj));
2771 for (i = 0; i < gen->prog->n_array; ++i)
2772 group_array_references(gen, &gen->prog->array[i], sched);
2774 isl_union_map_free(sched);
2777 /* Free all array information that is local to the current kernel.
2779 static void free_local_array_info(struct gpu_gen *gen)
2781 int i, j;
2783 for (i = 0; i < gen->prog->n_array; ++i) {
2784 struct gpu_array_info *array = &gen->prog->array[i];
2786 for (j = 0; j < array->n_group; ++j)
2787 free_array_ref_group(array->groups[j], array->n_index);
2788 free(array->groups);
2792 /* Compute the effective grid size as a list of the sizes in each dimension.
2794 * The grid size specified by the user or set by default
2795 * in read_grid_sizes() and applied in tile_schedule(),
2796 * may be too large for the given code in the sense that
2797 * it may contain blocks that don't need to execute anything.
2798 * We therefore don't return this grid size, but instead the
2799 * smallest grid size that ensures that all blocks that actually
2800 * execute code are included in the grid.
2802 * We first extract a description of the grid, i.e., the possible values
2803 * of the block ids, from gen->tiled_sched.
2804 * The block ids are parameters in gen->tiled_sched.
2805 * We simply need to change them into set dimensions.
2807 * Then, for each block dimension, we compute the maximal value of the block id
2808 * and add one.
2810 static __isl_give isl_multi_pw_aff *extract_grid_size(struct gpu_gen *gen,
2811 struct ppcg_kernel *kernel)
2813 int i;
2814 isl_set *grid;
2815 isl_multi_pw_aff *mpa;
2817 grid = isl_union_map_params(isl_union_map_copy(gen->tiled_sched));
2818 grid = isl_set_from_params(grid);
2819 grid = isl_set_add_dims(grid, isl_dim_set, gen->n_grid);
2820 for (i = 0; i < gen->n_grid; ++i) {
2821 int pos;
2822 char name[20];
2824 snprintf(name, sizeof(name), "b%d", i);
2825 pos = isl_set_find_dim_by_name(grid, isl_dim_param, name);
2826 assert(pos >= 0);
2827 grid = isl_set_equate(grid, isl_dim_param, pos, isl_dim_set, i);
2828 grid = isl_set_project_out(grid, isl_dim_param, pos, 1);
2831 mpa = isl_multi_pw_aff_zero(isl_set_get_space(grid));
2832 for (i = 0; i < gen->n_grid; ++i) {
2833 isl_space *space;
2834 isl_aff *one;
2835 isl_pw_aff *bound;
2837 bound = isl_set_dim_max(isl_set_copy(grid), i);
2838 bound = isl_pw_aff_coalesce(bound);
2839 bound = isl_pw_aff_gist(bound, isl_set_copy(kernel->context));
2841 space = isl_pw_aff_get_domain_space(bound);
2842 one = isl_aff_zero_on_domain(isl_local_space_from_space(space));
2843 one = isl_aff_add_constant_si(one, 1);
2844 bound = isl_pw_aff_add(bound, isl_pw_aff_from_aff(one));
2845 mpa = isl_multi_pw_aff_set_pw_aff(mpa, i, bound);
2847 isl_set_free(grid);
2849 return mpa;
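/* Illustration (hypothetical bounds): if the tiled schedule constrains
 * block id b0 by 0 <= b0 < ceil(n/16), then isl_set_dim_max yields
 * ceil(n/16) - 1 and the effective grid size in that dimension becomes
 * ceil(n/16), even if the user specified a larger grid.
 */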
2852 void ppcg_kernel_free(void *user)
2854 struct ppcg_kernel *kernel = user;
2855 int i;
2857 if (!kernel)
2858 return;
2860 isl_multi_pw_aff_free(kernel->grid_size);
2861 isl_set_free(kernel->context);
2862 isl_union_set_free(kernel->arrays);
2863 isl_space_free(kernel->space);
2864 isl_ast_node_free(kernel->tree);
2866 for (i = 0; i < kernel->n_array; ++i)
2867 isl_pw_aff_list_free(kernel->array[i].bound);
2868 free(kernel->array);
2870 for (i = 0; i < kernel->n_var; ++i) {
2871 free(kernel->var[i].name);
2872 isl_vec_free(kernel->var[i].size);
2874 free(kernel->var);
2876 free(kernel);
2879 static void create_kernel_var(isl_ctx *ctx, struct gpu_array_ref_group *group,
2880 struct ppcg_kernel_var *var)
2882 int j;
2883 struct gpu_array_bound *bounds;
2884 isl_printer *p;
2885 char *name;
2887 var->array = group->array;
2889 bounds = group->private_bound;
2890 var->type = ppcg_access_private;
2891 if (!bounds) {
2892 bounds = group->shared_bound;
2893 var->type = ppcg_access_shared;
2896 p = isl_printer_to_str(ctx);
2897 p = print_array_name(p, group);
2898 var->name = isl_printer_get_str(p);
2899 isl_printer_free(p);
2901 var->size = isl_vec_alloc(ctx, group->array->n_index);
2903 for (j = 0; j < group->array->n_index; ++j)
2904 var->size = isl_vec_set_element(var->size, j, bounds[j].size);
2907 static void create_kernel_vars(struct gpu_gen *gen, struct ppcg_kernel *kernel)
2909 int i, j, n;
2911 n = 0;
2912 for (i = 0; i < gen->prog->n_array; ++i) {
2913 struct gpu_array_info *array = &gen->prog->array[i];
2915 for (j = 0; j < array->n_group; ++j) {
2916 struct gpu_array_ref_group *group = array->groups[j];
2917 if (group->private_bound || group->shared_bound)
2918 ++n;
2922 kernel->n_var = n;
2923 kernel->var = isl_calloc_array(gen->ctx, struct ppcg_kernel_var, n);
2924 assert(kernel->var);
2926 n = 0;
2927 for (i = 0; i < gen->prog->n_array; ++i) {
2928 struct gpu_array_info *array = &gen->prog->array[i];
2930 for (j = 0; j < array->n_group; ++j) {
2931 struct gpu_array_ref_group *group = array->groups[j];
2932 if (!group->private_bound && !group->shared_bound)
2933 continue;
2934 create_kernel_var(gen->ctx, group, &kernel->var[n]);
2935 ++n;
2940 /* The sizes of the arrays on the host that have been computed by
2941 * extract_array_info may depend on the parameters. Use the extra
2942 * constraints on the parameters that are valid at "host_domain"
2943 * to simplify these expressions and store the results in kernel->array.
2945 static void localize_bounds(struct gpu_gen *gen, struct ppcg_kernel *kernel,
2946 __isl_keep isl_set *host_domain)
2948 int i, j;
2949 isl_set *context;
2951 kernel->array = isl_calloc_array(gen->ctx,
2952 struct gpu_local_array_info, gen->prog->n_array);
2953 assert(kernel->array);
2954 kernel->n_array = gen->prog->n_array;
2956 context = isl_set_copy(host_domain);
2957 context = isl_set_params(context);
2959 for (i = 0; i < gen->prog->n_array; ++i) {
2960 struct gpu_array_info *array = &gen->prog->array[i];
2961 isl_pw_aff_list *local;
2963 if (array->n_group == 0)
2964 continue;
2966 local = isl_pw_aff_list_alloc(gen->ctx, array->n_index);
2968 for (j = 0; j < array->n_index; ++j) {
2969 isl_pw_aff *pwaff;
2971 pwaff = isl_pw_aff_copy(array->bound[j]);
2972 pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
2973 local = isl_pw_aff_list_add(local, pwaff);
2976 kernel->array[i].bound = local;
2978 isl_set_free(context);
2981 /* Find the element in prog->stmts that has the given "id".
2982 * Return NULL if no such gpu_stmt can be found.
2984 static struct gpu_stmt *find_stmt(struct gpu_prog *prog, __isl_keep isl_id *id)
2986 int i;
2988 for (i = 0; i < prog->n_stmts; ++i) {
2989 if (id == prog->stmts[i].id)
2990 break;
2993 return i < prog->n_stmts ? &prog->stmts[i] : NULL;
2996 /* Set gen->tile_len and gen->n_parallel to those of the statement
2997 * affected by the first map (part of the schedule)
2998 * on which this function is called.
2999 * Because of the way the schedule is constructed, the other statements
3000 * in the list, if any, should have the same values for these properties.
3002 static int extract_tile_len(__isl_take isl_map *map, void *user)
3004 struct gpu_gen *gen = (struct gpu_gen *) user;
3005 isl_id *id;
3006 struct gpu_stmt *stmt;
3008 id = isl_map_get_tuple_id(map, isl_dim_in);
3009 stmt = find_stmt(gen->prog, id);
3010 isl_id_free(id);
3012 isl_map_free(map);
3014 if (!stmt)
3015 isl_die(gen->ctx, isl_error_unknown,
3016 "statement not found", return -1);
3018 gen->tile_len = stmt->tile_len;
3019 gen->n_parallel = stmt->n_parallel;
3021 return -1;
3024 void ppcg_kernel_stmt_free(void *user)
3026 int i;
3027 struct ppcg_kernel_stmt *stmt = user;
3029 if (!stmt)
3030 return;
3032 switch (stmt->type) {
3033 case ppcg_kernel_copy:
3034 isl_ast_expr_free(stmt->u.c.index);
3035 isl_ast_expr_free(stmt->u.c.local_index);
3036 break;
3037 case ppcg_kernel_domain:
3038 for (i = 0; i < stmt->u.d.n_access; ++i) {
3039 isl_ast_expr_list_free(stmt->u.d.access[i].index);
3040 free(stmt->u.d.access[i].local_name);
3042 free(stmt->u.d.access);
3043 break;
3044 case ppcg_kernel_sync:
3045 break;
3048 free(stmt);
3051 /* Set the options of "build" to
3053 * { space -> [x] : x >= first }
3055 static __isl_give isl_ast_build *set_unroll(
3056 __isl_take isl_ast_build *build, __isl_take isl_space *space,
3057 int first)
3059 isl_ctx *ctx;
3060 isl_map *unroll;
3061 isl_union_map *opt;
3063 ctx = isl_ast_build_get_ctx(build);
3065 space = isl_space_from_domain(space);
3066 space = isl_space_add_dims(space, isl_dim_out, 1);
3067 space = isl_space_set_tuple_name(space, isl_dim_out, "unroll");
3068 unroll = isl_map_universe(space);
3069 unroll = isl_map_lower_bound_si(unroll, isl_dim_out, 0, first);
3070 opt = isl_union_map_from_map(unroll);
3072 build = isl_ast_build_set_options(build, opt);
3074 return build;
3077 /* Return a list of isl_ids of the form "prefix%d".
3079 static __isl_give isl_id_list *generate_names(isl_ctx *ctx,
3080 int n, const char *prefix)
3082 int i;
3083 char name[10];
3084 isl_id_list *names;
3086 names = isl_id_list_alloc(ctx, n);
3087 for (i = 0; i < n; ++i) {
3088 isl_id *id;
3090 snprintf(name, sizeof(name), "%s%d", prefix, i);
3091 id = isl_id_alloc(ctx, name, NULL);
3092 names = isl_id_list_add(names, id);
3095 return names;
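/* A usage sketch (the variables are assumptions for illustration):
 *
 * iterators = generate_names(ctx, 3, "c");
 * build = isl_ast_build_set_iterators(build, iterators);
 *
 * makes the AST generator name the generated loop iterators c0, c1
 * and c2, as done in create_domain_leaf and copy_access below.
 */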
3098 /* Extend the schedule "schedule" with the part of "extension"
3099 * starting at "first" up to "len".
3101 static __isl_give isl_union_map *extend_schedule(
3102 __isl_take isl_union_map *schedule,
3103 __isl_take isl_union_map *extension, int first, int len)
3105 isl_space *space;
3106 isl_map *proj;
3107 isl_union_map *umap;
3108 isl_set *set;
3110 space = isl_union_map_get_space(schedule);
3111 space = isl_space_set_from_params(space);
3112 space = isl_space_add_dims(space, isl_dim_set, len);
3113 proj = isl_set_identity(isl_set_universe(space));
3114 proj = isl_map_project_out(proj, isl_dim_out, 0, first);
3115 extension = isl_union_map_apply_range(extension,
3116 isl_union_map_from_map(proj));
3118 schedule = isl_union_map_range_product(schedule, extension);
3120 return schedule;
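/* For instance, if "schedule" has a 4-dimensional range, "extension" a
 * 6-dimensional one, first = 4 and len = 6, then the first 4 output
 * dimensions of the extension are projected out and its remaining 2
 * dimensions are appended to the schedule through the range product.
 */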
3123 /* This function is called for each access to an array in each instance
3124 * in the kernel of some statement in the original code.
3125 * Replace that access by an access to global, shared or private memory
3126 * and store the results in *kernel_access.
3128 * Since the array in shared or private memory is just
3129 * a shifted copy of part of the original array, we simply need
3130 * to subtract the lower bound, which was computed
3131 * in can_tile_for_shared_memory.
3132 * If any of the indices is strided, then we first add
3133 * shared_bound[i].shift and divide by shared_bound[i].stride.
3135 * If the given array is accessed directly from global memory,
3136 * we don't need to perform any shifting and simply simplify
3137 * the expression in the context of the domain instead.
3139 * If the array space (range of access) has no name, then we are
3140 * accessing an iterator in the original program.
3142 * The input stmt_access->access relation maps the iteration domain
3143 * of the current statement to an array element.
3144 * The first step is to reformulate
3145 * this access relation in terms of the loop iterators of the generated
3146 * code through precomposition with gen->stmt_it.
3148 * The expressions in "bounds" are formulated in terms of the first
3149 * gen->shared_len dimensions of the computed schedule using the mapping
3150 * sched2shared which maps the loop iterators to these dimensions.
3152 static void compute_index_expression(struct gpu_gen *gen,
3153 struct ppcg_kernel_access *kernel_access,
3154 struct gpu_stmt_access *stmt_access, __isl_keep isl_map *stmt_it,
3155 __isl_keep isl_map *sched2shared, __isl_keep isl_ast_build *build)
3157 isl_map *access;
3158 isl_pw_multi_aff *pma;
3159 int i;
3160 unsigned n_index;
3161 struct gpu_array_bound *bounds = NULL;
3163 if (isl_map_has_tuple_name(stmt_access->access, isl_dim_out)) {
3164 int i;
3165 const char *name;
3166 struct gpu_array_ref_group *group;
3167 isl_printer *p;
3169 name = isl_map_get_tuple_name(stmt_access->access, isl_dim_out);
3171 for (i = 0; i < gen->prog->n_array; ++i) {
3172 if (strcmp(name, gen->prog->array[i].name))
3173 continue;
3174 kernel_access->array = &gen->prog->array[i];
3175 kernel_access->local_array = &gen->kernel->array[i];
3177 assert(kernel_access->array);
3178 group = kernel_access->array->groups[stmt_access->group];
3179 p = isl_printer_to_str(gen->ctx);
3180 p = print_array_name(p, group);
3181 kernel_access->local_name = isl_printer_get_str(p);
3182 isl_printer_free(p);
3183 bounds = group->private_bound;
3184 kernel_access->type = ppcg_access_private;
3185 if (!bounds) {
3186 bounds = group->shared_bound;
3187 kernel_access->type = ppcg_access_shared;
3190 if (!bounds)
3191 kernel_access->type = ppcg_access_global;
3193 n_index = isl_map_dim(stmt_access->access, isl_dim_out);
3194 kernel_access->index = isl_ast_expr_list_alloc(gen->ctx, n_index);
3196 if (n_index == 0)
3197 return;
3199 access = isl_map_copy(stmt_access->access);
3200 access = isl_map_apply_range(isl_map_copy(stmt_it), access);
3201 pma = isl_pw_multi_aff_from_map(access);
3202 pma = isl_pw_multi_aff_coalesce(pma);
3204 for (i = 0; i < n_index; ++i) {
3205 isl_set *domain;
3206 isl_pw_aff *index;
3207 isl_ast_expr *expr;
3209 index = isl_pw_multi_aff_get_pw_aff(pma, i);
3211 if (!kernel_access->array) {
3212 } else if (!bounds) {
3213 domain = isl_map_domain(isl_map_copy(stmt_it));
3214 index = isl_pw_aff_coalesce(index);
3215 index = isl_pw_aff_gist(index, domain);
3216 } else {
3217 domain = isl_map_domain(isl_map_copy(stmt_it));
3218 index = shift_index(index, kernel_access->array,
3219 &bounds[i], domain, isl_map_copy(sched2shared));
3222 expr = isl_ast_build_expr_from_pw_aff(build, index);
3224 kernel_access->index = isl_ast_expr_list_add(
3225 kernel_access->index, expr);
3228 isl_pw_multi_aff_free(pma);
3231 /* This function is called for each instance of a user statement
3232 * in the kernel.
3234 * We attach a struct ppcg_kernel_stmt to the "node", containing
3235 * local information about the accesses.
3236 * This information is computed from stmt_it, which expresses the domain
3237 * elements in terms of the generated loops, and sched2shared,
3238 * which expresses the first shared_len dimensions of the schedule
3239 * computed by PPCG in terms of the generated loops.
3241 static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
3242 __isl_keep isl_ast_build *build, void *user)
3244 struct gpu_gen *gen = (struct gpu_gen *) user;
3245 struct ppcg_kernel_stmt *stmt;
3246 isl_id *id;
3247 isl_map *stmt_it, *sched2shared;
3248 isl_ast_expr *expr, *arg;
3249 isl_union_map *schedule;
3250 int i, n;
3251 struct gpu_stmt_access *access;
3253 stmt = isl_calloc_type(gen->ctx, struct ppcg_kernel_stmt);
3254 if (!stmt)
3255 return isl_ast_node_free(node);
3257 expr = isl_ast_node_user_get_expr(node);
3258 arg = isl_ast_expr_get_op_arg(expr, 0);
3259 id = isl_ast_expr_get_id(arg);
3261 schedule = isl_ast_build_get_schedule(build);
3262 stmt_it = isl_map_reverse(isl_map_from_union_map(schedule));
3263 sched2shared = compute_sched_to_shared(gen, isl_map_copy(stmt_it));
3265 stmt->type = ppcg_kernel_domain;
3266 stmt->u.d.stmt = find_stmt(gen->prog, id);
3267 if (!stmt->u.d.stmt)
3268 goto error;
3270 n = 0;
3271 for (access = stmt->u.d.stmt->accesses; access; access = access->next)
3272 ++n;
3274 stmt->u.d.access = isl_calloc_array(gen->ctx,
3275 struct ppcg_kernel_access, n);
3276 if (!stmt->u.d.access)
3277 goto error;
3279 stmt->u.d.n_access = n;
3281 access = stmt->u.d.stmt->accesses;
3282 for (i = 0; i < n; ++i, access = access->next) {
3283 compute_index_expression(gen, &stmt->u.d.access[i], access,
3284 stmt_it, sched2shared, build);
3287 isl_id_free(id);
3288 isl_map_free(stmt_it);
3289 isl_map_free(sched2shared);
3290 isl_ast_expr_free(arg);
3291 isl_ast_expr_free(expr);
3293 id = isl_id_alloc(gen->ctx, NULL, stmt);
3294 id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
3295 return isl_ast_node_set_annotation(node, id);
3296 error:
3297 isl_id_free(id);
3298 isl_map_free(stmt_it);
3299 ppcg_kernel_stmt_free(stmt);
3300 isl_map_free(sched2shared);
3301 return isl_ast_node_free(node);
3304 /* This function is called when code has been generated for the shared
3305 * tile loops. The "schedule" refers only to the original statements.
3307 * We extend the schedule with that part of gen->local_sched that hasn't
3308 * been taken into account yet. This introduces parameters referring
3309 * to thread ids in the schedule, so we add them (with the appropriate
3310 * bounds) to the context as well.
3311 * Finally, we set the appropriate unrolling options
3312 * if gen->first_unroll is set.
3314 static __isl_give isl_ast_node *create_domain_leaf(
3315 __isl_take isl_union_map *schedule, __isl_take isl_ast_build *build,
3316 void *user)
3318 struct gpu_gen *gen = (struct gpu_gen *) user;
3319 isl_space *space;
3320 isl_union_map *sched;
3321 isl_ast_node *tree;
3322 isl_set *set;
3323 isl_id_list *iterators;
3324 int n;
3326 schedule = extend_schedule(schedule,
3327 isl_union_map_copy(gen->local_sched),
3328 gen->shared_len, gen->thread_tiled_len);
3330 space = isl_ast_build_get_schedule_space(build);
3331 set = isl_set_universe(space);
3332 set = add_bounded_parameters(set, gen->n_block, gen->block_dim, "t");
3333 build = isl_ast_build_restrict(build, set);
3335 n = gen->thread_tiled_len - gen->shared_len;
3337 if (gen->first_unroll >= 0) {
3338 space = isl_space_set_alloc(gen->ctx, 0, n);
3339 build = set_unroll(build, space, gen->first_unroll);
3341 iterators = generate_names(gen->ctx, n, "c");
3342 build = isl_ast_build_set_iterators(build, iterators);
3343 build = isl_ast_build_set_at_each_domain(build, &at_each_domain, gen);
3344 tree = isl_ast_build_ast_from_schedule(build, schedule);
3345 isl_ast_build_free(build);
3347 return tree;
3350 /* This function is called for each leaf in the AST of the code
3351 * for copying to or from shared/private memory.
3352 * The statement name is {read,write}_{shared,private}_<array>.
3354 * The schedule is of the form
3356 * [A -> T] -> L
3358 * where A refers to a piece of an array and T to the corresponding
3359 * shifted tile. We split this schedule into mappings L -> A and L -> T
3360 * and store the corresponding expressions in stmt->index and stmt->local_index,
3361 * where stmt represents the copy statement.
3363 static __isl_give isl_ast_node *create_copy_leaf(
3364 __isl_take isl_ast_build *build, void *user)
3366 struct gpu_gen *gen = (struct gpu_gen *) user;
3367 struct ppcg_kernel_stmt *stmt;
3368 isl_id *id;
3369 isl_ast_expr *expr;
3370 isl_ast_node *node;
3371 isl_space *space;
3372 isl_map *access, *local_access, *map;
3373 isl_pw_multi_aff *pma;
3374 const char *name;
3375 int array_index;
3377 stmt = isl_calloc_type(gen->ctx, struct ppcg_kernel_stmt);
3378 if (!stmt)
3379 return isl_ast_build_free(build);
3381 access = isl_map_from_union_map(isl_ast_build_get_schedule(build));
3382 name = isl_map_get_tuple_name(access, isl_dim_in);
3383 stmt->u.c.read = !strncmp(name, "read", 4);
3384 access = isl_map_reverse(access);
3385 space = isl_space_unwrap(isl_space_range(isl_map_get_space(access)));
3386 local_access = isl_map_copy(access);
3388 map = isl_map_domain_map(isl_map_universe(isl_space_copy(space)));
3389 id = isl_map_get_tuple_id(access, isl_dim_out);
3390 map = isl_map_set_tuple_id(map, isl_dim_in, id);
3391 access = isl_map_apply_range(access, map);
3392 pma = isl_pw_multi_aff_from_map(access);
3393 expr = isl_ast_build_call_from_pw_multi_aff(build, pma);
3394 stmt->u.c.index = expr;
3396 map = isl_map_range_map(isl_map_universe(space));
3397 id = isl_map_get_tuple_id(local_access, isl_dim_out);
3398 map = isl_map_set_tuple_id(map, isl_dim_in, id);
3399 local_access = isl_map_apply_range(local_access, map);
3400 pma = isl_pw_multi_aff_from_map(local_access);
3401 expr = isl_ast_build_call_from_pw_multi_aff(build, pma);
3402 stmt->u.c.local_index = expr;
3404 stmt->u.c.array = gen->copy_group->array;
3405 array_index = stmt->u.c.array - gen->prog->array;
3406 stmt->u.c.local_array = &gen->kernel->array[array_index];
3407 stmt->type = ppcg_kernel_copy;
3409 space = isl_ast_build_get_schedule_space(build);
3410 space = isl_space_from_domain(space);
3411 space = isl_space_set_tuple_name(space, isl_dim_out, name);
3412 expr = isl_ast_build_call_from_pw_multi_aff(build,
3413 isl_pw_multi_aff_from_multi_aff(isl_multi_aff_zero(space)));
3414 node = isl_ast_node_alloc_user(expr);
3415 isl_ast_build_free(build);
3417 id = isl_id_alloc(gen->ctx, NULL, stmt);
3418 id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
3419 return isl_ast_node_set_annotation(node, id);
3422 /* Given a schedule of the form
3424 * [S -> A] -> L
3426 * (with S the first shared_len dimensions of the computed schedule,
3427 * A the array and L the schedule corresponding to the generated loops),
3428 * indicating where to copy the array elements that need to be copied,
3429 * construct code for performing the copying.
3431 * "group" is the array reference group that is being copied
3432 * "type" is either "read" or "write"
3433 * private is set if copying needs to be performed to/from registers
3435 * We first construct a mapping to a shifted tile of the array,
3437 * [S -> A] -> T(S,A) (1)
3439 * If private is set, then we also use this mapping as a schedule
3440 * (which is already thread-specific and will be completely unrolled).
3441 * Otherwise, we wrap/tile the range over the threads.
3442 * The result is
3444 * [S -> A] -> T'(S,A)
3446 * Combined with the given schedule, we have
3448 * [S -> A] -> [L -> T'(S,A)] (2)
3450 * From the shifted tile mapping, we construct a mapping
3452 * [S -> A] -> [A -> T(S,A)]
3454 * and apply it to the schedule (2), obtaining
3456 * [A -> T(S(L),A)] -> [L -> T'(S(L),A)]
3458 * Note that we can project out S because it is uniquely defined by L.
3460 static __isl_give isl_ast_node *copy_access(struct gpu_gen *gen,
3461 __isl_take isl_map *sched,
3462 const char *type, struct gpu_array_ref_group *group,
3463 __isl_take isl_ast_build *build, int private)
3465 const char *array_name;
3466 const char *mem = private ? "private" : "shared";
3467 char *name;
3468 isl_space *space;
3469 isl_ast_node *tree;
3470 isl_map *schedule, *shift, *map;
3471 isl_set *set;
3472 isl_id_list *iterators;
3473 int n;
3475 shift = isl_set_unwrap(isl_map_domain(isl_map_copy(sched)));
3476 array_name = isl_map_get_tuple_name(shift, isl_dim_out);
3477 shift = shift_access(shift, group);
3479 schedule = isl_map_copy(shift);
3480 if (!private)
3481 schedule = tile_access_schedule(gen, schedule);
3483 n = isl_map_dim(schedule, isl_dim_out);
3484 set = isl_set_universe(isl_ast_build_get_schedule_space(build));
3485 set = add_bounded_parameters(set, gen->n_block, gen->block_dim, "t");
3487 schedule = isl_map_range_product(sched, schedule);
3489 assert(array_name);
3490 name = isl_alloc_array(gen->ctx, char,
3491 strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
3492 if (group->array->n_group > 1)
3493 sprintf(name, "%s_%s_%s_%d", type, mem, array_name, group->nr);
3494 else
3495 sprintf(name, "%s_%s_%s", type, mem, array_name);
3496 shift = isl_map_set_tuple_name(shift,
3497 isl_dim_out, name + strlen(type) + 1);
3499 space = isl_space_domain(isl_map_get_space(shift));
3500 map = isl_map_range_map(isl_map_universe(isl_space_unwrap(space)));
3501 map = isl_map_range_product(map, shift);
3503 schedule = isl_map_apply_domain(schedule, map);
3505 schedule = isl_map_set_tuple_name(schedule, isl_dim_in, name);
3506 free(name);
3508 build = isl_ast_build_restrict(build, set);
3510 gen->copy_group = group;
3511 gen->copy_bound = group->shared_bound;
3513 if (private) {
3514 space = isl_space_range(isl_map_get_space(schedule));
3515 space = isl_space_range(isl_space_unwrap(space));
3516 build = set_unroll(build, space, 0);
3518 iterators = generate_names(gen->ctx, n, "c");
3519 build = isl_ast_build_set_iterators(build, iterators);
3520 build = isl_ast_build_set_create_leaf(build, &create_copy_leaf, gen);
3521 tree = isl_ast_build_ast_from_schedule(build,
3522 isl_union_map_from_map(schedule));
3523 isl_ast_build_free(build);
3525 return tree;
3528 /* Return code for reading into or writing from shared memory
3529 * the given array reference group.
3531 * If we are performing a read from global memory to shared memory and
3532 * if the array involved is not a scalar, then we copy
3533 * the entire tile to shared memory. This may result in some extra
3534 * elements getting copied, but it should lead to simpler code
3535 * (which means that fewer registers may be needed) and less divergence.
3537 * Otherwise, we only copy the elements that will be read or have been written
3538 * in the kernel.
3541 * The input "sched" is of the form
3543 * type[S -> A] -> L
3545 * with S the first shared_len dimensions of the computed schedule,
3546 * A the array and L the schedule corresponding to the generated loops.
3548 * We first drop "type",
3550 * [S -> A] -> L
3552 * If the above conditions are satisfied, we project out A,
3553 * resulting in
3555 * S -> L
3557 * and then introduce the group tile [S -> T], resulting in
3559 * [S -> T] -> L
3561 static __isl_give isl_ast_node *copy_group_shared_accesses(
3562 struct gpu_gen *gen, struct gpu_array_ref_group *group,
3563 __isl_take isl_map *sched, __isl_take isl_ast_build *build)
3565 const char *type;
3566 int read;
3567 isl_union_map *access;
3569 type = isl_map_get_tuple_name(sched, isl_dim_in);
3570 read = !strcmp(type, "read");
3572 sched = isl_map_reset_tuple_id(sched, isl_dim_in);
3574 if (read && group->array->n_index > 0) {
3575 isl_space *space;
3576 isl_map *map;
3578 space = isl_space_domain(isl_map_get_space(sched));
3579 space = isl_space_unwrap(space);
3580 map = isl_map_domain_map(isl_map_universe(space));
3581 sched = isl_map_apply_domain(sched, map);
3583 map = group_tile(group);
3584 map = isl_map_reverse(isl_map_domain_map(map));
3585 sched = isl_map_apply_domain(sched, map);
3588 return copy_access(gen, sched, type, group, build, 0);
3591 /* Return code for reading into or writing from private memory
3592 * the given array reference group.
3594 * Let S be the first shared_len dimensions of the computed schedule,
3595 * D the iteration domains, A the array and L the schedule corresponding
3596 * to the generated loops.
3597 * "sched" is of the form
3599 * type[S -> A] -> L
3601 * where type is either "read" or "write".
3602 * We apply the privatization D -> S(t), with t the thread ids,
3603 * to the access relation D -> A to obtain the privatized access relation
3605 * S(t) -> A
3607 * We drop the type from "sched" and intersect with the privatized access
3608 * relation to obtain
3610 * [S(t) -> A] -> L
3612 static __isl_give isl_ast_node *copy_group_private_accesses(
3613 struct gpu_gen *gen, struct gpu_array_ref_group *group,
3614 __isl_take isl_map *sched, __isl_take isl_ast_build *build)
3616 const char *type;
3617 int read;
3618 isl_union_map *priv;
3619 isl_union_map *access;
3620 isl_map *access_map;
3622 type = isl_map_get_tuple_name(sched, isl_dim_in);
3623 read = !strcmp(type, "read");
3625 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
3626 priv = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3627 priv);
3629 access = group_access_relation(group, read, !read);
3630 access = isl_union_map_apply_domain(access, priv);
3631 access_map = isl_map_from_union_map(access);
3633 sched = isl_map_reset_tuple_id(sched, isl_dim_in);
3634 sched = isl_map_intersect_domain(sched, isl_map_wrap(access_map));
3636 return copy_access(gen, sched, type, group, build, 1);
3639 /* Return code for reading into or writing from shared or private memory.
3641 * "schedule" is of the form
3643 * type[S -> A] -> L
3645 * with S the first shared_len dimensions of the computed schedule,
3646 * A the array and L the schedule corresponding to the generated loops.
3647 * The array reference group is attached to "type".
3649 static __isl_give isl_ast_node *create_access_leaf(
3650 struct gpu_gen *gen, __isl_take isl_map *schedule,
3651 __isl_take isl_ast_build *build)
3653 struct gpu_array_ref_group *group;
3654 isl_id *id;
3656 id = isl_map_get_tuple_id(schedule, isl_dim_in);
3657 group = isl_id_get_user(id);
3658 isl_id_free(id);
3660 if (group->private_bound)
3661 return copy_group_private_accesses(gen, group, schedule,
3662 build);
3663 else
3664 return copy_group_shared_accesses(gen, group, schedule,
3665 build);
3668 /* Create a domain node representing a synchronization.
3670 static __isl_give isl_ast_node *create_sync_leaf(
3671 struct gpu_gen *gen, __isl_take isl_map *schedule,
3672 __isl_take isl_ast_build *build)
3674 struct ppcg_kernel_stmt *stmt;
3675 isl_id *id;
3676 isl_space *space;
3677 isl_ast_node *node;
3678 isl_ast_expr *expr;
3680 isl_map_free(schedule);
3682 stmt = isl_calloc_type(gen->ctx, struct ppcg_kernel_stmt);
3683 if (!stmt)
3684 return NULL;
3686 stmt->type = ppcg_kernel_sync;
3688 space = isl_ast_build_get_schedule_space(build);
3689 space = isl_space_from_domain(space);
3690 space = isl_space_set_tuple_name(space, isl_dim_out, "sync");
3691 expr = isl_ast_build_call_from_pw_multi_aff(build,
3692 isl_pw_multi_aff_from_multi_aff(isl_multi_aff_zero(space)));
3693 node = isl_ast_node_alloc_user(expr);
3694 isl_ast_build_free(build);
3696 id = isl_id_alloc(gen->ctx, NULL, stmt);
3697 id = isl_id_set_free_user(id, &ppcg_kernel_stmt_free);
3698 return isl_ast_node_set_annotation(node, id);
3701 /* This function is called during the code generation at the point
3702 * where the schedule domain element is completely determined by
3703 * the generated code. The input schedule contains the original
3704 * statements as well as synchronization and copy "statements".
3705 * The latter are scheduled at different points than any of the original
3706 * statements, so they will only arrive here in isolation.
3708 * If the current schedule only refers to a single statement,
3709 * we check if it is a copy or synchronization statement and
3710 * call the appropriate functions.
3711 * Otherwise, we assume we are dealing with the original statements
3712 * and we call create_domain_leaf.
3714 static __isl_give isl_ast_node *create_kernel_leaf(
3715 __isl_take isl_ast_build *build, void *user)
3717 struct gpu_gen *gen = (struct gpu_gen *) user;
3718 isl_map *map;
3719 isl_union_map *schedule;
3720 const char *name;
3722 schedule = isl_ast_build_get_schedule(build);
3724 if (isl_union_map_n_map(schedule) != 1)
3725 return create_domain_leaf(schedule, build, user);
3727 map = isl_map_from_union_map(schedule);
3728 name = isl_map_get_tuple_name(map, isl_dim_in);
3729 if (!strcmp(name, "read") || !strcmp(name, "write"))
3730 return create_access_leaf(gen, map, build);
3731 if (!strcmp(name, "sync"))
3732 return create_sync_leaf(gen, map, build);
3734 return create_domain_leaf(isl_union_map_from_map(map), build, user);
3737 /* Mark all odd schedule dimensions as "atomic" (when the even dimensions
3738 * have value 0) and all even schedule dimensions as "unroll".
3740 * That is, the options look as follows
3742 * { [0, b, 0, d, ..., 0] -> atomic[i] : exists a : i = 2 a + 1;
3743 * [a, b, c, d, ..., z] -> unroll[i] : exists a : i = 2 a }
3745 * The even positions make it possible to schedule copying blocks
3746 * and synchronization before or after each level of the shared memory
3747 * tile loops, and we want to make sure that the code for these is
3748 * generated separately (within each level).
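 * For instance, assuming sched_len = 4, the options constructed below
 * correspond to
 *
 *	{ [0, b, 0, d] -> atomic[i] : exists a : i = 2 a + 1;
 *	  [a, b, c, d] -> unroll[i] : exists a : i = 2 a }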
3750 static __isl_give isl_ast_build *set_atomic_and_unroll(
3751 __isl_take isl_ast_build *build,
3752 __isl_take isl_space *space, int sched_len)
3755 isl_map *map;
3756 isl_constraint *c;
3757 isl_union_map *opt;
3758 isl_local_space *ls;
3759 int i;
3763 space = isl_space_params(space);
3764 space = isl_space_add_dims(space, isl_dim_set, sched_len);
3765 space = isl_space_from_domain(space);
3766 space = isl_space_add_dims(space, isl_dim_out, 2);
3767 map = isl_map_universe(isl_space_copy(space));
3768 for (i = 0; i < sched_len; i += 2)
3769 map = isl_map_fix_si(map, isl_dim_in, i, 0);
3770 ls = isl_local_space_from_space(isl_map_get_space(map));
3771 c = isl_equality_alloc(ls);
3772 c = isl_constraint_set_coefficient_si(c, isl_dim_out, 0, 1);
3773 c = isl_constraint_set_coefficient_si(c, isl_dim_out, 1, 2);
3774 c = isl_constraint_set_constant_si(c, 1);
3775 map = isl_map_add_constraint(map, c);
3776 map = isl_map_project_out(map, isl_dim_out, 1, 1);
3777 map = isl_map_set_tuple_name(map, isl_dim_out, "atomic");
3778 opt = isl_union_map_from_map(map);
3780 map = isl_map_universe(space);
3781 ls = isl_local_space_from_space(isl_map_get_space(map));
3782 c = isl_equality_alloc(ls);
3783 c = isl_constraint_set_coefficient_si(c, isl_dim_out, 0, 1);
3784 c = isl_constraint_set_coefficient_si(c, isl_dim_out, 1, 2);
3785 map = isl_map_add_constraint(map, c);
3786 map = isl_map_project_out(map, isl_dim_out, 1, 1);
3787 map = isl_map_set_tuple_name(map, isl_dim_out, "unroll");
3788 opt = isl_union_map_add_map(opt, map);
3790 build = isl_ast_build_set_options(build, opt);
3792 return build;
3795 /* Return a map that maps a space of dimension gen->shared_len
3796 * onto its last dimensions, those starting at gen->tile_first.
3797 * The range is of dimension
3799 * 2 * (gen->shared_len - gen->tile_first) + 1
3801 * The input dimensions are mapped to the odd dimensions in the output,
3802 * while the even dimensions (except 2*pos) are fixed to 0.
3803 * Output dimension 2*pos (if pos >= 0) is fixed to "val".
3804 * If pos >= 0, then only the first pos dimensions starting at gen->tile_first
3805 * are mapped to the output. The remaining input dimensions are projected
3806 * out and the corresponding output dimensions are fixed to 0.
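 * For example, assuming gen->tile_first = 1 and gen->shared_len = 3
 * (so n = 2 below), a call with pos = -1 constructs the map
 *
 *	[s_0, s_1, s_2] -> [0, s_1, 0, s_2, 0]
 *
 * while a call with pos = 1 and val = 5 constructs
 *
 *	[s_0, s_1, s_2] -> [0, s_1, 5, 0, 0]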
3808 static __isl_give isl_map *insert_even(struct gpu_gen *gen,
3809 __isl_take isl_space *space, int pos, int val)
3811 int i, n;
3812 isl_map *proj;
3814 space = isl_space_set_from_params(space);
3815 space = isl_space_add_dims(space, isl_dim_set, gen->shared_len);
3816 space = isl_space_map_from_set(space);
3817 proj = isl_map_identity(space);
3818 proj = isl_map_project_out(proj, isl_dim_out, 0, gen->tile_first);
3819 n = gen->shared_len - gen->tile_first;
3820 for (i = 0; i <= n; ++i) {
3821 proj = isl_map_insert_dims(proj, isl_dim_out, 2 * i, 1);
3822 if (i == pos)
3823 proj = isl_map_fix_si(proj, isl_dim_out, 2 * i, val);
3824 else
3825 proj = isl_map_fix_si(proj, isl_dim_out, 2 * i, 0);
3828 if (pos < 0)
3829 return proj;
3831 proj = isl_map_eliminate(proj, isl_dim_in, gen->tile_first + pos,
3832 gen->shared_len - (gen->tile_first + pos));
3833 for (i = pos; i < n; ++i)
3834 proj = isl_map_fix_si(proj, isl_dim_out, 2 * i + 1, 0);
3836 return proj;
3839 /* Given the AST context schedule "schedule" and the mapping from
3840 * domains to the shared tile loops "shared_sched", add a schedule
3841 * for a synchronization operation at position "val" of loop level "pos".
3843 * schedule is of the form
3845 * D -> L
3847 * (with D the iteration domains and L the already generated loops),
3848 * while shared_sched is of the form
3850 * D -> S
3852 * We combine them into
3854 * L -> S
3856 * apply a mapping
3858 * [s_0,...] -> [0,s_{tile_first},0,..., val, 0, 0, ... 0]
3860 * and use the result as a schedule for "sync".
3862 static __isl_give isl_union_map *add_sync_schedule(struct gpu_gen *gen,
3863 __isl_take isl_union_map *res, __isl_keep isl_union_map *schedule,
3864 __isl_keep isl_union_map *shared_sched, int pos, int val)
3866 isl_space *space;
3867 isl_map *proj, *map;
3869 shared_sched = isl_union_map_copy(shared_sched);
3870 schedule = isl_union_map_copy(schedule);
3872 space = isl_union_map_get_space(shared_sched);
3873 schedule = isl_union_map_apply_domain(shared_sched, schedule);
3874 map = isl_map_from_union_map(schedule);
3876 proj = insert_even(gen, space, pos, val);
3877 map = isl_map_apply_range(map, proj);
3878 map = isl_map_from_range(isl_map_wrap(map));
3879 map = isl_map_set_tuple_name(map, isl_dim_in, "sync");
3881 res = isl_union_map_add_map(res, map);
3883 return res;
3886 /* Given the AST context schedule "schedule" and the mapping from
3887 * domains to the shared tile loops "shared_sched", add a schedule
3888 * for copying an array reference group to/from shared/private memory.
3889 * "read" is set if data should be copied from global memory
3890 * to shared/private memory.
3891 * "k" represents the current group
3892 * "s" is the total number of groups
3894 * We schedule an operation before or after the innermost loop
3895 * of "shared_sched" that affects the tile of the array reference group.
3897 * schedule is of the form
3899 * D -> L
3901 * (with D the iteration domains and L the already generated loops),
3902 * while shared_sched is of the form
3904 * D -> S
3906 * We first compute the access relation for the reference group
3908 * D -> A
3910 * and combine it with shared_sched into
3912 * D -> [S -> A]
3914 * If this results in an empty relation, no copying needs to be performed
3915 * at this point.
3916 * Otherwise, we invert the relation and combine it with "schedule" into
3918 * [S -> A] -> L
3920 * The actual additional piece of the schedule is obtained from combining
3922 * [S -> A] -> S
3924 * with a mapping
3926 * [s_0,...] -> [0,s_{tile_first},0,..., val, 0, 0, ... 0]
3928 * The position of "val" corresponds to the innermost loop that affects
3929 * the tile and the value indicates where the copying is scheduled
3930 * with respect to the actual kernel code (at value 0).
3931 * Reads are scheduled before the code, writes to global memory from
3932 * private memory are scheduled at values 1 to s, writes to global
3933 * memory from shared memory are scheduled at values s + 2 to 2 * s + 1.
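 * For example, assuming s = 2 groups, the reads of groups 0 and 1 are
 * scheduled at values -2 and -3, their writes from private memory at
 * values 1 and 2, and their writes from shared memory at values 4 and 5,
 * with the kernel code itself at value 0.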
3935 * If we are scheduling a read from global memory to shared memory,
3936 * we insert a synchronization before the kernel code (at the innermost
3937 * level).
3938 * If we are scheduling a write to global memory, then we add
3939 * a synchronization after all writes (at value 2 * s + 2).
3940 * However, there is no need for a synchronization after the outermost loop.
3941 * A write to global memory from private memory at the innermost level
3942 * does not require a synchronization, because it is covered by
3943 * the synchronization after the kernel inserted by body_schedule.
3945 static __isl_give isl_union_map *add_group_schedule(struct gpu_gen *gen,
3946 __isl_take isl_union_map *res, __isl_keep isl_union_map *schedule,
3947 __isl_keep isl_union_map *shared_sched,
3948 struct gpu_array_ref_group *group, int read, int k, int s)
3950 int n;
3951 int pos, val;
3952 isl_space *space;
3953 isl_union_map *access;
3954 isl_map *map, *proj, *access_map;
3955 isl_id *id;
3957 access = group_access_relation(group, read, !read);
3958 access = isl_union_map_range_product(isl_union_map_copy(shared_sched),
3959 access);
3961 if (isl_union_map_is_empty(access)) {
3962 isl_union_map_free(access);
3963 return res;
3966 access = isl_union_map_reverse(access);
3967 access = isl_union_map_apply_range(access,
3968 isl_union_map_copy(schedule));
3969 access_map = isl_map_from_union_map(access);
3971 space = isl_space_copy(group->array->dim);
3972 space = isl_space_from_range(space);
3973 space = isl_space_add_dims(space, isl_dim_in, gen->shared_len);
3974 map = isl_map_domain_map(isl_map_universe(space));
3976 space = isl_union_map_get_space(schedule);
3977 pos = group->last_shared + 1 - gen->tile_first;
3978 if (read)
3979 val = -2 - k;
3980 else if (group->private_bound)
3981 val = 1 + k;
3982 else
3983 val = 1 + s + 1 + k;
3984 proj = insert_even(gen, space, pos, val);
3985 map = isl_map_apply_range(map, proj);
3987 access_map = isl_map_range_product(access_map, map);
3989 id = isl_id_alloc(gen->ctx, read ? "read" : "write", group);
3990 access_map = isl_map_set_tuple_id(access_map, isl_dim_in, id);
3992 res = isl_union_map_add_map(res, access_map);
3994 n = gen->shared_len - gen->tile_first;
3995 if (read) {
3996 if (!group->private_bound)
3997 res = add_sync_schedule(gen, res, schedule,
3998 shared_sched, n, -1);
3999 } else {
4000 if (pos == 0)
4001 return res;
4002 if (pos == n && group->private_bound)
4003 return res;
4004 res = add_sync_schedule(gen, res, schedule, shared_sched,
4005 pos, 2 * s + 2);
4008 return res;
4011 /* Return a schedule for the shared tile loops based on the current
4012 * AST context schedule.
4014 * We create a "shared_sched" that maps the domains to the first
4015 * shared_len dimensions of the computed schedule, project out the
4016 * first tile_first dimensions (as these are already covered by
4017 * the host code) and insert "statement-level" dimensions at even
4018 * positions so that we can schedule copy blocks and synchronization
4019 * before/after each level.
4021 * In particular, copy blocks are inserted inside the innermost
4022 * level that affects the tile. For the copying to global memory,
4023 * those from private memory are scheduled before those from shared
4024 * memory such that synchronization can be inserted between the two
4025 * at the innermost level.
4026 * Synchronization is inserted at the innermost level before the
4027 * actual kernel code if there is any copying from global memory
4028 * to shared memory. It is inserted unconditionally at the innermost
4029 * level after the actual kernel code and the copying to global memory
4030 * from private memory (if any). Finally, it is inserted after
4031 * any copying to global memory, except at the outermost level
4032 * and at the innermost level if there is no copying from shared
4033 * memory. The copying from private memory is covered by the unconditional
4034 * synchronization at the innermost level.
4036 static __isl_give isl_union_map *body_schedule(struct gpu_gen *gen,
4037 __isl_take isl_union_map *schedule)
4039 isl_space *space;
4040 isl_union_map *res;
4041 isl_union_map *shared_sched;
4042 isl_union_map *sched;
4043 isl_map *proj, *map;
4044 int i, j, k, s;
4046 shared_sched = isl_union_map_copy(gen->tiled_sched);
4047 proj = projection(isl_union_map_get_space(shared_sched),
4048 gen->tiled_len, gen->shared_len);
4049 shared_sched = isl_union_map_apply_range(shared_sched,
4050 isl_union_map_from_map(proj));
4051 space = isl_union_map_get_space(shared_sched);
4052 proj = insert_even(gen, space, -1, 0);
4053 sched = isl_union_map_apply_range(isl_union_map_copy(shared_sched),
4054 isl_union_map_from_map(proj));
4056 res = isl_union_map_range_product(isl_union_map_copy(schedule), sched);
4058 s = 0;
4059 for (i = 0; i < gen->prog->n_array; ++i)
4060 s += gen->prog->array[i].n_group;
4062 k = 0;
4063 for (i = 0; i < gen->prog->n_array; ++i) {
4064 struct gpu_array_info *array = &gen->prog->array[i];
4066 for (j = 0; j < array->n_group; ++j) {
4067 struct gpu_array_ref_group *group;
4069 group = array->groups[j];
4070 if (!group->private_bound && !group->shared_bound)
4071 continue;
4072 res = add_group_schedule(gen, res, schedule,
4073 shared_sched, group, 0, k, s);
4074 res = add_group_schedule(gen, res, schedule,
4075 shared_sched, group, 1, k, s);
4076 ++k;
4080 res = add_sync_schedule(gen, res, schedule, shared_sched,
4081 gen->shared_len - gen->tile_first, 1 + s);
4083 isl_union_map_free(shared_sched);
4084 isl_union_map_free(schedule);
4086 return res;
4089 /* Generate code for "kernel" in the given "context".
4091 * We first generate code for the shared tile loops (T1T, T1P and T2)
4092 * in a context that includes the block ids.
4093 * Within each iteration of these loops an additional code generation
4094 * is performed (within create_kernel_leaf) for the rest of the schedule
4095 * in a context that includes the thread ids.
4097 static __isl_give isl_ast_node *generate_kernel(struct gpu_gen *gen,
4098 __isl_keep isl_ast_build *build, __isl_keep isl_set *host_domain,
4099 __isl_keep isl_multi_pw_aff *grid_size)
4101 isl_space *space;
4102 isl_set *set;
4103 isl_id_list *iterators;
4104 isl_union_map *schedule;
4105 isl_ast_node *tree;
4106 int sched_len;
4108 schedule = isl_ast_build_get_schedule(build);
4110 build = isl_ast_build_copy(build);
4111 build = isl_ast_build_restrict(build, isl_set_copy(host_domain));
4112 space = isl_ast_build_get_schedule_space(build);
4113 set = isl_set_universe(isl_space_copy(space));
4114 set = add_bounded_parameters_dynamic(set, grid_size, "b");
4115 build = isl_ast_build_restrict(build, set);
4117 schedule = body_schedule(gen, schedule);
4119 sched_len = 2 * (gen->shared_len - gen->tile_first) + 1;
4121 build = set_atomic_and_unroll(build, space, sched_len);
4122 iterators = generate_names(gen->ctx, sched_len, "g");
4123 build = isl_ast_build_set_iterators(build, iterators);
4124 build = isl_ast_build_set_create_leaf(build, &create_kernel_leaf, gen);
4125 tree = isl_ast_build_ast_from_schedule(build, schedule);
4126 isl_ast_build_free(build);
4128 return tree;
4131 /* Attach "id" to the given node.
4133 static __isl_give isl_ast_node *attach_id(__isl_take isl_ast_node *node,
4134 __isl_keep isl_ast_build *build, void *user)
4136 isl_id *id = user;
4138 node = isl_ast_node_set_annotation(node, id);
4140 return node;
4143 /* Construct an AST node for performing a kernel launch and attach
4144 * the information about the kernel to that node.
4146 * The kernel AST has been constructed in the context of the range
4147 * of "schedule". In particular, the grid size has been computed
4148 * in the context. We therefore still need to make sure that these
4149 * constraints are expressed in the code. We do this by creating a schedule
4151 * kernel[] -> [S -> []]
4153 * where S is the schedule domain, i.e., the range of "schedule".
4154 * The AST generation will then create a single call surrounded by
4155 * all the conditions in "S" that have not been expressed yet.
4157 * The kernel information is attached to this node in attach_id.
4159 static __isl_give isl_ast_node *construct_launch(
4160 __isl_take isl_ast_build *build, __isl_take isl_union_map *schedule,
4161 __isl_take struct ppcg_kernel *kernel)
4163 isl_id *id;
4164 isl_ctx *ctx;
4165 isl_union_set *domain;
4166 isl_set *set;
4167 isl_map *map;
4168 isl_ast_node *node;
4170 ctx = isl_ast_build_get_ctx(build);
4172 id = isl_id_alloc(ctx, NULL, kernel);
4173 id = isl_id_set_free_user(id, &ppcg_kernel_free);
4175 domain = isl_union_map_range(schedule);
4176 set = isl_set_from_union_set(domain);
4177 map = isl_map_from_domain(set);
4178 map = isl_map_from_range(isl_map_wrap(map));
4179 map = isl_map_set_tuple_name(map, isl_dim_in, "kernel");
4180 schedule = isl_union_map_from_map(map);
4182 build = isl_ast_build_set_at_each_domain(build, &attach_id, id);
4183 node = isl_ast_build_ast_from_schedule(build, schedule);
4184 isl_ast_build_free(build);
4186 return node;
4189 /* This function is called for each leaf in the AST of the host code.
4190 * We first specialize the schedule to the site of the leaf, compute
4191 * the size of shared memory and then construct the body of host code
4192 * and the associated kernel.
4194 * The necessary information for printing the kernel launch is
4195 * stored in a struct ppcg_kernel and attached to the leaf node
4196 * created to represent the launch.
4198 static __isl_give isl_ast_node *create_host_leaf(
4199 __isl_take isl_ast_build *build, void *user)
4201 struct gpu_gen *gen = (struct gpu_gen *) user;
4202 isl_id *id;
4203 isl_ast_node *node;
4204 struct ppcg_kernel *kernel;
4205 isl_set *host_domain;
4206 isl_union_map *schedule;
4207 isl_union_map *local_sched;
4208 isl_union_map *access;
4209 isl_union_set *domain;
4210 int i;
4212 schedule = isl_ast_build_get_schedule(build);
4214 isl_union_map_foreach_map(schedule, &extract_tile_len, gen);
4215 read_sizes(gen);
4217 domain = isl_union_map_domain(isl_union_map_copy(schedule));
4219 local_sched = isl_union_map_copy(gen->sched);
4220 local_sched = isl_union_map_intersect_domain(local_sched, domain);
4221 access = isl_union_map_union(isl_union_map_copy(gen->prog->read),
4222 isl_union_map_copy(gen->prog->write));
4223 access = isl_union_map_apply_domain(access,
4224 isl_union_map_copy(local_sched));
4226 gen->tiled_sched = tile_schedule(gen, local_sched);
4227 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
4228 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
4230 kernel = gen->kernel = isl_calloc_type(gen->ctx, struct ppcg_kernel);
4231 if (!kernel)
4232 goto error;
4234 kernel->id = gen->kernel_id++;
4235 kernel->n_block = gen->n_block;
4236 for (i = 0; i < gen->n_block; ++i)
4237 kernel->block_dim[i] = gen->block_dim[i];
4238 kernel->n_grid = gen->n_grid;
4239 for (i = 0; i < gen->n_grid; ++i)
4240 kernel->grid_dim[i] = gen->grid_dim[i];
4241 kernel->context = isl_union_map_params(isl_union_map_copy(schedule));
4242 kernel->grid_size = extract_grid_size(gen, kernel);
4243 kernel->arrays = isl_union_map_range(access);
4244 kernel->space = isl_ast_build_get_schedule_space(build);
4246 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
4248 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
4249 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
4251 gen->private_access = NULL;
4252 compute_shared_sched(gen);
4253 gen->privatization = compute_privatization(gen);
4254 group_references(gen);
4255 compute_private_size(gen);
4256 check_shared_memory_bound(gen);
4257 host_domain = isl_set_from_union_set(isl_union_map_range(
4258 isl_union_map_copy(schedule)));
4259 localize_bounds(gen, kernel, host_domain);
4261 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
4263 kernel->tree = generate_kernel(gen, build, host_domain,
4264 kernel->grid_size);
4265 create_kernel_vars(gen, kernel);
4267 free_local_array_info(gen);
4268 isl_map_free(gen->privatization);
4269 isl_union_map_free(gen->private_access);
4270 isl_union_map_free(gen->local_sched);
4271 isl_union_map_free(gen->tiled_sched);
4272 isl_union_map_free(gen->shared_sched);
4273 isl_union_map_free(gen->shared_proj);
4274 isl_set_free(host_domain);
4275 free(gen->tile_size);
4277 node = construct_launch(build, schedule, kernel);
4279 return node;
4280 error:
4281 isl_union_map_free(schedule);
4282 return NULL;
4285 /* Use isl to generate code for the outer gen->tile_first loops
4286 * of the global schedule in gen->sched, resulting in the host code.
4287 * Within each iteration of this partial schedule, i.e., for each kernel
4288 * launch, create_host_leaf takes care of generating the kernel code.
4290 static __isl_give isl_ast_node *generate_host_code(struct gpu_gen *gen)
4292 isl_ast_build *build;
4293 isl_ast_node *tree;
4294 isl_union_map *sched;
4295 isl_map *proj;
4296 isl_id_list *iterators;
4298 sched = isl_union_map_copy(gen->sched);
4299 proj = projection(isl_union_map_get_space(sched),
4300 gen->untiled_len, gen->tile_first);
4301 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
4303 isl_options_set_ast_build_group_coscheduled(gen->ctx, 1);
4304 build = isl_ast_build_from_context(isl_set_copy(gen->prog->context));
4305 iterators = generate_names(gen->ctx, gen->tile_first, "h");
4306 build = isl_ast_build_set_iterators(build, iterators);
4307 build = isl_ast_build_set_create_leaf(build, &create_host_leaf, gen);
4308 tree = isl_ast_build_ast_from_schedule(build, sched);
4309 isl_ast_build_free(build);
4311 return tree;
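/* Intersect "set" with the context described by "str", after
 * aligning the parameters.
 * If "str" is NULL, simply return "set".
 */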
4314 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
4315 const char *str)
4317 isl_ctx *ctx;
4318 isl_set *context;
4320 if (!str)
4321 return set;
4323 ctx = isl_set_get_ctx(set);
4324 context = isl_set_read_from_str(ctx, str);
4325 context = isl_set_align_params(context, isl_set_get_space(set));
4326 set = isl_set_intersect(set, context);
4328 return set;
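/* Return the union map described by "str", or NULL if "str" is NULL.
 * The string is expected to describe the per-kernel tile, grid and
 * block sizes (the "sizes" option).
 */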
4331 __isl_give isl_union_map *extract_sizes_from_str(isl_ctx *ctx, const char *str)
4333 if (!str)
4334 return NULL;
4335 return isl_union_map_read_from_str(ctx, str);
4338 /* Information about the outermost tilable bands in the forest of bands.
4340 * tile_len and n_parallel are only set on band_info structures
4341 * that correspond to outermost bands. For other bands (in particular,
4342 * ancestors of the outermost bands), n_parallel is set to 0.
4344 * prefix is the (padded) schedule leading up to the outermost tilable bands.
4346 * tile_first is the number of schedule dimensions in prefix.
4348 * suffix is the schedule of the outermost tilable bands and their descendants.
4350 struct band_info {
4351 struct gpu_gen *gen;
4352 int tile_first;
4353 int tile_len;
4354 int n_parallel;
4355 isl_union_map *prefix;
4356 isl_union_map *suffix;
4359 /* Set tile_len and n_parallel of the statement to those of
4360 * its outermost band, recorded in the band_info.
4362 static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
4364 struct band_info *info = user;
4365 struct gpu_stmt *stmt;
4366 isl_id *id;
4368 id = isl_map_get_tuple_id(map, isl_dim_in);
4369 stmt = find_stmt(info->gen->prog, id);
4370 isl_id_free(id);
4372 stmt->tile_len = info->tile_len;
4373 stmt->n_parallel = info->n_parallel;
4375 isl_map_free(map);
4377 return 0;
4380 static void list_select_outer_band(struct gpu_gen *gen,
4381 __isl_take isl_band_list *list, int pos, struct band_info *list_info);
4383 /* Check if this band has any parallel loops. If so, take it as
4384 * the outermost tilable band. If not, continue looking for the
4385 * outermost tilable band in the children of the current band.
4387 static void band_select_outer_band(struct gpu_gen *gen,
4388 __isl_take isl_band *band, int pos, struct band_info *info)
4390 int n = isl_band_n_member(band);
4391 int n_parallel;
4393 for (n_parallel = 0; n_parallel < n; ++n_parallel)
4394 if (!isl_band_member_is_zero_distance(band, n_parallel))
4395 break;
4397 info->n_parallel = n_parallel;
4398 if (n_parallel) {
4399 info->gen = gen;
4400 info->tile_first = pos;
4401 info->tile_len = n;
4402 info->prefix = isl_band_get_prefix_schedule(band);
4403 info->suffix = isl_union_map_flat_range_product(
4404 isl_band_get_partial_schedule(band),
4405 isl_band_get_suffix_schedule(band));
4406 isl_union_map_foreach_map(info->prefix,
4407 &set_stmt_tile_len, info);
4408 } else if (isl_band_has_children(band)) {
4409 isl_band_list *children;
4410 children = isl_band_get_children(band);
4411 list_select_outer_band(gen, children, pos + n, info);
4412 } else {
4413 info->gen = gen;
4414 info->tile_first = pos + n;
4415 info->tile_len = 0;
4416 info->prefix = isl_union_map_flat_range_product(
4417 isl_band_get_prefix_schedule(band),
4418 isl_band_get_partial_schedule(band));
4419 info->suffix = isl_band_get_suffix_schedule(band);
4420 isl_union_map_foreach_map(info->prefix,
4421 &set_stmt_tile_len, info);
4424 isl_band_free(band);
4427 /* Comparison function that returns a non-zero value for band_infos
4428 * with different tile_len fields or different n_parallel fields.
4430 static int cmp_band(const void *p1, const void *p2)
4432 const struct band_info *info1 = p1;
4433 const struct band_info *info2 = p2;
4435 if (info1->tile_len != info2->tile_len)
4436 return info1->tile_len - info2->tile_len;
4438 return info1->n_parallel - info2->n_parallel;
4441 /* Extend "umap" with coordinates with fixed value "val"
4442 * to a total length of "dst_len", assuming the original dimension is "src_len".
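 * For example, extending { A[i] -> [i] } from src_len = 1 to
 * dst_len = 3 with val = 0 results in { A[i] -> [i, 0, 0] }.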
4444 static __isl_give isl_union_map *extend_range(
4445 __isl_take isl_union_map *umap, int src_len, int dst_len, int val)
4447 isl_space *dim;
4448 isl_map *map;
4449 int i;
4451 dim = isl_union_map_get_space(umap);
4452 map = isl_map_reverse(projection(dim, dst_len, src_len));
4453 for (i = src_len; i < dst_len; ++i)
4454 map = isl_map_fix_si(map, isl_dim_out, i, val);
4456 umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
4458 return umap;
4461 /* Group bands with the same values for tile_len and n_parallel.
4462 * The prefix schedule is then extended with a fixed coordinate that
4463 * is different for each such group.
4464 * Note that the actual values for this coordinate are not important.
4465 * The bands have already been effectively separated at a higher level
4466 * or they are independent and may be executed in parallel.
4467 * The list of band_info has been sorted before this function is called.
4469 static void separate_bands(struct band_info *info, int n)
4471 int i;
4472 int j = 0;
4474 for (i = 0; i < n; ++i) {
4475 int l = info[i].tile_first;
4477 if (i &&
4478 (info[i].tile_len != info[i - 1].tile_len ||
4479 info[i].n_parallel != info[i - 1].n_parallel))
4480 j++;
4482 info[i].prefix = extend_range(info[i].prefix,
4483 l, l + 1, j);
4484 info[i].tile_first = l + 1;
4488 /* Select the outermost bands in the elements of the list, align
4489 * their prefix schedules, separate bands with different values
4490 * for tile_len and/or n_parallel and then combine the resulting
4491 * prefix and suffix schedules into a single pair of prefix and
4492 * suffix schedules for the entire list.
4494 static void list_select_outer_band(struct gpu_gen *gen,
4495 __isl_take isl_band_list *list, int pos, struct band_info *list_info)
4497 isl_band *band;
4498 int i;
4499 int n = isl_band_list_n_band(list);
4500 isl_ctx *ctx = isl_band_list_get_ctx(list);
4501 struct band_info *info;
4502 int max_tile_first;
4503 isl_union_map *prefix;
4504 isl_union_map *suffix;
4506 assert(n >= 1);
4507 info = isl_calloc_array(ctx, struct band_info, n);
4508 assert(info);
4510 max_tile_first = 0;
4511 for (i = 0; i < n; ++i) {
4512 band = isl_band_list_get_band(list, i);
4513 band_select_outer_band(gen, band, pos, &info[i]);
4514 if (info[i].tile_first > max_tile_first)
4515 max_tile_first = info[i].tile_first;
4518 for (i = 0; i < n; ++i) {
4519 if (info[i].tile_first == max_tile_first)
4520 continue;
4521 info[i].prefix = extend_range(info[i].prefix,
4522 info[i].tile_first, max_tile_first, 0);
4523 info[i].tile_first = max_tile_first;
4526 qsort(info, n, sizeof(struct band_info), &cmp_band);
4528 for (i = 0; i < n - 1; ++i)
4529 if (info[i].tile_len != info[i + 1].tile_len ||
4530 info[i].n_parallel != info[i + 1].n_parallel)
4531 break;
4533 if (i < n - 1)
4534 separate_bands(info, n);
4536 prefix = info[0].prefix;
4537 suffix = info[0].suffix;
4539 for (i = 1; i < n; ++i) {
4540 prefix = isl_union_map_union(prefix, info[i].prefix);
4541 suffix = isl_union_map_union(suffix, info[i].suffix);
4544 list_info->tile_first = info[0].tile_first;
4545 list_info->tile_len = -1;
4546 list_info->prefix = prefix;
4547 list_info->suffix = suffix;
4549 isl_band_list_free(list);
4550 free(info);
4553 /* Select the outermost tilable band that (by construction)
4554 * has at least one parallel loop.
4555 * The starting position of the aligned band is stored in
4556 * gen->tile_first.
4557 * The sizes and number of parallel loops may be different in different
4558 * parts of the band forest and are therefore stored in the gpu_stmts.
4560 * Return the complete schedule, with the tilable bands aligned
4561 * at gen->tile_first and padded with zero, if needed.
4563 static __isl_give isl_union_map *select_outer_tilable_band(struct gpu_gen *gen,
4564 __isl_keep isl_schedule *schedule)
4566 isl_band_list *list;
4567 struct band_info info;
4569 gen->n_parallel = 0;
4570 gen->tile_len = -1;
4572 list = isl_schedule_get_band_forest(schedule);
4574 list_select_outer_band(gen, list, 0, &info);
4576 gen->tile_first = info.tile_first;
4577 info.suffix = align_range(info.suffix);
4579 return isl_union_map_flat_range_product(info.prefix, info.suffix);
4582 /* Set gen->untiled_len to the number of scheduling dimensions
4583 * for the schedule of the first domain.
4584 * We assume here that this number is the same for all domains.
4586 static int set_untiled_len(__isl_take isl_map *map, void *user)
4588 unsigned *untiled_len = user;
4590 *untiled_len = isl_map_dim(map, isl_dim_out);
4592 isl_map_free(map);
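	/* Return a negative value to stop the iteration after the first map. */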
4593 return -1;
4596 /* Compute an appropriate schedule based on the accesses in
4597 * gen->prog->read and gen->prog->write.
4599 * We first compute dependences and then use those to compute
4600 * a schedule that has a parallel loop in each tilable band.
4601 * Finally, we select the outermost tilable band.
4603 static void compute_schedule(struct gpu_gen *gen,
4604 __isl_take isl_union_map *sched)
4606 isl_union_set *domain;
4607 isl_union_map *dep_raw, *dep2, *dep3, *dep;
4608 isl_union_map *uninitialized;
4609 isl_schedule *schedule;
4611 dep_raw = isl_union_map_copy(gen->prog->scop->dep_flow);
4612 uninitialized = isl_union_map_copy(gen->prog->scop->live_in);
4613 isl_union_map_compute_flow(isl_union_map_copy(gen->prog->write),
4614 isl_union_map_copy(gen->prog->write),
4615 isl_union_map_copy(gen->prog->read),
4616 isl_union_map_copy(sched),
4617 &dep2, &dep3, NULL, NULL);
4618 isl_union_map_free(sched);
4620 gen->prog->copy_in = isl_union_map_range(uninitialized);
4622 dep = isl_union_map_union(dep2, dep3);
4623 dep = isl_union_map_union(dep, dep_raw);
4624 dep = isl_union_map_coalesce(dep);
4626 domain = isl_union_set_copy(gen->prog->scop->domain);
4627 domain = isl_union_set_intersect_params(domain,
4628 isl_set_copy(gen->prog->scop->context));
4629 schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
4630 isl_union_map_copy(dep), dep);
4632 sched = select_outer_tilable_band(gen, schedule);
4634 isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
4635 sched = isl_union_map_intersect_domain(sched, domain);
4636 gen->sched = sched;
4638 isl_schedule_free(schedule);
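/* Extract a gpu_stmt_access from "expr", append it to the list
 * that ends in *next_access and return the updated end of the list.
 */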
4641 static struct gpu_stmt_access **expr_extract_access(struct pet_expr *expr,
4642 struct gpu_stmt_access **next_access)
4644 struct gpu_stmt_access *access;
4645 isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);
4647 access = isl_alloc_type(ctx, struct gpu_stmt_access);
4648 assert(access);
4649 access->next = NULL;
4650 access->read = expr->acc.read;
4651 access->write = expr->acc.write;
4652 access->access = isl_map_copy(expr->acc.access);
4654 *next_access = access;
4655 next_access = &(*next_access)->next;
4656 return next_access;
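/* Extract all accesses in "expr" (including those in its arguments)
 * and append them to the list that ends in *next_access.
 * Return the updated end of the list.
 */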
4659 static struct gpu_stmt_access **expr_extract_accesses(struct pet_expr *expr,
4660 struct gpu_stmt_access **next_access)
4662 int i;
4664 for (i = 0; i < expr->n_arg; ++i)
4665 next_access = expr_extract_accesses(expr->args[i],
4666 next_access);
4668 if (expr->type == pet_expr_access)
4669 next_access = expr_extract_access(expr, next_access);
4671 return next_access;
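/* Collect all accesses in the body of "stmt" in stmt->accesses.
 */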
4674 static void pet_stmt_extract_accesses(struct gpu_stmt *stmt)
4676 struct gpu_stmt_access **next_access = &stmt->accesses;
4678 stmt->accesses = NULL;
4679 expr_extract_accesses(stmt->body, next_access);
4682 /* Return an array of gpu_stmt representing the statements in "scop".
4684 static struct gpu_stmt *extract_stmts(isl_ctx *ctx, struct ppcg_scop *scop,
4685 __isl_keep isl_set *context)
4687 int i;
4688 struct gpu_stmt *stmts;
4690 stmts = isl_calloc_array(ctx, struct gpu_stmt, scop->n_stmt);
4691 assert(stmts);
4693 for (i = 0; i < scop->n_stmt; ++i) {
4694 struct gpu_stmt *s = &stmts[i];
4696 s->id = isl_set_get_tuple_id(scop->stmts[i]->domain);
4697 s->body = scop->stmts[i]->body;
4698 pet_stmt_extract_accesses(s);
4701 return stmts;
4704 /* Construct an AST that replaces the scop represented by "prog"
4705 * by equivalent code that uses the GPU.
4707 * We first compute a schedule that respects the dependences
4708 * of the original program and select the outermost band
4709 * of tilable dimensions that has at least one parallel loop.
4710 * We then have three blocks of dimensions
4712 * H B G
4714 * The tilable band "B" is first tiled according to "tile" sizes, resulting
4715 * in
4717 * H T P G
4719 * For each iteration of the T loop and for each array, we compute
4720 * the array elements accessed by that iteration, construct a rectangular
4721 * box around it and shift it to the origin. The result is used
4722 * as shared memory for the array.
4724 * We then split off at most 2 parallel loops from the T loops and
4725 * at most 3 parallel loops from the P loops
4727 * H T1 T2 P1 P2 G
4729 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4730 * according to "grid"/"block" sizes.
4732 * H T1T T1P T2 P1T P1P P2 G
4734 * Finally, the T1P and P1P iterators are equated to the block and
4735 * thread dimensions respectively and so are effectively removed.
4736 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4737 * are run on the GPU.
4739 * Code is generated in three stages. We first generate code for the
4740 * host (the H loops), with iterators h%d. Then, for each leaf node
4741 * of the resulting AST, we generate code for the shared loops (up to
4742 * and including T2), with iterators g%d and after equating the H loops
4743 * to h%d parameters and the T1P loops to the block dimensions.
4744 * Finally, we generate code for the remaining loops in a similar fashion.
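 *
 * A minimal calling sketch (hypothetical driver code; how the
 * ppcg_scop "scop" and the "options" are obtained is outside
 * the scope of this file):
 *
 *	struct gpu_prog *prog = gpu_prog_alloc(ctx, scop);
 *	isl_ast_node *tree = generate_gpu(ctx, prog, options);
 *	... print or otherwise consume "tree" ...
 *	isl_ast_node_free(tree);
 *	gpu_prog_free(prog);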
4746 __isl_give isl_ast_node *generate_gpu(isl_ctx *ctx, struct gpu_prog *prog,
4747 struct ppcg_options *options)
4749 isl_union_map *sched;
4750 struct gpu_gen gen;
4751 isl_ast_node *tree;
4753 if (!prog)
4754 return NULL;
4756 gen.ctx = ctx;
4757 gen.prog = prog;
4758 gen.sizes = extract_sizes_from_str(ctx, options->sizes);
4759 gen.options = options;
4761 sched = isl_union_map_copy(prog->scop->schedule);
4763 compute_schedule(&gen, sched);
4765 gen.kernel_id = 0;
4766 tree = generate_host_code(&gen);
4768 clear_gpu_gen(&gen);
4770 return tree;
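/* Allocate a gpu_prog for "scop", copying its context and
 * access relations and extracting its statements and array information.
 */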
4773 struct gpu_prog *gpu_prog_alloc(isl_ctx *ctx, struct ppcg_scop *scop)
4775 struct gpu_prog *prog;
4777 if (!scop)
4778 return NULL;
4780 prog = isl_calloc_type(ctx, struct gpu_prog);
4781 assert(prog);
4783 prog->ctx = ctx;
4784 prog->scop = scop;
4785 prog->context = isl_set_copy(scop->context);
4786 prog->n_stmts = scop->n_stmt;
4787 prog->stmts = extract_stmts(ctx, scop, prog->context);
4788 prog->read = isl_union_map_copy(scop->reads);
4789 prog->write = isl_union_map_copy(scop->writes);
4791 collect_array_info(prog);
4793 return prog;
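/* Free "prog", including all the data structures it points to.
 */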
4796 void gpu_prog_free(struct gpu_prog *prog)
4798 if (!prog)
4799 return;
4800 free_array_info(prog);
4801 free_stmts(prog->stmts, prog->n_stmts);
4802 isl_union_set_free(prog->copy_in);
4803 isl_union_map_free(prog->read);
4804 isl_union_map_free(prog->write);
4805 isl_set_free(prog->context);
4806 free(prog);