cuda.c: fix typos in comments
1 /*
2 * Copyright 2010-2011 INRIA Saclay
4 * Use of this software is governed by the GNU LGPLv2.1 license
6 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
7 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
8 * 91893 Orsay, France
9 */
11 #include <assert.h>
12 #include <stdlib.h>
14 #include <isl/polynomial.h>
15 #include <isl/union_set.h>
16 #include <isl/aff.h>
17 #include <isl/ilp.h>
18 #include <isl/flow.h>
19 #include <isl/band.h>
20 #include <isl/schedule.h>
21 #include <isl/options.h>
22 #include <cloog/isl/cloog.h>
24 #include "cuda.h"
25 #include "cuda_common.h"
26 #include "clast_printer.h"
27 #include "schedule.h"
28 #include "pet_printer.h"
29 #include "ppcg_options.h"
31 /* The fields stride, shift and shift_map only contain valid information
32 * if shift != NULL.
33 * If so, they express that the current index is such that if you add shift,
34 * then the result is always a multiple of stride.
35 * shift_map contains the mapping
37 * i -> (i + shift)/stride
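/* As an illustrative example (not part of the original comments): with
 * stride = 4 and shift = 1, an index i = 7 has this property, since
 * 7 + 1 = 8 is a multiple of 4, and shift_map sends it to (7 + 1)/4 = 2.
 */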
39 struct cuda_array_bound {
40 isl_int size;
41 isl_aff *lb;
43 isl_int stride;
44 isl_aff *shift;
45 isl_basic_map *shift_map;
48 struct cuda_array_info;
50 /* A group of array references in a kernel that should be handled together.
51 * If private_bound is not NULL, then it is mapped to registers.
52 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
53 * Otherwise, it is accessed from global memory.
55 struct cuda_array_ref_group {
56 /* The references in this group access this array. */
57 struct cuda_array_info *array;
58 /* Position of this group in the list of reference groups of array. */
59 int nr;
61 /* The following fields are used during the construction of the groups.
62 * access is the combined access relation relative to the shared
63 * memory tiling.
64 * write is set if any access in the group is a write.
66 isl_map *access;
67 int write;
69 /* For each index, size and offset of piece in shared memory. */
70 struct cuda_array_bound *shared_bound;
72 /* For each index, size and offset of piece in private memory. */
73 struct cuda_array_bound *private_bound;
75 /* References in this group; point to elements of a linked list. */
76 int n_ref;
77 struct cuda_stmt_access **refs;
79 /* Last shared memory tile dimension that affects tile of this group. */
80 int last_shared;
81 /* Dimension at which copying to/from shared memory is printed.
82 * if >= 0, then the value is >= last_shared
83 * if -1, then the copying is done at the leaf level.
85 int print_shared_level;
88 struct cuda_array_info {
89 isl_space *dim;
90 /* Element type. */
91 char *type;
92 /* Element size. */
93 int size;
94 /* Name of the array. */
95 char *name;
96 /* Number of indices. */
97 unsigned n_index;
98 /* For each index, a bound on the array in that direction. */
99 isl_pw_aff **bound;
100 /* For each index, bound[i] specialized to the current kernel. */
101 isl_pw_aff **local_bound;
103 /* All references to this array; point to elements of a linked list. */
104 int n_ref;
105 struct cuda_stmt_access **refs;
107 /* The reference groups associated to this array. */
108 int n_group;
109 struct cuda_array_ref_group **groups;
111 /* For scalars, is this scalar read-only within the entire program? */
112 int read_only;
115 /* Print the name of the local copy of a given group of array references.
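/* For example (illustrative only): reference group 1 of an array "A"
 * that is mapped to shared memory, where "A" has more than one group,
 * is printed as "shared_A_1"; a group that stays in global memory is
 * simply printed as "A".
 */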
117 static __isl_give isl_printer *print_array_name(__isl_take isl_printer *p,
118 struct cuda_array_ref_group *group)
120 int global = 0;
122 if (group->private_bound)
123 p = isl_printer_print_str(p, "private_");
124 else if (group->shared_bound)
125 p = isl_printer_print_str(p, "shared_");
126 else
127 global = 1;
128 p = isl_printer_print_str(p, group->array->name);
129 if (!global && group->array->n_group > 1) {
130 p = isl_printer_print_str(p, "_");
131 p = isl_printer_print_int(p, group->nr);
134 return p;
137 /* Collect all references to the given array and store pointers to them
138 * in array->refs.
140 static void collect_references(struct cuda_gen *gen,
141 struct cuda_array_info *array)
143 int i;
144 int n;
146 n = 0;
147 for (i = 0; i < gen->n_stmts; ++i) {
148 struct cuda_stmt *stmt = &gen->stmts[i];
149 struct cuda_stmt_access *access;
151 for (access = stmt->accesses; access; access = access->next) {
152 const char *name;
153 name = isl_map_get_tuple_name(access->access,
154 isl_dim_out);
155 if (name && !strcmp(array->name, name))
156 n++;
160 array->n_ref = n;
161 array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
162 assert(array->refs);
164 n = 0;
165 for (i = 0; i < gen->n_stmts; ++i) {
166 struct cuda_stmt *stmt = &gen->stmts[i];
167 struct cuda_stmt_access *access;
169 for (access = stmt->accesses; access; access = access->next) {
170 const char *name;
171 name = isl_map_get_tuple_name(access->access,
172 isl_dim_out);
173 if (!name || strcmp(array->name, name))
174 continue;
176 array->refs[n++] = access;
181 static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
183 int i;
184 struct cuda_array_bound *bound;
186 bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
187 assert(bound);
189 for (i = 0; i < n_index; ++i) {
190 isl_int_init(bound[i].size);
191 bound[i].lb = NULL;
192 isl_int_init(bound[i].stride);
193 bound[i].shift = NULL;
194 bound[i].shift_map = NULL;
197 return bound;
200 static void free_bound_list(struct cuda_array_bound *bound, int n_index)
202 int j;
204 if (!bound)
205 return;
207 for (j = 0; j < n_index; ++j) {
208 isl_int_clear(bound[j].size);
209 isl_int_clear(bound[j].stride);
210 isl_aff_free(bound[j].lb);
211 isl_aff_free(bound[j].shift);
212 isl_basic_map_free(bound[j].shift_map);
214 free(bound);
217 static struct pet_array *find_array(struct pet_scop *scop,
218 __isl_keep isl_set *accessed)
220 int i;
221 isl_id *id;
223 id = isl_set_get_tuple_id(accessed);
225 for (i = 0; i < scop->n_array; ++i) {
226 isl_id *id_i;
228 id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
229 isl_id_free(id_i);
230 if (id == id_i)
231 break;
233 isl_id_free(id);
235 return i < scop->n_array ? scop->arrays[i] : NULL;
238 /* Compute bounds on the host arrays based on the accessed elements
239 * and collect all references to the array.
241 * If the array is zero-dimensional, i.e., a scalar, we check
242 * whether it is read-only.
244 static int extract_array_info(__isl_take isl_set *array, void *user)
246 int i;
247 struct cuda_gen *gen = (struct cuda_gen *)user;
248 const char *name;
249 int n_index;
250 isl_pw_aff **bounds;
251 isl_pw_aff **local_bounds;
252 struct pet_array *pa;
254 n_index = isl_set_dim(array, isl_dim_set);
255 name = isl_set_get_tuple_name(array);
256 bounds = isl_alloc_array(isl_set_get_ctx(array),
257 isl_pw_aff *, n_index);
258 assert(bounds);
259 local_bounds = isl_calloc_array(isl_set_get_ctx(array),
260 isl_pw_aff *, n_index);
261 assert(local_bounds);
262 gen->array[gen->n_array].dim = isl_set_get_space(array);
263 gen->array[gen->n_array].name = strdup(name);
264 gen->array[gen->n_array].n_index = n_index;
265 gen->array[gen->n_array].bound = bounds;
266 gen->array[gen->n_array].local_bound = local_bounds;
268 pa = find_array(gen->scop, array);
269 assert(pa);
271 gen->array[gen->n_array].type = strdup(pa->element_type);
272 gen->array[gen->n_array].size = pa->element_size;
274 if (n_index == 0) {
275 isl_set *space;
276 isl_union_map *write;
277 int empty;
279 write = isl_union_map_copy(gen->write);
280 space = isl_set_universe(isl_set_get_space(array));
281 write = isl_union_map_intersect_range(write,
282 isl_union_set_from_set(space));
283 empty = isl_union_map_is_empty(write);
284 isl_union_map_free(write);
286 gen->array[gen->n_array].read_only = empty;
289 for (i = 0; i < n_index; ++i) {
290 isl_set *dom;
291 isl_local_space *ls;
292 isl_aff *one;
293 isl_pw_aff *bound;
294 isl_set *size = i == 0 ? array : pa->extent;
296 bound = isl_set_dim_max(isl_set_copy(size), i);
297 assert(bound);
298 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
299 ls = isl_local_space_from_space(isl_set_get_space(dom));
300 one = isl_aff_zero_on_domain(ls);
301 one = isl_aff_add_constant_si(one, 1);
302 bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
303 bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));
305 bounds[i] = bound;
308 collect_references(gen, &gen->array[gen->n_array]);
310 gen->n_array++;
312 isl_set_free(array);
313 return 0;
316 void collect_array_info(struct cuda_gen *gen)
318 isl_union_set *arrays;
320 arrays = isl_union_map_range(isl_union_map_copy(gen->read));
321 arrays = isl_union_set_union(arrays,
322 isl_union_map_range(isl_union_map_copy(gen->write)));
323 arrays = isl_union_set_coalesce(arrays);
325 gen->n_array = isl_union_set_n_set(arrays);
326 gen->array = isl_alloc_array(gen->ctx,
327 struct cuda_array_info, gen->n_array);
328 assert(gen->array);
329 gen->n_array = 0;
330 isl_union_set_foreach_set(arrays, &extract_array_info, gen);
331 isl_union_set_free(arrays);
334 static void free_array_info(struct cuda_gen *gen)
336 int i, j;
338 for (i = 0; i < gen->n_array; ++i) {
339 int n_index = gen->array[i].n_index;
340 free(gen->array[i].type);
341 free(gen->array[i].name);
342 for (j = 0; j < n_index; ++j) {
343 isl_pw_aff_free(gen->array[i].bound[j]);
344 isl_pw_aff_free(gen->array[i].local_bound[j]);
346 isl_space_free(gen->array[i].dim);
347 free(gen->array[i].bound);
348 free(gen->array[i].local_bound);
349 free(gen->array[i].refs);
351 free(gen->array);
354 /* Check if a cuda array is a scalar. A scalar is a value that is not stored
355 * as an array or through a pointer reference, but as a single data element. At
356 * the moment, scalars are represented as zero dimensional arrays.
358 static int cuda_array_is_scalar(struct cuda_array_info *array)
360 return (array->n_index == 0);
363 /* Is "array" a read-only scalar?
365 static int cuda_array_is_read_only_scalar(struct cuda_array_info *array)
367 return cuda_array_is_scalar(array) && array->read_only;
370 static void declare_device_arrays(struct cuda_gen *gen)
372 int i;
374 for (i = 0; i < gen->n_array; ++i) {
375 if (cuda_array_is_read_only_scalar(&gen->array[i]))
376 continue;
377 fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
378 gen->array[i].type, gen->array[i].name);
380 fprintf(gen->cuda.host_c, "\n");
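/* Print an expression for the size in bytes of the given array,
 * as the product of the extents in each dimension and the element size.
 * Illustratively, for a two-dimensional "float" array with extents
 * n and m, this would print "(n) * (m) * sizeof(float)".
 */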
383 static void print_array_size(struct cuda_gen *gen, FILE *out,
384 struct cuda_array_info *array)
386 int i;
387 isl_printer *prn;
389 prn = isl_printer_to_file(gen->ctx, out);
390 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
391 for (i = 0; i < array->n_index; ++i) {
392 prn = isl_printer_print_str(prn, "(");
393 prn = isl_printer_print_pw_aff(prn, array->bound[i]);
394 prn = isl_printer_print_str(prn, ") * ");
396 prn = isl_printer_print_str(prn, "sizeof(");
397 prn = isl_printer_print_str(prn, array->type);
398 prn = isl_printer_print_str(prn, ")");
399 isl_printer_free(prn);
402 static void allocate_device_arrays(struct cuda_gen *gen)
404 int i;
406 for (i = 0; i < gen->n_array; ++i) {
407 if (cuda_array_is_read_only_scalar(&gen->array[i]))
408 continue;
409 fprintf(gen->cuda.host_c,
410 "cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
411 gen->array[i].name);
412 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
413 fprintf(gen->cuda.host_c, "));\n");
415 fprintf(gen->cuda.host_c, "\n");
418 static void free_device_arrays(struct cuda_gen *gen)
420 int i;
422 for (i = 0; i < gen->n_array; ++i) {
423 if (cuda_array_is_read_only_scalar(&gen->array[i]))
424 continue;
425 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaFree(dev_%s));\n",
426 gen->array[i].name);
430 static void copy_arrays_to_device(struct cuda_gen *gen)
432 int i;
434 for (i = 0; i < gen->n_array; ++i) {
435 isl_space *dim;
436 isl_set *read_i;
437 int empty;
439 if (cuda_array_is_read_only_scalar(&gen->array[i]))
440 continue;
442 dim = isl_space_copy(gen->array[i].dim);
443 read_i = isl_union_set_extract_set(gen->copy_in, dim);
444 empty = isl_set_fast_is_empty(read_i);
445 isl_set_free(read_i);
446 if (empty)
447 continue;
449 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(dev_%s,",
450 gen->array[i].name);
452 if (cuda_array_is_scalar(&(gen->array[i])))
453 fprintf(gen->cuda.host_c, " &%s, ",
454 gen->array[i].name);
455 else
456 fprintf(gen->cuda.host_c, " %s, ", gen->array[i].name);
458 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
459 fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice));\n");
461 fprintf(gen->cuda.host_c, "\n");
464 static void copy_arrays_from_device(struct cuda_gen *gen)
466 int i;
467 isl_union_set *write;
468 write = isl_union_map_range(isl_union_map_copy(gen->write));
470 for (i = 0; i < gen->n_array; ++i) {
471 isl_space *dim;
472 isl_set *write_i;
473 int empty;
475 dim = isl_space_copy(gen->array[i].dim);
476 write_i = isl_union_set_extract_set(write, dim);
477 empty = isl_set_fast_is_empty(write_i);
478 isl_set_free(write_i);
479 if (empty)
480 continue;
482 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(");
483 if (cuda_array_is_scalar(&gen->array[i]))
484 fprintf(gen->cuda.host_c, "&%s, ", gen->array[i].name);
485 else
486 fprintf(gen->cuda.host_c, "%s, ", gen->array[i].name);
487 fprintf(gen->cuda.host_c, "dev_%s, ", gen->array[i].name);
488 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
489 fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost));\n");
492 isl_union_set_free(write);
493 fprintf(gen->cuda.host_c, "\n");
496 /* Internal data structure for extract_size_of_type.
497 * "type" specifies the name of the space that we want to extract.
498 * "res" is used to store the subset of that space.
500 struct ppcg_extract_size_data {
501 const char *type;
502 isl_set *res;
505 /* This function is called for each set in a union_set.
506 * If the name of the set matches data->type, we store the
507 * set in data->res.
509 static int extract_size_of_type(__isl_take isl_set *size, void *user)
511 struct ppcg_extract_size_data *data = user;
512 const char *name;
514 name = isl_set_get_tuple_name(size);
515 if (name && !strcmp(name, data->type)) {
516 data->res = size;
517 return -1;
520 isl_set_free(size);
521 return 0;
524 /* Given a union map { kernel[i] -> *[...] },
525 * return the range in the space called "type" for the kernel with
526 * sequence number "id".
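/* For example (illustrative; not the literal option syntax), given the
 * union map
 *
 *	{ kernel[0] -> tile[32, 32]; kernel[0] -> block[16, 16] }
 *
 * a call with type "block" and id 0 would return the set { block[16, 16] }.
 */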
528 static __isl_give isl_set *extract_sizes(__isl_keep isl_union_map *sizes,
529 const char *type, int id)
531 isl_space *space;
532 isl_set *dom;
533 isl_union_set *local_sizes;
534 struct ppcg_extract_size_data data = { type, NULL };
536 if (!sizes)
537 return NULL;
539 space = isl_union_map_get_space(sizes);
540 space = isl_space_set_from_params(space);
541 space = isl_space_add_dims(space, isl_dim_set, 1);
542 space = isl_space_set_tuple_name(space, isl_dim_set, "kernel");
543 dom = isl_set_universe(space);
544 dom = isl_set_fix_si(dom, isl_dim_set, 0, id);
546 local_sizes = isl_union_set_apply(isl_union_set_from_set(dom),
547 isl_union_map_copy(sizes));
548 isl_union_set_foreach_set(local_sizes, &extract_size_of_type, &data);
549 isl_union_set_free(local_sizes);
550 return data.res;
553 /* Given a singleton set, extract the first (at most *len) elements
554 * of the single integer tuple into *sizes and update *len if needed.
556 static void read_sizes_from_set(__isl_take isl_set *set, int *sizes, int *len)
558 int i;
559 int dim;
560 isl_int v;
562 if (!set)
563 return;
565 dim = isl_set_dim(set, isl_dim_set);
566 if (dim < *len)
567 *len = dim;
569 isl_int_init(v);
571 for (i = 0; i < *len; ++i) {
572 int ok;
574 ok = isl_set_plain_is_fixed(set, isl_dim_set, i, &v);
575 assert(ok);
577 sizes[i] = isl_int_get_si(v);
580 isl_int_clear(v);
582 isl_set_free(set);
585 /* Extract user specified "tile" sizes from the "sizes" command line option,
586 * defaulting to option->tile_size in each dimension.
588 static void read_tile_sizes(struct cuda_gen *gen)
590 int n;
591 isl_set *size;
593 gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
594 assert(gen->tile_size);
595 for (n = 0; n < gen->tile_len; ++n)
596 gen->tile_size[n] = gen->options->tile_size;
598 size = extract_sizes(gen->sizes, "tile", gen->kernel_id);
599 read_sizes_from_set(size, gen->tile_size, &gen->tile_len);
601 if (gen->n_parallel > gen->tile_len)
602 gen->n_parallel = gen->tile_len;
605 /* Extract user specified "block" sizes from the "sizes" command line option,
606 * after filling in some potentially useful defaults.
608 static void read_block_sizes(struct cuda_gen *gen)
610 int n;
611 isl_set *size;
613 n = gen->n_parallel;
614 gen->n_block = (n <= 3) ? n : 3;
615 switch (gen->n_block) {
616 case 1:
617 gen->block_dim[0] = 512;
618 break;
619 case 2:
620 gen->block_dim[0] = 32;
621 gen->block_dim[1] = 16;
622 break;
623 default:
624 gen->block_dim[0] = 32;
625 gen->block_dim[1] = 4;
626 gen->block_dim[2] = 4;
627 break;
630 size = extract_sizes(gen->sizes, "block", gen->kernel_id);
631 read_sizes_from_set(size, gen->block_dim, &gen->n_block);
634 /* Extract user specified "grid" sizes from the "sizes" command line option,
635 * after filling in some potentially useful defaults.
637 static void read_grid_sizes(struct cuda_gen *gen)
639 int n = gen->n_parallel;
640 isl_set *size;
642 gen->n_grid = (n <= 2) ? n : 2;
643 switch (gen->n_grid) {
644 case 1:
645 gen->grid_dim[0] = 32768;
646 break;
647 default:
648 gen->grid_dim[0] = 256;
649 gen->grid_dim[1] = 256;
650 break;
653 size = extract_sizes(gen->sizes, "grid", gen->kernel_id);
654 read_sizes_from_set(size, gen->grid_dim, &gen->n_grid);
657 /* Extract user specified sizes from the "sizes" command line option
658 * after filling in some potentially useful defaults.
660 static void read_sizes(struct cuda_gen *gen)
662 read_tile_sizes(gen);
663 read_block_sizes(gen);
664 read_grid_sizes(gen);
667 static void free_stmts(struct cuda_stmt *stmts, int n)
669 int i;
671 for (i = 0; i < n; ++i) {
672 struct cuda_stmt_access *access, *next;
674 for (access = stmts[i].accesses; access; access = next) {
675 next = access->next;
676 isl_map_free(access->access);
677 free(access);
680 isl_set_free(stmts[i].domain);
682 free(stmts);
685 void clear_cuda_gen(struct cuda_gen *gen)
687 free_stmts(gen->stmts, gen->n_stmts);
688 free_array_info(gen);
689 isl_union_map_free(gen->sizes);
690 isl_set_free(gen->context);
691 isl_union_set_free(gen->copy_in);
692 isl_union_map_free(gen->sched);
693 isl_union_map_free(gen->read);
694 isl_union_map_free(gen->write);
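/* Print the "len" elements of "list" in reverse order, enclosed in
 * parentheses and separated by commas, e.g., "(2, 1, 0)" for a list
 * containing 0, 1 and 2 (in that order).  Nothing is printed if "len"
 * is zero.
 */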
697 static void print_reverse_list(FILE *out, int len, int *list)
699 int i;
701 if (len == 0)
702 return;
704 fprintf(out, "(");
705 for (i = 0; i < len; ++i) {
706 if (i)
707 fprintf(out, ", ");
708 fprintf(out, "%d", list[len - 1 - i]);
710 fprintf(out, ")");
713 static void print_kernel_launch(struct cuda_gen *gen,
714 __isl_keep isl_union_set *arrays)
716 int i;
717 int first = 1;
718 unsigned nparam;
719 isl_space *dim;
721 print_indent(gen->code.dst, gen->code.indent);
722 fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
723 gen->kernel_id, gen->kernel_id, gen->kernel_id);
724 fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
725 gen->kernel_id);
726 fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
727 gen->kernel_id);
729 for (i = 0; i < gen->n_array; ++i) {
730 isl_space *dim;
731 isl_set *arr;
732 int empty;
734 dim = isl_space_copy(gen->array[i].dim);
735 arr = isl_union_set_extract_set(arrays, dim);
736 empty = isl_set_fast_is_empty(arr);
737 isl_set_free(arr);
738 if (empty)
739 continue;
741 if (!first) {
742 fprintf(gen->code.dst, ", ");
743 fprintf(gen->cuda.kernel_c, ", ");
744 fprintf(gen->cuda.kernel_h, ", ");
747 if (cuda_array_is_read_only_scalar(&gen->array[i])) {
748 fprintf(gen->code.dst, "%s", gen->array[i].name);
749 fprintf(gen->cuda.kernel_c, "%s %s",
750 gen->array[i].type, gen->array[i].name);
751 fprintf(gen->cuda.kernel_h, "%s %s",
752 gen->array[i].type, gen->array[i].name);
753 } else {
754 fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
755 fprintf(gen->cuda.kernel_c, "%s *%s",
756 gen->array[i].type, gen->array[i].name);
757 fprintf(gen->cuda.kernel_h, "%s *%s",
758 gen->array[i].type, gen->array[i].name);
761 first = 0;
764 dim = isl_union_set_get_space(arrays);
765 nparam = isl_space_dim(dim, isl_dim_param);
766 for (i = 0; i < nparam; ++i) {
767 const char *name = isl_space_get_dim_name(dim, isl_dim_param, i);
768 if (!first) {
769 fprintf(gen->code.dst, ", ");
770 fprintf(gen->cuda.kernel_c, ", ");
771 fprintf(gen->cuda.kernel_h, ", ");
773 fprintf(gen->code.dst, "%s", name);
774 fprintf(gen->cuda.kernel_c, "int %s", name);
775 fprintf(gen->cuda.kernel_h, "int %s", name);
776 first = 0;
778 isl_space_free(dim);
780 for (i = 0; i < gen->tile_first; ++i) {
781 if (!first) {
782 fprintf(gen->code.dst, ", ");
783 fprintf(gen->cuda.kernel_c, ", ");
784 fprintf(gen->cuda.kernel_h, ", ");
786 fprintf(gen->code.dst, "h%d", i);
787 fprintf(gen->cuda.kernel_c, "int h%d", i);
788 fprintf(gen->cuda.kernel_h, "int h%d", i);
789 first = 0;
792 fprintf(gen->code.dst, ");\n");
793 fprintf(gen->cuda.kernel_c, ")\n");
794 fprintf(gen->cuda.kernel_h, ");\n");
796 fprintf(gen->code.dst, "cudaCheckKernel();\n");
799 /* Construct a map from a domain of dimensionality "len"
800 * to a domain of dimensionality "len" + "tile_len" that tiles
801 * the "tile_len" coordinates starting at "first".
802 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
803 * "dim" prescribes the parameters.
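/* As an illustrative example, with len = 1, first = 0, tile_len = 1 and
 * tile_size[0] = 32, the constructed map sends [70] to [2, 6],
 * since 70 = 32 * 2 + 6 and 0 <= 6 <= 31.
 */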
805 static __isl_give isl_map *tile(__isl_take isl_space *dim, int len,
806 int first, int tile_len, int *tile_size)
808 int i;
809 isl_int v;
810 isl_basic_map *bmap;
811 isl_constraint *c;
812 isl_local_space *ls;
814 isl_int_init(v);
816 dim = isl_space_add_dims(dim, isl_dim_in, len);
817 dim = isl_space_add_dims(dim, isl_dim_out, len + tile_len);
818 bmap = isl_basic_map_universe(isl_space_copy(dim));
819 ls = isl_local_space_from_space(dim);
821 for (i = 0; i < len - tile_len; ++i) {
822 int j = i < first ? i : i + tile_len;
823 int k = i < first ? i : i + 2 * tile_len;
825 c = isl_equality_alloc(isl_local_space_copy(ls));
826 isl_int_set_si(v, -1);
827 isl_constraint_set_coefficient(c, isl_dim_in, j, v);
828 isl_int_set_si(v, 1);
829 isl_constraint_set_coefficient(c, isl_dim_out, k, v);
830 bmap = isl_basic_map_add_constraint(bmap, c);
833 for (i = 0; i < tile_len; ++i) {
834 c = isl_equality_alloc(isl_local_space_copy(ls));
835 isl_int_set_si(v, -1);
836 isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
837 isl_int_set_si(v, tile_size[i]);
838 isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
839 isl_int_set_si(v, 1);
840 isl_constraint_set_coefficient(c, isl_dim_out,
841 first + i + tile_len, v);
842 bmap = isl_basic_map_add_constraint(bmap, c);
844 c = isl_inequality_alloc(isl_local_space_copy(ls));
845 isl_int_set_si(v, 1);
846 isl_constraint_set_coefficient(c, isl_dim_out,
847 first + i + tile_len, v);
848 bmap = isl_basic_map_add_constraint(bmap, c);
850 c = isl_inequality_alloc(isl_local_space_copy(ls));
851 isl_int_set_si(v, -1);
852 isl_constraint_set_coefficient(c, isl_dim_out,
853 first + i + tile_len, v);
854 isl_int_set_si(v, tile_size[i] - 1);
855 isl_constraint_set_constant(c, v);
856 bmap = isl_basic_map_add_constraint(bmap, c);
859 isl_local_space_free(ls);
860 isl_int_clear(v);
862 return isl_map_from_basic_map(bmap);
865 /* Construct a map from a domain of dimensionality "len"
866 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
867 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
868 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
869 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
870 * that are projected out at the end.
871 * "dim" prescribes the parameters.
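/* As an illustrative example, with len = 1, first = 0, wrap_len = 1 and
 * wrap_size[0] = 16, the constructed map sends [37] to [37, 5],
 * since 37 % 16 = 5.
 */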
873 static __isl_give isl_map *wrap(__isl_take isl_space *dim, int len,
874 int first, int wrap_len, int *wrap_size)
876 int i;
877 isl_basic_map *bmap;
878 isl_constraint *c;
879 isl_local_space *ls;
881 dim = isl_space_add_dims(dim, isl_dim_in, len);
882 dim = isl_space_add_dims(dim, isl_dim_out, len + 2 * wrap_len);
883 bmap = isl_basic_map_universe(isl_space_copy(dim));
884 ls = isl_local_space_from_space(dim);
886 for (i = 0; i < len; ++i) {
887 int k = i < first + wrap_len ? i : i + 2 * wrap_len;
889 c = isl_equality_alloc(isl_local_space_copy(ls));
890 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
891 isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
892 bmap = isl_basic_map_add_constraint(bmap, c);
895 for (i = 0; i < wrap_len; ++i) {
896 c = isl_equality_alloc(isl_local_space_copy(ls));
897 isl_constraint_set_coefficient_si(c, isl_dim_out,
898 first + i, -1);
899 isl_constraint_set_coefficient_si(c, isl_dim_out,
900 first + wrap_len + i, 1);
901 isl_constraint_set_coefficient_si(c, isl_dim_out,
902 first + 2 * wrap_len + i, wrap_size[i]);
903 bmap = isl_basic_map_add_constraint(bmap, c);
905 c = isl_inequality_alloc(isl_local_space_copy(ls));
906 isl_constraint_set_coefficient_si(c, isl_dim_out,
907 first + wrap_len + i, 1);
908 bmap = isl_basic_map_add_constraint(bmap, c);
910 c = isl_inequality_alloc(isl_local_space_copy(ls));
911 isl_constraint_set_coefficient_si(c, isl_dim_out,
912 first + wrap_len + i, -1);
913 isl_constraint_set_constant_si(c, wrap_size[i] - 1);
914 bmap = isl_basic_map_add_constraint(bmap, c);
917 isl_local_space_free(ls);
919 bmap = isl_basic_map_project_out(bmap, isl_dim_out,
920 first + 2 * wrap_len, wrap_len);
922 return isl_map_from_basic_map(bmap);
925 /* Add "n" parameters named prefix%d.
927 static __isl_give isl_set *add_params( __isl_take isl_set *set,
928 int n, const char *prefix)
930 int i;
931 unsigned nparam;
932 char name[20];
934 nparam = isl_set_dim(set, isl_dim_param);
935 set = isl_set_add_dims(set, isl_dim_param, n);
937 for (i = 0; i < n; ++i) {
938 snprintf(name, sizeof(name), "%s%d", prefix, i);
939 set = isl_set_set_dim_name(set, isl_dim_param,
940 nparam + i, name);
943 return set;
946 /* Equate the "n" dimensions of "set" starting at "first" to
947 * freshly created parameters named prefix%d.
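/* For example (illustrative), parametrizing the first two dimensions of
 * a set { [i, j, k] } with prefix "g" adds parameters g0 and g1 and
 * intersects the set with { [i, j, k] : i = g0 and j = g1 }.
 */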
949 static __isl_give isl_set *parametrize(__isl_take isl_set *set,
950 int first, int n, const char *prefix)
952 int i;
953 unsigned nparam;
954 isl_int v;
955 isl_space *dim;
956 isl_basic_set *bset;
957 isl_constraint *c;
958 isl_local_space *ls;
960 nparam = isl_set_dim(set, isl_dim_param);
962 set = add_params(set, n, prefix);
964 dim = isl_set_get_space(set);
965 bset = isl_basic_set_universe(isl_space_copy(dim));
966 ls = isl_local_space_from_space(dim);
968 isl_int_init(v);
970 for (i = 0; i < n; ++i) {
971 c = isl_equality_alloc(isl_local_space_copy(ls));
972 isl_int_set_si(v, -1);
973 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
974 isl_int_set_si(v, 1);
975 isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
976 bset = isl_basic_set_add_constraint(bset, c);
979 isl_int_clear(v);
980 isl_local_space_free(ls);
982 return isl_set_intersect(set, isl_set_from_basic_set(bset));
985 static __isl_give isl_set *parametrization(__isl_take isl_space *dim,
986 int len, int first, int n, const char *prefix)
988 isl_set *set;
990 dim = isl_space_add_dims(dim, isl_dim_set, len);
991 set = isl_set_universe(dim);
993 return parametrize(set, first, n, prefix);
996 /* Tile the B loops over the tile sizes and then tile/wrap
997 * the T1 loops over the blocks.
999 static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
1000 __isl_take isl_union_map *sched)
1002 isl_space *dim;
1003 isl_map *tiling, *block_tiling;
1005 dim = isl_union_map_get_space(sched);
1006 tiling = tile(isl_space_copy(dim), gen->untiled_len,
1007 gen->tile_first, gen->tile_len, gen->tile_size);
1009 if (gen->options->wrap)
1010 block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
1011 gen->tile_first, gen->n_grid, gen->grid_dim);
1012 else
1013 block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
1014 gen->tile_first, gen->n_grid, gen->grid_dim);
1016 gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;
1018 tiling = isl_map_apply_range(tiling, block_tiling);
1020 sched = isl_union_map_apply_range(sched,
1021 isl_union_map_from_map(tiling));
1023 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
1025 return sched;
1028 static __isl_give isl_union_map *parametrize_tiled_schedule(
1029 struct cuda_gen *gen, __isl_take isl_union_map *sched)
1031 isl_space *dim;
1032 isl_set *par;
1034 dim = isl_union_map_get_space(sched);
1035 par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
1036 sched = isl_union_map_intersect_range(sched,
1037 isl_union_set_from_set(par));
1039 dim = isl_union_map_get_space(sched);
1040 par = parametrization(dim, gen->tiled_len,
1041 gen->tile_first + gen->n_grid, gen->n_grid, "b");
1042 sched = isl_union_map_intersect_range(sched,
1043 isl_union_set_from_set(par));
1045 return sched;
1048 /* Tile/wrap the P1 loops over the threads.
1050 static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
1051 __isl_take isl_union_map *sched)
1053 isl_space *dim;
1054 isl_map *tiling;
1055 isl_set *par;
1057 dim = isl_union_map_get_space(sched);
1059 if (gen->options->wrap)
1060 tiling = wrap(isl_space_copy(dim), gen->tiled_len,
1061 gen->shared_len, gen->n_block, gen->block_dim);
1062 else
1063 tiling = tile(isl_space_copy(dim), gen->tiled_len,
1064 gen->shared_len, gen->n_block, gen->block_dim);
1065 gen->thread_tiled_len = gen->tiled_len + gen->n_block;
1067 sched = isl_union_map_apply_range(sched,
1068 isl_union_map_from_map(tiling));
1070 par = parametrization(dim, gen->thread_tiled_len,
1071 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
1072 gen->n_block, "t");
1073 sched = isl_union_map_intersect_range(sched,
1074 isl_union_set_from_set(par));
1076 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
1078 return sched;
1081 /* If the user asked for it, scale the shared memory tile loops
1082 * (T1T and T2) of "sched" by gen->tile_size[i].
1083 * If we are not performing "wrapping", then additionally scale the T1P
1084 * loops by gen->grid_dim[i].
1086 static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
1087 __isl_take isl_union_map *sched)
1089 int i;
1090 isl_space *dim;
1091 isl_basic_map *scale;
1092 isl_constraint *c;
1093 isl_local_space *ls;
1095 if (!gen->options->scale_tile_loops)
1096 return sched;
1098 dim = isl_union_map_get_space(sched);
1099 dim = isl_space_add_dims(dim, isl_dim_in, gen->tiled_len);
1100 dim = isl_space_add_dims(dim, isl_dim_out, gen->tiled_len);
1101 scale = isl_basic_map_universe(isl_space_copy(dim));
1102 ls = isl_local_space_from_space(dim);
1104 for (i = 0; i < gen->tiled_len; ++i) {
1105 int f = 1;
1107 if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
1108 f = gen->tile_size[i - gen->tile_first];
1109 if (!gen->options->wrap)
1110 f *= gen->grid_dim[i - gen->tile_first];
1111 } else if (i >= gen->tile_first + gen->n_grid &&
1112 i < gen->tile_first + gen->n_grid + gen->tile_len) {
1113 f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
1116 c = isl_equality_alloc(isl_local_space_copy(ls));
1117 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1118 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1119 scale = isl_basic_map_add_constraint(scale, c);
1122 isl_local_space_free(ls);
1124 sched = isl_union_map_apply_range(sched,
1125 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1127 return sched;
1130 /* If we are not performing "wrapping" and if the user asked for it,
1131 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
1133 static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
1134 __isl_take isl_union_map *sched)
1136 int i;
1137 isl_space *dim;
1138 isl_basic_map *scale;
1139 isl_constraint *c;
1140 isl_local_space *ls;
1142 if (gen->options->wrap)
1143 return sched;
1144 if (!gen->options->scale_tile_loops)
1145 return sched;
1147 dim = isl_union_map_get_space(sched);
1148 dim = isl_space_add_dims(dim, isl_dim_in, gen->thread_tiled_len);
1149 dim = isl_space_add_dims(dim, isl_dim_out, gen->thread_tiled_len);
1150 scale = isl_basic_map_universe(isl_space_copy(dim));
1151 ls = isl_local_space_from_space(dim);
1153 for (i = 0; i < gen->thread_tiled_len; ++i) {
1154 int f = 1;
1156 if (i >= gen->shared_len &&
1157 i < gen->shared_len + gen->n_block)
1158 f = gen->block_dim[i - gen->shared_len];
1160 c = isl_equality_alloc(isl_local_space_copy(ls));
1161 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1162 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1163 scale = isl_basic_map_add_constraint(scale, c);
1166 isl_local_space_free(ls);
1168 sched = isl_union_map_apply_range(sched,
1169 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1171 return sched;
1174 /* If we are not performing "wrapping" and if the user asked for it,
1175 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
1177 static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
1178 __isl_take isl_union_map *sched, int len, int first, int n_tile)
1180 int i;
1181 isl_space *dim;
1182 isl_basic_map *scale;
1183 isl_constraint *c;
1184 isl_local_space *ls;
1186 if (gen->options->wrap)
1187 return sched;
1188 if (!gen->options->scale_tile_loops)
1189 return sched;
1191 dim = isl_union_map_get_space(sched);
1192 dim = isl_space_add_dims(dim, isl_dim_in, len);
1193 dim = isl_space_add_dims(dim, isl_dim_out, len);
1194 scale = isl_basic_map_universe(isl_space_copy(dim));
1195 ls = isl_local_space_from_space(dim);
1197 for (i = 0; i < len; ++i) {
1198 int f = 1;
1200 if (i >= first && i < first + n_tile)
1201 f = gen->block_dim[i - first];
1203 c = isl_equality_alloc(isl_local_space_copy(ls));
1204 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1205 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1206 scale = isl_basic_map_add_constraint(scale, c);
1209 isl_local_space_free(ls);
1211 sched = isl_union_map_apply_range(sched,
1212 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1214 return sched;
1217 /* If print_user_stmt is set, we want to print the statements ourselves,
1218 * instead of relying on the C preprocessor. If so, we need to use
1219 * the stop option so that the domains will be saved on the statement
1220 * nodes.
1222 static void print_cloog_shared_body(struct cuda_gen *gen,
1223 __isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
1224 void (*print_user_stmt)(struct clast_printer_info *info,
1225 struct clast_user_stmt *s),
1226 int first_unroll)
1228 int i;
1229 CloogOptions *options;
1230 CloogDomain *cloog_context;
1231 CloogUnionDomain *ud;
1232 CloogInput *input;
1233 struct clast_stmt *stmt;
1234 char name[20];
1236 sched = isl_union_map_copy(sched);
1237 sched = isl_union_map_align_params(sched, isl_set_get_space(context));
1239 options = cloog_options_malloc(gen->state);
1240 options->language = CLOOG_LANGUAGE_C;
1241 options->strides = 1;
1242 options->sh = 1;
1243 options->f = len;
1244 options->l = -1;
1245 options->override = 1;
1246 options->save_domains = 1;
1247 options->noscalars = 1;
1248 options->first_unroll = first_unroll;
1250 ud = cloog_union_domain_from_isl_union_map(sched);
1251 for (i = 0; i < len; ++i) {
1252 snprintf(name, sizeof(name), "c%d", i);
1253 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
1255 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
1256 input = cloog_input_alloc(cloog_context, ud);
1258 stmt = cloog_clast_create_from_input(input, options);
1260 gen->stmt_code.indent = gen->kernel_code.indent;
1261 gen->stmt_code.dst = gen->cuda.kernel_c;
1262 gen->stmt_code.print_user_stmt = print_user_stmt;
1263 gen->stmt_code.print_user_stmt_list = NULL;
1264 gen->stmt_code.print_for_head = NULL;
1265 gen->stmt_code.print_for_foot = NULL;
1266 gen->stmt_code.user = gen;
1267 print_clast(&gen->stmt_code, stmt);
1269 cloog_clast_free(stmt);
1270 cloog_options_free(options);
1273 /* Add "len" parameters p[i] called prefix%d,
1274 * with bounds 0 <= p[i] < size[i].
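/* As an illustrative example, with len = 2, size = { 32, 16 } and
 * prefix "t", this adds parameters t0 and t1 constrained by
 * 0 <= t0 <= 31 and 0 <= t1 <= 15.
 */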
1276 __isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
1277 int len, int *size, const char *prefix)
1279 int i;
1280 unsigned nparam;
1281 isl_int v;
1282 isl_space *dim;
1283 isl_basic_set *bset;
1284 isl_constraint *c;
1285 isl_local_space *ls;
1286 char name[20];
1288 nparam = isl_set_dim(set, isl_dim_param);
1289 set = isl_set_add_dims(set, isl_dim_param, len);
1291 for (i = 0; i < len; ++i) {
1292 snprintf(name, sizeof(name), "%s%d", prefix, i);
1293 set = isl_set_set_dim_name(set, isl_dim_param,
1294 nparam + i, name);
1297 dim = isl_set_get_space(set);
1298 bset = isl_basic_set_universe(isl_space_copy(dim));
1299 ls = isl_local_space_from_space(dim);
1301 isl_int_init(v);
1303 for (i = 0; i < len; ++i) {
1304 c = isl_inequality_alloc(isl_local_space_copy(ls));
1305 isl_int_set_si(v, 1);
1306 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1307 bset = isl_basic_set_add_constraint(bset, c);
1309 c = isl_inequality_alloc(isl_local_space_copy(ls));
1310 isl_int_set_si(v, -1);
1311 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1312 isl_int_set_si(v, size[i] - 1);
1313 isl_constraint_set_constant(c, v);
1314 bset = isl_basic_set_add_constraint(bset, c);
1317 isl_int_clear(v);
1318 isl_local_space_free(ls);
1320 return isl_set_intersect(set, isl_set_from_basic_set(bset));
1323 static void print_shared_body(struct cuda_gen *gen,
1324 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
1325 int len, void (*print_user_stmt)(struct clast_printer_info *info,
1326 struct clast_user_stmt *s),
1327 int first_unroll)
1329 isl_set *context;
1331 context = isl_set_copy(shared_domain);
1332 context = parametrize(context, 0, gen->shared_len, "g");
1333 context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
1334 context = add_bounded_parameters(context,
1335 gen->n_block, gen->block_dim, "t");
1337 print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
1338 first_unroll);
1340 isl_set_free(context);
1343 /* Given a tile of an array, construct a map that maps each element
1344 * of the tile to a copy of the tile shifted to the origin
1345 * (based on the lower bounds in group->private_bound or group->shared_bound).
1346 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1347 * is applied to the index first.
1348 * The domain of the resulting map is "access",
1349 * while the range space is anonymous.
1351 static __isl_give isl_map *shift_access(__isl_take isl_set *access,
1352 struct cuda_array_ref_group *group)
1354 int i;
1355 isl_space *dim;
1356 isl_basic_set *bset;
1357 isl_basic_map *bmap;
1358 isl_aff *lb;
1359 isl_basic_set *offset;
1360 isl_basic_map *shift;
1361 isl_basic_map *pre_shift;
1362 isl_map *sched;
1363 const char *name;
1364 struct cuda_array_bound *bounds;
1365 int n_index = group->array->n_index;
1367 bounds = group->private_bound;
1368 if (!bounds)
1369 bounds = group->shared_bound;
1371 dim = isl_set_get_space(access);
1372 dim = isl_space_drop_dims(dim, isl_dim_set, 0, n_index);
1373 offset = isl_basic_set_universe(dim);
1374 for (i = 0; i < n_index; ++i) {
1375 lb = isl_aff_copy(bounds[i].lb);
1376 bmap = isl_basic_map_from_aff(lb);
1377 bset = isl_basic_map_range(bmap);
1378 offset = isl_basic_set_flat_product(offset, bset);
1380 offset = isl_basic_set_neg(offset);
1382 dim = isl_space_map_from_set(isl_set_get_space(access));
1383 shift = isl_basic_map_identity(dim);
1384 shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);
1386 bset = isl_basic_set_universe(isl_set_get_space(access));
1387 bmap = isl_basic_map_from_domain_and_range(bset, offset);
1389 shift = isl_basic_map_sum(shift, bmap);
1391 dim = isl_set_get_space(access);
1392 dim = isl_space_drop_dims(dim, isl_dim_set, 0, n_index);
1393 dim = isl_space_map_from_set(dim);
1394 pre_shift = isl_basic_map_universe(isl_space_copy(dim));
1395 dim = isl_space_add_dims(dim, isl_dim_in, 1);
1396 dim = isl_space_add_dims(dim, isl_dim_out, 1);
1397 for (i = 0; i < n_index; ++i) {
1398 if (!bounds[i].shift_map)
1399 bmap = isl_basic_map_identity(isl_space_copy(dim));
1400 else
1401 bmap = isl_basic_map_copy(bounds[i].shift_map);
1402 pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
1404 isl_space_free(dim);
1405 name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
1406 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
1407 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
1408 shift = isl_basic_map_apply_range(pre_shift, shift);
1410 sched = isl_map_from_basic_map(shift);
1411 sched = isl_map_intersect_domain(sched, access);
1413 return sched;
1416 /* Construct a schedule for iterating over all elements in the given
1417 * piece of an array. The schedule iterates over a copy of the piece
1418 * that is shifted to the origin.
1419 * We subsequently also perform the tiling/wrapping over the threads.
1421 * In particular, we tile the final iterators so that the final thread
1422 * dimension runs over the final array dimension.
1423 * However, if those final iterators have only a single iteration,
1424 * we try to tile earlier iterators instead.
1426 static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
1427 __isl_take isl_set *access, struct cuda_array_ref_group *group)
1429 isl_space *dim;
1430 isl_map *sched;
1431 isl_union_map *usched;
1432 isl_map *tiling;
1433 isl_set *par;
1434 unsigned nvar = isl_set_dim(access, isl_dim_set);
1435 int n_tile;
1436 int first;
1438 sched = shift_access(access, group);
1440 n_tile = gen->n_block;
1441 if (n_tile > nvar) {
1442 int i;
1443 sched = isl_map_insert_dims(sched,
1444 isl_dim_out, 0, n_tile - nvar);
1445 for (i = 0; i < n_tile - nvar; ++i)
1446 sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
1447 nvar = n_tile;
1450 first = nvar - n_tile;
1452 for (; first > 0; first --)
1453 if (!isl_map_plain_is_fixed(sched, isl_dim_out,
1454 first + n_tile - 1, NULL))
1455 break;
1457 dim = isl_map_get_space(sched);
1458 dim = isl_space_params(dim);
1459 if (gen->options->wrap)
1460 tiling = wrap(isl_space_copy(dim), nvar, first,
1461 n_tile, gen->block_dim);
1462 else
1463 tiling = tile(isl_space_copy(dim), nvar, first,
1464 n_tile, gen->block_dim);
1465 sched = isl_map_apply_range(sched, tiling);
1467 par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
1468 usched = isl_union_map_from_map(sched);
1469 usched = isl_union_map_intersect_range(usched,
1470 isl_union_set_from_set(par));
1472 usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
1473 first, n_tile);
1475 return usched;
1478 /* Print an access to the element in the global memory copy of the
1479 * given array that corresponds to the element described by "pma"
1480 * of the original array.
1481 * The copy in global memory has been linearized, so we need to take
1482 * the array size into account.
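/* As an illustrative example, for a two-dimensional array "A" whose
 * second (innermost) extent is m, the element given by index
 * expressions i and j is printed as "A[(i) * (m) + j]".
 */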
1484 static void print_global_index(FILE *out,
1485 struct cuda_array_info *array, __isl_keep isl_pw_multi_aff *pma,
1486 __isl_keep isl_set *domain)
1488 int i;
1489 isl_ctx *ctx = isl_pw_multi_aff_get_ctx(pma);
1490 isl_printer *prn;
1492 if (cuda_array_is_scalar(array)) {
1493 fprintf(out, "*%s", array->name);
1494 return;
1497 fprintf(out, "%s[", array->name);
1498 prn = isl_printer_to_file(ctx, out);
1499 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1500 for (i = 0; i + 1 < array->n_index; ++i)
1501 prn = isl_printer_print_str(prn, "(");
1502 for (i = 0; i < array->n_index; ++i) {
1503 isl_pw_aff *pa = isl_pw_multi_aff_get_pw_aff(pma, i);
1504 pa = isl_pw_aff_coalesce(pa);
1505 pa = isl_pw_aff_gist(pa, isl_set_copy(domain));
1506 if (i) {
1507 prn = isl_printer_print_str(prn, ") * (");
1508 prn = isl_printer_print_pw_aff(prn,
1509 array->local_bound[i]);
1510 prn = isl_printer_print_str(prn, ") + ");
1512 prn = isl_printer_print_pw_aff(prn, pa);
1513 isl_pw_aff_free(pa);
1515 isl_printer_free(prn);
1516 fprintf(out, "]");
1519 /* Given an index expression into a tile of an array, adjust the expression
1520 * to a shift of the tile to the origin
1521 * (based on the lower bounds in array->shared_bound).
1522 * If the index is strided, then we first add
1523 * bound->shift and divide by bound->stride.
1525 static __isl_give isl_pw_aff *shift_index(__isl_take isl_pw_aff *pa,
1526 struct cuda_array_info *array,
1527 struct cuda_array_bound *bound, __isl_take isl_set *domain)
1529 isl_aff *lb;
1530 isl_pw_aff *tmp;
1532 if (bound->shift) {
1533 isl_aff *shift;
1534 shift = bound->shift;
1535 shift = isl_aff_copy(shift);
1536 shift = isl_aff_project_domain_on_params(shift);
1537 shift = isl_aff_align_params(shift, isl_pw_aff_get_space(pa));
1538 tmp = isl_pw_aff_alloc(isl_set_copy(domain), shift);
1539 pa = isl_pw_aff_add(pa, tmp);
1540 pa = isl_pw_aff_scale_down(pa, bound->stride);
1543 lb = isl_aff_copy(bound->lb);
1544 lb = isl_aff_project_domain_on_params(lb);
1546 lb = isl_aff_align_params(lb, isl_pw_aff_get_space(pa));
1548 tmp = isl_pw_aff_alloc(isl_set_copy(domain), lb);
1549 pa = isl_pw_aff_sub(pa, tmp);
1550 pa = isl_pw_aff_coalesce(pa);
1551 pa = isl_pw_aff_gist(pa, domain);
1553 return pa;
1556 /* Print an access to the element in the private/shared memory copy of the
1557 * given array reference group that corresponds to the element described
1558 * by "pma" of the original array.
1559 * Since the array in private/shared memory is just a shifted copy of part
1560 * of the original array, we simply need to subtract the lower bound,
1561 * which was computed in can_tile_for_shared_memory.
1562 * If any of the indices is strided, then we first add
1563 * bounds[i].shift and divide by bounds[i].stride.
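/* As an illustrative example, if an index has no stride and the lower
 * bound of the tile in that dimension is 32, then global index 37 ends
 * up at position 5 in the private/shared copy; with stride 4 and
 * shift 1, a global index 39 is first mapped to (39 + 1)/4 = 10 before
 * the lower bound is subtracted.
 */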
1565 static void print_local_index(FILE *out,
1566 struct cuda_array_ref_group *group, struct cuda_array_bound *bounds,
1567 __isl_keep isl_pw_multi_aff *pma, __isl_keep isl_set *domain)
1569 int i;
1570 isl_ctx *ctx = isl_pw_multi_aff_get_ctx(pma);
1571 isl_printer *prn;
1572 struct cuda_array_info *array = group->array;
1574 prn = isl_printer_to_file(ctx, out);
1575 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1577 prn = print_array_name(prn, group);
1578 for (i = 0; i < array->n_index; ++i) {
1579 isl_pw_aff *pa = isl_pw_multi_aff_get_pw_aff(pma, i);
1581 pa = shift_index(pa, array, &bounds[i], isl_set_copy(domain));
1583 fprintf(out, "[");
1584 prn = isl_printer_print_pw_aff(prn, pa);
1585 fprintf(out, "]");
1586 isl_pw_aff_free(pa);
1589 isl_printer_free(prn);
1592 /* This function is called for each leaf in the clast of the code
1593 * for copying to or from shared/private memory.
1594 * The statement name is {read,write}_{shared,private}_<array>.
1596 * The schedule iterates over the array elements, so we can use
1597 * the domain of copy_sched at the current scheduling position
1598 * as the index of the array.
1600 static void print_copy_statement(struct clast_printer_info *code,
1601 struct clast_user_stmt *u)
1603 struct cuda_gen *gen = code->user;
1604 isl_set *domain;
1605 isl_map *sched;
1606 struct cuda_array_ref_group *group = gen->copy_group;
1607 struct cuda_array_bound *bounds = gen->copy_bound;
1608 unsigned n_in;
1609 unsigned n_out;
1610 isl_space *dim;
1611 isl_set *param;
1612 isl_set *index;
1613 isl_pw_multi_aff *pma;
1614 int read;
1616 read = !strncmp(u->statement->name, "read", 4);
1618 domain = extract_host_domain(u);
1619 assert(domain);
1621 sched = isl_map_copy(gen->copy_sched);
1622 sched = isl_map_reverse(sched);
1623 sched = isl_map_intersect_domain(sched, domain);
1624 n_in = isl_map_dim(sched, isl_dim_in);
1625 n_out = isl_map_dim(sched, isl_dim_out);
1626 dim = isl_map_get_space(sched);
1627 dim = isl_space_drop_dims(dim, isl_dim_in, 0, n_in);
1628 dim = isl_space_drop_dims(dim, isl_dim_out, 0, n_out);
1629 param = parametrization(dim, n_in, 0, n_in, "c");
1630 sched = isl_map_align_params(sched, isl_set_get_space(param));
1631 sched = isl_map_intersect_domain(sched, param);
1632 index = isl_map_range(sched);
1633 domain = isl_set_copy(index);
1634 pma = isl_pw_multi_aff_from_set(index);
1635 pma = isl_pw_multi_aff_coalesce(pma);
1636 domain = isl_set_params(domain);
1638 print_indent(code->dst, code->indent);
1639 if (read) {
1640 print_local_index(code->dst, group, bounds, pma, domain);
1641 fprintf(code->dst, " = ");
1642 print_global_index(code->dst, group->array, pma, domain);
1643 } else {
1644 print_global_index(code->dst, group->array, pma, domain);
1645 fprintf(code->dst, " = ");
1646 print_local_index(code->dst, group, bounds, pma, domain);
1648 fprintf(code->dst, ";\n");
1650 isl_pw_multi_aff_free(pma);
1651 isl_set_free(domain);
1654 static void print_shared_access(struct cuda_gen *gen,
1655 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1656 const char *type, struct cuda_array_ref_group *group)
1658 const char *array_name;
1659 char *name;
1660 isl_ctx *ctx;
1661 isl_union_map *sched;
1662 unsigned nvar = isl_set_dim(access, isl_dim_set);
1663 int n_tile;
1665 ctx = isl_set_get_ctx(access);
1666 array_name = isl_set_get_tuple_name(access);
1667 name = isl_alloc_array(ctx, char,
1668 strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
1669 if (group->array->n_group > 1)
1670 sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
1671 else
1672 sprintf(name, "%s_shared_%s", type, array_name);
1673 access = isl_set_set_tuple_name(access, name);
1674 free(name);
1676 sched = access_schedule(gen, access, group);
1678 n_tile = gen->n_block;
1679 if (n_tile > nvar)
1680 n_tile = nvar;
1682 gen->copy_sched = isl_map_from_union_map(isl_union_map_copy(sched));
1683 gen->copy_group = group;
1684 gen->copy_bound = group->shared_bound;
1686 print_shared_body(gen, shared_domain, sched, nvar + n_tile,
1687 &print_copy_statement, -1);
1689 isl_union_map_free(sched);
1690 isl_map_free(gen->copy_sched);
1693 /* Return the union of all read (read = 1) and/or write (write = 1)
1694 * access relations in the group.
1696 static __isl_give isl_union_map *group_access_relation(
1697 struct cuda_array_ref_group *group, int read, int write)
1699 int i;
1700 isl_union_map *access;
1702 access = isl_union_map_empty(isl_map_get_space(group->access));
1703 for (i = 0; i < group->n_ref; ++i) {
1704 isl_map *map_i;
1706 if (!((read && group->refs[i]->read) ||
1707 (write && group->refs[i]->write)))
1708 continue;
1709 map_i = isl_map_copy(group->refs[i]->access);
1710 access = isl_union_map_union(access,
1711 isl_union_map_from_map(map_i));
1714 return access;
1717 /* Check that none of the shared memory tiles involve any strides.
1719 static int no_strides(struct cuda_array_ref_group *group)
1721 int i;
1722 int n_index = group->array->n_index;
1724 for (i = 0; i < n_index; ++i)
1725 if (group->shared_bound[i].shift)
1726 return 0;
1728 return 1;
1731 /* Return a set containing the values of the given index i
1732 * of the elements in the array tile in global memory that corresponds
1733 * to the shared memory copy.
1734 * In particular, if a is the index, we return a set with constraints
1736 * tile_offset <= a <= tile_offset + tile_size - 1
1738 * and
1740 * 0 <= a <= array_size - 1
1743 static __isl_give isl_set *group_tile_dim(struct cuda_array_ref_group *group,
1744 int i)
1746 isl_basic_set *tile;
1747 isl_aff *aff;
1748 isl_constraint *c;
1749 isl_local_space *ls;
1750 isl_pw_aff *bound;
1751 isl_set *dom;
1752 isl_set *tile_set;
1754 aff = isl_aff_copy(group->shared_bound[i].lb);
1755 aff = isl_aff_add_dims(aff, isl_dim_in, 1);
1756 ls = isl_aff_get_domain_local_space(aff);
1757 aff = isl_aff_neg(aff);
1758 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
1759 c = isl_inequality_from_aff(isl_aff_copy(aff));
1760 tile = isl_basic_set_from_constraint(c);
1762 aff = isl_aff_neg(aff);
1763 aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
1764 aff = isl_aff_add_constant_si(aff, -1);
1765 c = isl_inequality_from_aff(aff);
1766 tile = isl_basic_set_add_constraint(tile, c);
1768 aff = isl_aff_zero_on_domain(ls);
1769 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
1770 c = isl_inequality_from_aff(aff);
1771 tile = isl_basic_set_add_constraint(tile, c);
1773 bound = isl_pw_aff_copy(group->array->bound[i]);
1774 bound = isl_pw_aff_add_dims(bound, isl_dim_in, 1);
1775 ls = isl_local_space_from_space(isl_pw_aff_get_domain_space(bound));
1776 aff = isl_aff_zero_on_domain(ls);
1777 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
1778 aff = isl_aff_add_constant_si(aff, 1);
1779 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
1781 tile_set = isl_pw_aff_ge_set(bound, isl_pw_aff_alloc(dom, aff));
1782 tile_set = isl_set_align_params(tile_set, isl_basic_set_get_space(tile));
1783 tile_set = isl_set_intersect(tile_set, isl_set_from_basic_set(tile));
1785 return tile_set;
1788 /* Return a set containing the elements in the array tile in
1789 * global memory that corresponds to the shared memory copy.
1791 static __isl_give isl_set *group_tile(struct cuda_array_ref_group *group)
1793 int i;
1794 int n_index = group->array->n_index;
1795 isl_set *tile;
1797 tile = group_tile_dim(group, 0);
1798 for (i = 1; i < n_index; ++i) {
1799 isl_set *tile_i;
1801 tile_i = group_tile_dim(group, i);
1802 tile = isl_set_flat_product(tile, tile_i);
1805 tile = isl_set_set_tuple_name(tile, group->array->name);
1807 return tile;
1810 /* Print code for reading into or writing from shared memory
1811 * the given array reference group.
1813 * sched maps the original iteration domains to the shared memory tile loops.
1815 * If we are performing a read from global memory to shared memory,
1816 * if the array involved is not a scalar and if the definition of the
1817 * shared memory tiles does not involve any strides, then we copy
1818 * the entire tile to shared memory. This may result in some extra
1819 * elements getting copied, but it should lead to simpler code
1820 * (which means that fewer registers may be needed) and less divergence.
1822 * Otherwise, we only copy the elements that will be read or have been written
1823 * in the kernel.
1825 * Note that the requirement that there be no strides can easily be lifted.
1826 * We would just need to add constraints of the form
1828 * shift + a = stride * alpha
1830 static int print_group_shared_accesses(struct cuda_gen *gen,
1831 struct cuda_array_ref_group *group, const char *type,
1832 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
1834 int read;
1835 isl_union_map *access;
1836 isl_union_set *uset;
1837 isl_set *access_set;
1839 if (group->private_bound)
1840 return 0;
1841 if (!group->shared_bound)
1842 return 0;
1844 read = !strcmp(type, "read");
1846 access = group_access_relation(group, read, !read);
1847 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
1848 uset = isl_union_map_range(access);
1850 if (isl_union_set_is_empty(uset)) {
1851 isl_union_set_free(uset);
1852 return 0;
1855 if (read && group->array->n_index > 0 && no_strides(group)) {
1856 isl_union_set_free(uset);
1857 access_set = group_tile(group);
1858 print_shared_access(gen, shared_domain, access_set,
1859 type, group);
1860 return 1;
1863 access_set = isl_set_from_union_set(uset);
1864 access_set = isl_set_coalesce(access_set);
1866 print_shared_access(gen, shared_domain, access_set, type, group);
1868 return 1;
1871 /* Print code for reading into or writing from shared memory at
1872 * the given level (-1 for innermost).
1874 * If we are not printing at the innermost level, then the dimensionality
1875 * of shared_domain may be smaller than gen->shared_len.
1876 * As the rest of the code assumes that the domain of access has
1877 * gen->shared_len dimensions, we therefore may need to embed this domain
1878 * in a higher dimensional space after intersection with shared_domain.
1880 static void print_shared_accesses(struct cuda_gen *gen,
1881 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
1882 const char *type, int level)
1884 int i, j;
1885 isl_space *dim;
1886 isl_map *proj;
1887 isl_set *par;
1888 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
1889 int sync = 0;
1890 isl_union_map *sched;
1892 shared_domain = isl_set_copy(shared_domain);
1893 sched = isl_union_map_copy(gen->tiled_sched);
1894 dim = isl_union_map_get_space(sched);
1895 proj = projection(dim, gen->tiled_len, shared_len);
1896 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
1897 sched = isl_union_map_intersect_range(sched,
1898 isl_union_set_from_set(isl_set_copy(shared_domain)));
1899 if (shared_len != gen->shared_len) {
1900 dim = isl_union_map_get_space(sched);
1901 proj = projection(dim, gen->shared_len, shared_len);
1902 proj = isl_map_reverse(proj);
1903 shared_domain = isl_set_apply(shared_domain,
1904 isl_map_copy(proj));
1905 sched = isl_union_map_apply_range(sched,
1906 isl_union_map_from_map(proj));
1909 dim = isl_union_map_get_space(sched);
1910 par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
1911 sched = isl_union_map_intersect_range(sched,
1912 isl_union_set_from_set(par));
1914 for (i = 0; i < gen->n_array; ++i) {
1915 struct cuda_array_info *array = &gen->array[i];
1917 for (j = 0; j < array->n_group; ++j) {
1918 if (array->groups[j]->print_shared_level != level)
1919 continue;
1921 if (print_group_shared_accesses(gen, array->groups[j],
1922 type, shared_domain, sched))
1923 sync = 1;
1927 isl_union_map_free(sched);
1928 isl_set_free(shared_domain);
1930 if (sync) {
1931 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
1932 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
1936 /* This function is called for each access to an array in some statement
1937 * in the original code.
1938 * Replace that access by an access to shared or (linearized) global memory.
1939 * Since the array in shared memory is just
1940 * a shifted copy of part of the original array, we simply need
1941 * to subtract the lower bound, which was computed
1942 * in can_tile_for_shared_memory.
1943 * If any of the indices is strided, then we first add
1944 * shared_bound[i].shift and divide by shared_bound[i].stride.
1946 * If the given array is accessed directly from global memory,
1947 * we don't need to perform any shifting and simply simplify
1948 * the expression in the context of the domain instead.
1950 * If the array space (range of access) has no name, then we are
1951 * accessing an iterator in the original program.
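 *
 * As a purely illustrative example, a strided index i with shift s and
 * stride 4 is rewritten in terms of (i + s)/4, on top of the subtraction
 * of the lower bound described above.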
1953 static __isl_give isl_printer *print_access(__isl_take isl_printer *p,
1954 struct cuda_gen *gen, __isl_take isl_map *access, int group_nr)
1956 int i;
1957 const char *name;
1958 unsigned n_index;
1959 struct cuda_array_info *array = NULL;
1960 isl_pw_multi_aff *pma;
1961 isl_set *data_set;
1962 isl_set *domain;
1963 struct cuda_array_bound *bounds = NULL;
1965 access = isl_map_align_params(access,
1966 isl_set_get_space(gen->stmt_domain));
1968 data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);
1970 name = isl_set_get_tuple_name(data_set);
1972 if (!name)
1973 fprintf(gen->cuda.kernel_c, "(");
1974 else {
1975 struct cuda_array_ref_group *group;
1977 for (i = 0; i < gen->n_array; ++i) {
1978 if (strcmp(name, gen->array[i].name))
1979 continue;
1980 array = &gen->array[i];
1982 assert(array);
1983 group = array->groups[group_nr];
1984 bounds = group->private_bound;
1985 if (!bounds)
1986 bounds = group->shared_bound;
1988 if (!bounds && cuda_array_is_scalar(array) && !array->read_only)
1989 fprintf(gen->cuda.kernel_c, "*");
1990 p = print_array_name(p, group);
1992 if (cuda_array_is_scalar(array)) {
1993 isl_set_free(data_set);
1994 return p;
1997 fprintf(gen->cuda.kernel_c, "[");
2001 n_index = isl_set_dim(data_set, isl_dim_set);
2002 pma = isl_pw_multi_aff_from_set(data_set);
2003 pma = isl_pw_multi_aff_coalesce(pma);
2005 if (!bounds)
2006 for (i = 0; i + 1 < n_index; ++i)
2007 p = isl_printer_print_str(p, "(");
2009 for (i = 0; i < n_index; ++i) {
2010 isl_pw_aff *index;
2012 index = isl_pw_multi_aff_get_pw_aff(pma, i);
2014 if (!array) {
2015 p = isl_printer_print_pw_aff(p, index);
2016 isl_pw_aff_free(index);
2017 continue;
2020 domain = isl_set_copy(gen->stmt_domain);
2021 domain = isl_set_params(domain);
2022 if (!bounds) {
2023 index = isl_pw_aff_coalesce(index);
2024 index = isl_pw_aff_gist(index, domain);
2025 } else
2026 index = shift_index(index, array, &bounds[i], domain);
2028 if (i) {
2029 if (!bounds) {
2030 p = isl_printer_print_str(p, ") * (");
2031 p = isl_printer_print_pw_aff(p,
2032 array->local_bound[i]);
2033 p = isl_printer_print_str(p, ") + ");
2034 } else
2035 p = isl_printer_print_str(p, "][");
2037 p = isl_printer_print_pw_aff(p, index);
2038 isl_pw_aff_free(index);
2040 if (!name)
2041 p = isl_printer_print_str(p, ")");
2042 else
2043 p = isl_printer_print_str(p, "]");
2045 isl_pw_multi_aff_free(pma);
2047 return p;
2050 struct cuda_access_print_info {
2051 struct cuda_gen *gen;
2052 struct cuda_stmt_access *access;
2055 /* To print the cuda accesses, we walk the list of cuda accesses simultaneously
2056 * with the pet printer. This means that whenever the pet printer prints a
2057 * pet access expression, we have the corresponding cuda access available and
2058 * can print the modified access.
2060 static __isl_give isl_printer *print_cuda_access(__isl_take isl_printer *p,
2061 struct pet_expr *expr, void *usr)
2063 struct cuda_access_print_info *info =
2064 (struct cuda_access_print_info *) usr;
2066 p = print_access(p, info->gen, isl_map_copy(info->access->access),
2067 info->access->group);
2068 info->access = info->access->next;
2070 return p;
2073 static void print_stmt_body(struct cuda_gen *gen,
2074 FILE *out, struct cuda_stmt *stmt)
2076 struct cuda_access_print_info info;
2077 isl_printer *p;
2079 p = isl_printer_to_file(gen->ctx, out);
2080 p = isl_printer_set_output_format(p, ISL_FORMAT_C);
2082 info.gen = gen;
2083 info.access = stmt->accesses;
2085 p = print_pet_expr(p, stmt->body, &print_cuda_access, &info);
2086 fprintf(out, ";\n");
2088 isl_printer_free(p);
2091 /* This function is called for each leaf in the innermost clast,
2092 * i.e., for each statement.
2093 * We print the statement body, simplifying the accesses based
2094 * on the schedule.
2096 static void print_statement(struct clast_printer_info *code,
2097 struct clast_user_stmt *u)
2099 struct cuda_gen *gen = code->user;
2100 isl_space *dim;
2101 isl_set *par;
2102 isl_set *stmt_domain;
2103 isl_union_map *stmt_sched;
2104 isl_union_set *uset;
2105 int nr;
2106 struct cuda_stmt *stmt;
2108 nr = atoi(u->statement->name + 2);
2109 stmt = &gen->stmts[nr];
2111 stmt_domain = extract_host_domain(u);
2113 stmt_sched = isl_union_map_intersect_range(
2114 isl_union_map_copy(gen->local_sched),
2115 isl_union_set_from_set(extend(stmt_domain,
2116 gen->thread_tiled_len)));
2117 dim = isl_union_map_get_space(stmt_sched);
2118 par = parametrization(dim, gen->thread_tiled_len, 0,
2119 gen->thread_tiled_len, "c");
2120 stmt_sched = isl_union_map_intersect_range(stmt_sched,
2121 isl_union_set_from_set(par));
2123 uset = isl_union_map_domain(stmt_sched);
2124 dim = isl_union_set_get_space(uset);
2125 dim = isl_space_add_dims(dim, isl_dim_set,
2126 isl_set_dim(stmt->domain, isl_dim_set));
2127 dim = isl_space_set_tuple_name(dim, isl_dim_set, u->statement->name);
2128 gen->stmt_domain = isl_union_set_extract_set(uset, dim);
2129 isl_union_set_free(uset);
2131 print_indent(code->dst, code->indent);
2132 print_stmt_body(gen, code->dst, stmt);
2134 isl_set_free(gen->stmt_domain);
2137 static void print_private_access(struct cuda_gen *gen,
2138 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
2139 const char *type, struct cuda_array_ref_group *group)
2141 const char *array_name;
2142 char *name;
2143 isl_ctx *ctx;
2144 unsigned nvar = isl_set_dim(access, isl_dim_set);
2145 isl_union_map *usched;
2147 if (isl_set_fast_is_empty(access)) {
2148 isl_set_free(access);
2149 return;
2152 ctx = isl_set_get_ctx(access);
2153 array_name = isl_set_get_tuple_name(access);
2154 name = isl_alloc_array(ctx, char,
2155 strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
2156 if (group->array->n_group > 1)
2157 sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
2158 else
2159 sprintf(name, "%s_private_%s", type, array_name);
2160 access = isl_set_set_tuple_name(access, name);
2161 free(name);
2163 gen->copy_sched = shift_access(access, group);
2164 gen->copy_group = group;
2165 gen->copy_bound = group->private_bound;
2167 usched = isl_union_map_from_map(isl_map_copy(gen->copy_sched));
2168 print_shared_body(gen, shared_domain, usched, nvar,
2169 &print_copy_statement, 1);
2170 isl_union_map_free(usched);
2172 isl_map_free(gen->copy_sched);
2175 /* Print code for reading into or writing from private memory
2176 * the given array reference group.
2178 * sched maps the original iteration domains to the shared memory tile loops.
2180 static void print_group_private_accesses(struct cuda_gen *gen,
2181 struct cuda_array_ref_group *group,
2182 const char *type, __isl_keep isl_set *shared_domain,
2183 unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
2185 int read;
2186 isl_union_map *access;
2187 isl_union_set *uset;
2188 isl_set *access_set;
2190 if (!group->private_bound)
2191 return;
2193 read = !strcmp(type, "read");
2195 access = group_access_relation(group, read, !read);
2196 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
2197 access = isl_union_map_intersect(access,
2198 isl_union_map_copy(gen->private_access));
2199 uset = isl_union_map_range(access);
2201 if (isl_union_set_is_empty(uset)) {
2202 isl_union_set_free(uset);
2203 return;
2206 access_set = isl_set_from_union_set(uset);
2207 access_set = isl_set_coalesce(access_set);
2208 access_set = isl_set_eliminate(access_set, isl_dim_param,
2209 first_shared + shared_len,
2210 gen->shared_len - shared_len);
2212 print_private_access(gen, shared_domain, access_set, type, group);
2215 /* Print code for reading into or writing from private memory at
2216 * the given level (-1 for innermost).
2218 * If we are not printing at the innermost level, then the dimensionality
2219 * of shared_domain may be smaller than gen->shared_len.
2220 * Since the rest of the code assumes that the domain of access has
2221 * gen->shared_len dimensions, we may need to embed this domain
2222 * in a higher dimensional space after intersection with shared_domain.
2224 * This code is very similar to print_shared_accesses.
2225 * The main difference is that we need to take into account gen->private_access.
2227 static void print_private_accesses(struct cuda_gen *gen,
2228 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
2229 const char *type, int level)
2231 int i, j;
2232 isl_space *dim;
2233 isl_map *proj;
2234 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
2235 unsigned first_shared;
2236 isl_union_map *sched;
2238 shared_domain = isl_set_copy(shared_domain);
2239 sched = isl_union_map_copy(gen->tiled_sched);
2240 dim = isl_union_map_get_space(sched);
2241 first_shared = isl_space_dim(dim, isl_dim_param);
2242 proj = projection(dim, gen->tiled_len, shared_len);
2243 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
2244 sched = isl_union_map_intersect_range(sched,
2245 isl_union_set_from_set(isl_set_copy(shared_domain)));
2246 if (shared_len != gen->shared_len) {
2247 dim = isl_union_map_get_space(sched);
2248 proj = projection(dim, gen->shared_len, shared_len);
2249 proj = isl_map_reverse(proj);
2250 shared_domain = isl_set_apply(shared_domain,
2251 isl_map_copy(proj));
2252 sched = isl_union_map_apply_range(sched,
2253 isl_union_map_from_map(proj));
2256 for (i = 0; i < gen->n_array; ++i) {
2257 struct cuda_array_info *array = &gen->array[i];
2259 for (j = 0; j < array->n_group; ++j) {
2260 if (array->groups[j]->print_shared_level != level)
2261 continue;
2263 print_group_private_accesses(gen, array->groups[j],
2264 type, shared_domain,
2265 first_shared, shared_len, sched);
2269 isl_union_map_free(sched);
2270 isl_set_free(shared_domain);
2273 /* Set unroll[j] if the input dimension j is involved in
2274 * the index expression represented by bmap.
2276 static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
2278 int i, j;
2279 int n_in = isl_basic_map_dim(bmap, isl_dim_in);
2280 int n_out = isl_basic_map_dim(bmap, isl_dim_out);
2281 int *unroll = user;
2283 for (i = 0; i < n_out; ++i) {
2284 isl_constraint *c;
2285 int ok;
2287 ok = isl_basic_map_has_defining_equality(bmap,
2288 isl_dim_out, i, &c);
2289 assert(ok);
2290 for (j = 0; j < n_in; ++j)
2291 if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
2292 unroll[j] = 1;
2293 isl_constraint_free(c);
2296 isl_basic_map_free(bmap);
2297 return 0;
2300 /* Given an array pos mapping input dimensions to the corresponding
2301 * output dimension, construct the corresponding map.
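 *
 * For example (illustration only), pos = { 1, 2, 0 } with len = 3 yields
 * the map { [i0, i1, i2] -> [i2, i0, i1] }, since input dimension i is
 * equated to output dimension pos[i].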
2303 static __isl_give isl_map *permutation(__isl_take isl_space *dim,
2304 int *pos, int len)
2306 int i;
2307 isl_constraint *c;
2308 isl_basic_map *bmap;
2309 isl_local_space *ls;
2311 dim = isl_space_add_dims(dim, isl_dim_in, len);
2312 dim = isl_space_add_dims(dim, isl_dim_out, len);
2313 bmap = isl_basic_map_universe(isl_space_copy(dim));
2314 ls = isl_local_space_from_space(dim);
2316 for (i = 0; i < len; ++i) {
2317 c = isl_equality_alloc(isl_local_space_copy(ls));
2318 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
2319 isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
2320 bmap = isl_basic_map_add_constraint(bmap, c);
2322 isl_local_space_free(ls);
2324 return isl_map_from_basic_map(bmap);
2327 /* Find all loops involved in any of the index expressions for any of
2328 * the private accesses, move them innermost and then mark them as
2329 * requiring unrolling by setting gen->first_unroll.
2330 * The loops involved should all be parallel because of the checks
2331 * we performed in check_private_group_access. Moving them innermost
2332 * is therefore a valid transformation.
2334 * Loops up to gen->shared_len are generated before the mapping to
2335 * threads is applied. They should therefore be ignored.
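 *
 * For example (illustration only), if the thread tiled loops are
 * (a, b, c, d), with gen->shared_len covering (a) and only c appearing
 * in a private index expression, then the loops are reordered to
 * (a, b, d, c) and c is marked for unrolling.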
2337 static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
2338 __isl_take isl_union_map *sched)
2340 int i, j;
2341 int unroll[gen->thread_tiled_len];
2342 int perm[gen->thread_tiled_len];
2343 isl_space *dim;
2344 isl_map *permute;
2345 int len = gen->shared_len + gen->n_parallel + gen->n_block;
2347 gen->first_unroll = -1;
2349 for (i = 0; i < gen->thread_tiled_len; ++i)
2350 unroll[i] = 0;
2351 for (i = 0; i < gen->n_array; ++i) {
2352 struct cuda_array_info *array = &gen->array[i];
2354 for (j = 0; j < array->n_group; ++j) {
2355 isl_union_map *access;
2356 isl_map *acc;
2358 if (!array->groups[j]->private_bound)
2359 continue;
2361 access = group_access_relation(array->groups[j], 1, 1);
2362 access = isl_union_map_apply_domain(access,
2363 isl_union_map_copy(sched));
2365 acc = isl_map_from_union_map(access);
2366 isl_map_foreach_basic_map(acc, &check_unroll, unroll);
2368 isl_map_free(acc);
2372 for (i = gen->shared_len; i < len; ++i)
2373 if (unroll[i])
2374 break;
2376 if (i >= len)
2377 return sched;
2379 for (i = len; i < gen->thread_tiled_len; ++i)
2380 if (unroll[i])
2381 return sched;
2383 j = 0;
2384 for (i = 0; i < gen->shared_len; ++i)
2385 perm[i] = j++;
2386 for (i = gen->shared_len; i < gen->thread_tiled_len; ++i)
2387 if (!unroll[i])
2388 perm[i] = j++;
2389 gen->first_unroll = 1 + j;
2390 for (i = gen->shared_len; i < len; ++i)
2391 if (unroll[i])
2392 perm[i] = j++;
2394 dim = isl_union_map_get_space(sched);
2395 permute = permutation(dim, perm, gen->thread_tiled_len);
2396 sched = isl_union_map_apply_range(sched,
2397 isl_union_map_from_map(permute));
2399 return sched;
2402 /* This function is called for each leaf in the clast of the kernel code.
2403 * We first specialize the schedule to the site of the leaf and
2404 * print code for reading into shared memory, performing the actual
2405 * computations and writing from shared memory, with the required
2406 * synchronizations.
2408 static void print_kernel_user(struct clast_printer_info *code,
2409 struct clast_user_stmt *u)
2411 struct cuda_gen *gen = code->user;
2412 isl_set *shared_domain;
2414 shared_domain = extract_entire_host_domain(&u->stmt);
2416 print_shared_accesses(gen, shared_domain, gen->read, "read", -1);
2418 print_private_accesses(gen, shared_domain, gen->read, "read", -1);
2420 print_shared_body(gen, shared_domain, gen->local_sched,
2421 gen->thread_tiled_len, &print_statement,
2422 gen->first_unroll);
2424 print_private_accesses(gen, shared_domain, gen->write, "write", -1);
2426 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
2427 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
2429 print_shared_accesses(gen, shared_domain, gen->write, "write", -1);
2431 isl_set_free(shared_domain);
2434 /* Check if we need to perform any copying to shared memory at this level
2435 * and if so, print the copying instructions.
2436 * For any array for which we are allowed to print copying instructions at
2437 * this level, but have not done so already, we print those instructions.
2439 static void copy_to_local(struct cuda_gen *gen, __isl_keep isl_set *domain)
2441 int i, j;
2442 int level;
2443 int print = 0;
2445 level = isl_set_dim(domain, isl_dim_set);
2447 for (i = 0; i < gen->n_array; ++i) {
2448 struct cuda_array_info *array = &gen->array[i];
2450 for (j = 0; j < array->n_group; ++j) {
2451 if (array->groups[j]->print_shared_level >= 0)
2452 continue;
2453 if (array->groups[j]->last_shared >= level)
2454 continue;
2455 array->groups[j]->print_shared_level = level;
2456 print = 1;
2460 if (print) {
2461 print_shared_accesses(gen, domain, gen->read, "read", level);
2462 print_private_accesses(gen, domain, gen->read, "read", level);
2467 /* This function is called for each for loop in the clast,
2468 * right after the opening brace has been printed.
2470 * Print copying instructions to shared or private memory if needed.
2472 static void print_kernel_for_head(struct clast_printer_info *code,
2473 struct clast_for *f)
2475 struct cuda_gen *gen = code->user;
2476 isl_set *domain;
2478 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2479 copy_to_local(gen, domain);
2481 isl_set_free(domain);
2484 /* Print instructions for copying from shared memory for each array
2485 * for which print_kernel_for_head has printed instructions for copying
2486 * to shared memory.
2488 static void copy_from_local(struct cuda_gen *gen, __isl_keep isl_set *domain)
2490 int i, j;
2491 int level;
2492 int print = 0;
2494 level = isl_set_dim(domain, isl_dim_set);
2496 for (i = 0; i < gen->n_array; ++i) {
2497 struct cuda_array_info *array = &gen->array[i];
2499 for (j = 0; j < array->n_group; ++j) {
2500 if (array->groups[j]->print_shared_level != level)
2501 continue;
2502 print = 1;
2503 break;
2505 if (print)
2506 break;
2509 if (print) {
2510 print_private_accesses(gen, domain, gen->write, "write", level);
2511 print_shared_accesses(gen, domain, gen->write, "write", level);
2515 /* This function is called for each for loop in the clast,
2516 * right before the closing brace is printed.
2518 * Print copying instructions from shared or private memory if needed.
2520 static void print_kernel_for_foot(struct clast_printer_info *code,
2521 struct clast_for *f)
2523 struct cuda_gen *gen = code->user;
2524 isl_set *domain;
2526 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2527 copy_from_local(gen, domain);
2529 isl_set_free(domain);
2532 /* Use CLooG to generate code for the outer gen->shared_len loops
2533 * of the local schedule "sched".
2534 * The pretty printing of this code is handled by print_clast,
2535 * which calls print_kernel_user for each iteration of the shared tile loops.
2537 static void print_cloog_kernel_body(struct cuda_gen *gen,
2538 __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
2540 int i;
2541 CloogOptions *options;
2542 CloogDomain *cloog_context;
2543 CloogUnionDomain *ud;
2544 CloogInput *input;
2545 struct clast_stmt *stmt;
2546 char name[20];
2548 sched = isl_union_map_copy(sched);
2549 sched = isl_union_map_align_params(sched, isl_set_get_space(context));
2551 options = cloog_options_malloc(gen->state);
2552 options->language = CLOOG_LANGUAGE_C;
2553 options->strides = 1;
2554 options->sh = 1;
2555 options->stop = gen->shared_len;
2556 options->f = gen->tiled_len;
2557 options->l = gen->tiled_len;
2558 options->save_domains = 1;
2559 options->noscalars = 1;
2561 ud = cloog_union_domain_from_isl_union_map(sched);
2562 for (i = 0; i < gen->shared_len; ++i) {
2563 snprintf(name, sizeof(name), "g%d", i);
2564 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
2566 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
2567 input = cloog_input_alloc(cloog_context, ud);
2569 stmt = cloog_clast_create_from_input(input, options);
2571 gen->kernel_code.indent = 4;
2572 gen->kernel_code.dst = gen->cuda.kernel_c;
2573 gen->kernel_code.print_user_stmt = NULL;
2574 gen->kernel_code.print_user_stmt_list = &print_kernel_user;
2575 gen->kernel_code.print_for_head = &print_kernel_for_head;
2576 gen->kernel_code.print_for_foot = &print_kernel_for_foot;
2577 gen->kernel_code.user = gen;
2578 copy_to_local(gen, context);
2579 print_clast(&gen->kernel_code, stmt);
2580 copy_from_local(gen, context);
2582 cloog_clast_free(stmt);
2583 cloog_options_free(options);
2586 static void print_kernel_iterators(struct cuda_gen *gen)
2588 int i;
2589 const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
2590 const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
2591 "threadIdx.z" };
2593 if (gen->n_grid > 0) {
2594 print_indent(gen->cuda.kernel_c, 4);
2595 fprintf(gen->cuda.kernel_c, "int ");
2596 for (i = 0; i < gen->n_grid; ++i) {
2597 if (i)
2598 fprintf(gen->cuda.kernel_c, ", ");
2599 fprintf(gen->cuda.kernel_c, "b%d = %s",
2600 i, block_dims[gen->n_grid - 1 - i]);
2602 fprintf(gen->cuda.kernel_c, ";\n");
2605 if (gen->n_block > 0) {
2606 print_indent(gen->cuda.kernel_c, 4);
2607 fprintf(gen->cuda.kernel_c, "int ");
2608 for (i = 0; i < gen->n_block; ++i) {
2609 if (i)
2610 fprintf(gen->cuda.kernel_c, ", ");
2611 fprintf(gen->cuda.kernel_c, "t%d = %s",
2612 i, thread_dims[gen->n_block - 1 - i]);
2614 fprintf(gen->cuda.kernel_c, ";\n");
2618 static void print_group_shared_array(struct cuda_gen *gen,
2619 struct cuda_array_ref_group *group)
2621 int j;
2622 struct cuda_array_bound *bounds;
2623 isl_printer *p;
2625 bounds = group->private_bound;
2626 if (!bounds)
2627 bounds = group->shared_bound;
2628 if (!bounds)
2629 return;
2631 print_indent(gen->cuda.kernel_c, 4);
2632 fprintf(gen->cuda.kernel_c, "%s%s ",
2633 group->private_bound ? "" : "__shared__ ", group->array->type);
2634 p = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
2635 p = print_array_name(p, group);
2636 isl_printer_free(p);
2637 for (j = 0; j < group->array->n_index; ++j) {
2638 fprintf(gen->cuda.kernel_c, "[");
2639 isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
2640 fprintf(gen->cuda.kernel_c, "]");
2642 fprintf(gen->cuda.kernel_c, ";\n");
2645 static void print_shared_arrays(struct cuda_gen *gen)
2647 int i, j;
2649 for (i = 0; i < gen->n_array; ++i) {
2650 struct cuda_array_info *array = &gen->array[i];
2652 for (j = 0; j < array->n_group; ++j)
2653 print_group_shared_array(gen, array->groups[j]);
2657 static void print_kernel_body(struct cuda_gen *gen,
2658 __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
2660 isl_set *context;
2662 context = isl_set_copy(host_domain);
2663 context = parametrize(context, 0, gen->tile_first, "h");
2664 context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
2665 context = add_bounded_parameters(context,
2666 gen->n_grid, gen->grid_dim, "b");
2668 print_kernel_iterators(gen);
2669 print_shared_arrays(gen);
2671 fprintf(gen->cuda.kernel_c, "\n");
2673 print_cloog_kernel_body(gen, context, sched);
2675 isl_set_free(context);
2678 /* Given a constraint
2680 * a(p,i) + j = g f(e)
2682 * or -a(p,i) - j = g f(e) if sign < 0,
2683 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2684 * a(p,i) is assumed to be an expression in only the parameters.
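 *
 * For example (illustration only), the constraint n + j = 4 e
 * would result in a shift of n and a stride of 4.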
2686 static void extract_stride(__isl_keep isl_constraint *c,
2687 struct cuda_array_bound *bound, isl_int stride, int sign)
2689 int i;
2690 isl_int v;
2691 isl_space *dim;
2692 unsigned nparam;
2693 isl_aff *aff;
2695 isl_int_set(bound->stride, stride);
2697 dim = isl_constraint_get_space(c);
2698 dim = isl_space_params(dim);
2700 nparam = isl_space_dim(dim, isl_dim_param);
2702 isl_int_init(v);
2704 isl_constraint_get_constant(c, &v);
2705 if (sign < 0)
2706 isl_int_neg(v, v);
2707 aff = isl_aff_zero_on_domain(isl_local_space_from_space(dim));
2708 aff = isl_aff_set_constant(aff, v);
2710 for (i = 0; i < nparam; ++i) {
2711 isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
2712 if (isl_int_is_zero(v))
2713 continue;
2714 if (sign < 0)
2715 isl_int_neg(v, v);
2716 aff = isl_aff_add_coefficient(aff, isl_dim_param, i, v);
2719 isl_int_clear(v);
2721 bound->shift = aff;
2724 /* Given an equality constraint of a map with a single output dimension j,
2725 * check if the constraint is of the form
2727 * a(p,i) + j = g f(e)
2729 * with a(p,i) an expression in the parameters and input dimensions
2730 * and f(e) an expression in the existentially quantified variables.
2731 * If so, and if g is larger than any such g from a previously considered
2732 * constraint, then call extract_stride to record the stride information
2733 * in bound.
2735 static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
2737 int i;
2738 isl_int v, stride;
2739 unsigned n_div;
2740 struct cuda_array_bound *bound = user;
2742 isl_int_init(v);
2743 isl_int_init(stride);
2745 n_div = isl_constraint_dim(c, isl_dim_div);
2746 isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);
2748 if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
2749 int s = isl_int_sgn(v);
2750 isl_int_set_si(stride, 0);
2751 for (i = 0; i < n_div; ++i) {
2752 isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
2753 isl_int_gcd(stride, stride, v);
2755 if (!isl_int_is_zero(stride) &&
2756 isl_int_gt(stride, bound->stride))
2757 extract_stride(c, bound, stride, s);
2760 isl_int_clear(stride);
2761 isl_int_clear(v);
2763 isl_constraint_free(c);
2764 return 0;
2767 /* Given constraints on an array index i, check if we can find
2768 * a shift a(p) and a stride g such that
2770 * a(p) + i = 0 mod g
2772 * If so, record the information in bound and apply the mapping
2773 * i -> (i + a(p))/g to the array index in bounds and return
2774 * the new constraints.
2775 * If not, simply return the original constraints.
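 *
 * For example (illustration only), if we find a shift a(p) = n and
 * a stride g = 2, then the array index i is replaced by (i + n)/2
 * in the returned constraints.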
2777 static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
2778 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2780 isl_basic_map *aff;
2781 isl_basic_map *shift;
2782 isl_aff *aff_shift;
2784 isl_int_set_si(bound->stride, -1);
2786 aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));
2788 isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);
2790 isl_basic_map_free(aff);
2792 if (isl_int_is_neg(bound->stride))
2793 return bounds;
2795 aff_shift = isl_aff_copy(bound->shift);
2796 aff_shift = isl_aff_add_dims(aff_shift, isl_dim_in, 1);
2797 aff_shift = isl_aff_add_coefficient_si(aff_shift, isl_dim_in, 0, 1);
2798 aff_shift = isl_aff_scale_down(aff_shift, bound->stride);
2799 shift = isl_basic_map_from_aff(aff_shift);
2801 bound->shift_map = isl_basic_map_copy(shift);
2802 bounds = isl_basic_map_apply_range(bounds, shift);
2804 return bounds;
2807 struct cuda_size_info {
2808 isl_basic_set *bset;
2809 struct cuda_array_bound *bound;
2810 int pos;
2813 /* Given a constraint from the basic set describing the bounds on
2814 * an array index, check if it is a lower bound, say m i >= b(x), and,
2815 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2816 * upper bound. If so, and if this bound is smaller than any bound
2817 * derived from earlier constraints, set the size to this bound on
2818 * the expression and the lower bound to ceil(b(x)/m).
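 *
 * For example (illustration only), if the constraints include
 * i >= n and i <= n + 31, then "i - n + 1" has constant upper bound 32,
 * so the size in this direction would be 32 with lower bound n.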
2820 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
2822 struct cuda_size_info *size = user;
2823 unsigned nparam;
2824 unsigned n_div;
2825 isl_int v;
2827 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
2828 n_div = isl_constraint_dim(c, isl_dim_div);
2830 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
2831 isl_constraint_free(c);
2832 return 0;
2835 isl_int_init(v);
2837 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
2839 if (isl_int_is_pos(v)) {
2840 isl_aff *aff;
2841 isl_aff *lb;
2842 enum isl_lp_result res;
2844 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
2845 aff = isl_aff_ceil(aff);
2847 lb = isl_aff_copy(aff);
2849 aff = isl_aff_neg(aff);
2850 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);
2852 res = isl_basic_set_max(size->bset, aff, &v);
2853 isl_aff_free(aff);
2855 if (res == isl_lp_ok) {
2856 isl_int_add_ui(v, v, 1);
2857 if (isl_int_is_neg(size->bound->size) ||
2858 isl_int_lt(v, size->bound->size)) {
2859 isl_int_set(size->bound->size, v);
2860 lb = isl_aff_drop_dims(lb, isl_dim_in,
2861 0, size->pos + 1);
2862 isl_aff_free(size->bound->lb);
2863 size->bound->lb = isl_aff_copy(lb);
2866 isl_aff_free(lb);
2869 isl_int_clear(v);
2870 isl_constraint_free(c);
2872 return 0;
2875 /* Given a basic map "bounds" that maps parameters and input dimensions
2876 * to a single output dimension, look for an expression in the parameters
2877 * and input dimensions such that the range of the output dimension shifted
2878 * by this expression is a constant.
2880 * In particular, we currently only consider lower bounds on the output
2881 * dimension as candidate expressions.
2883 static int compute_array_dim_size(struct cuda_gen *gen,
2884 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2886 struct cuda_size_info size;
2888 bounds = isl_basic_map_detect_equalities(bounds);
2889 bounds = check_stride(gen, bound, bounds);
2891 isl_int_set_si(bound->size, -1);
2892 bound->lb = NULL;
2894 size.bound = bound;
2895 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
2896 size.bset = isl_basic_map_wrap(bounds);
2897 size.bset = isl_basic_set_flatten(size.bset);
2898 size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
2899 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
2900 &size);
2901 isl_basic_set_free(size.bset);
2903 return isl_int_is_nonneg(bound->size) ? 0 : -1;
2906 /* Check if we can find a shared memory tile for the given array
2907 * based on the given accesses, and if so, put the results
2908 * in "bounds".
2910 * We project the accesses on each index in turn and look for a parametric
2911 * offset such that the size is constant.
2913 static int can_tile_for_shared_memory(struct cuda_gen *gen,
2914 struct cuda_array_info *array, __isl_keep isl_map *access,
2915 struct cuda_array_bound *bounds)
2917 int i;
2919 for (i = 0; i < array->n_index; ++i) {
2920 isl_map *access_i;
2921 isl_basic_map *hull;
2923 access_i = isl_map_copy(access);
2924 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2925 access_i = isl_map_project_out(access_i, isl_dim_out,
2926 1, array->n_index - (i + 1));
2927 access_i = isl_map_compute_divs(access_i);
2928 hull = isl_map_simple_hull(access_i);
2929 if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
2930 return 0;
2933 return 1;
2936 /* Construct a map with as input the shared tile loops and the loops that
2937 * will be wrapped around the threads, that relates these latter loops
2938 * to the thread indices and then projects them out.
2940 static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
2942 isl_map *priv;
2943 isl_map *tiling;
2944 isl_map *proj;
2945 isl_set *par;
2946 isl_space *dim;
2948 dim = isl_union_map_get_space(gen->shared_sched);
2950 if (gen->options->wrap)
2951 tiling = wrap(isl_space_copy(dim), gen->shared_len + gen->n_block,
2952 gen->shared_len, gen->n_block, gen->block_dim);
2953 else
2954 tiling = tile(isl_space_copy(dim), gen->shared_len + gen->n_block,
2955 gen->shared_len, gen->n_block, gen->block_dim);
2957 priv = tiling;
2959 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2960 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2961 gen->n_block, "t");
2963 priv = isl_map_align_params(priv, isl_set_get_space(par));
2964 priv = isl_map_intersect_range(priv, par);
2966 dim = isl_map_get_space(priv);
2967 dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
2968 dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
2969 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2970 gen->shared_len);
2972 priv = isl_map_apply_range(priv, proj);
2974 return priv;
2977 /* Construct a map from domain_dim to domain_dim that increments
2978 * the dimension at position "pos" and leaves all other dimensions
2979 * constant.
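 *
 * For example (illustration only), for a 3-dimensional domain and pos = 1,
 * the result is the map { [i0, i1, i2] -> [i0, i1 + 1, i2] }.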
2981 static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
2983 int i;
2984 int len = isl_space_dim(domain_dim, isl_dim_set);
2985 isl_space *dim;
2986 isl_basic_map *next;
2987 isl_local_space *ls;
2989 dim = isl_space_map_from_set(domain_dim);
2990 next = isl_basic_map_universe(isl_space_copy(dim));
2991 ls = isl_local_space_from_space(dim);
2993 for (i = 0; i < len; ++i) {
2994 isl_constraint *c;
2996 c = isl_equality_alloc(isl_local_space_copy(ls));
2997 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2998 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2999 if (i == pos)
3000 isl_constraint_set_constant_si(c, 1);
3001 next = isl_basic_map_add_constraint(next, c);
3004 isl_local_space_free(ls);
3006 return isl_map_from_basic_map(next);
3009 /* Check if the given access is coalesced.
3010 * That is, check whether incrementing the dimension that will get
3011 * wrapped over the last thread index results in incrementing
3012 * the last array index.
3014 * This function is only called for access relations without reuse.
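 *
 * For example (illustration only), if the loop j is wrapped over the
 * last thread index and the access is A[i][j], then incrementing j
 * increments the last array index, so the access is coalesced;
 * an access A[j][i] would not be.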
3016 static int access_is_coalesced(struct cuda_gen *gen,
3017 __isl_keep isl_union_map *access)
3019 isl_space *dim;
3020 isl_map *access_map;
3021 isl_map *next_thread_x;
3022 isl_map *next_element;
3023 isl_map *map;
3024 int coalesced;
3026 access = isl_union_map_copy(access);
3027 access = isl_union_map_apply_domain(access,
3028 isl_union_map_copy(gen->tiled_sched));
3029 access_map = isl_map_from_union_map(access);
3031 dim = isl_map_get_space(access_map);
3032 dim = isl_space_domain(dim);
3033 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
3035 dim = isl_map_get_space(access_map);
3036 dim = isl_space_range(dim);
3037 next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);
3039 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
3040 map = isl_map_apply_range(map, access_map);
3042 coalesced = isl_map_is_subset(map, next_element);
3044 isl_map_free(next_element);
3045 isl_map_free(map);
3047 return coalesced;
3050 /* For the given array reference group, check whether the access is private
3051 * to the thread. That is, check that any given array element
3052 * is only accessed by a single thread.
3053 * We compute an access relation that maps the shared tile loop iterators
3054 * and the shared point loop iterators that will be wrapped over the
3055 * threads to the array elements.
3056 * We actually check that those iterators that will be wrapped
3057 * partition the array space. This check is stricter than necessary
3058 * since several iterations may be mapped onto the same thread
3059 * and then they could be allowed to access the same memory elements,
3060 * but our check does not allow this situation.
3062 * We also check that the index expression only depends on parallel
3063 * loops. That way, we can move those loops innermost and unroll them.
3064 * Again, we use a test that is stricter than necessary.
3065 * We actually check whether the index expression only depends
3066 * on the iterators that are wrapped over the threads.
3067 * These are necessarily parallel, but there may be more parallel loops.
3069 * Combining the injectivity of the first test with the single-valuedness
3070 * of the second test, we simply test for bijectivity.
3072 * If it turns out we can use registers, we compute the private memory
3073 * tile size using can_tile_for_shared_memory, after introducing a dependence
3074 * on the thread indices.
3076 * Before performing any of the above computations, we first check
3077 * if there is any reuse on the reference group. If not, we simply
3078 * return. If, moreover, the access is coalesced then we also remove
3079 * the shared memory tiling since we should just use global memory instead.
3081 static void check_private_group_access(struct cuda_gen *gen,
3082 struct cuda_array_ref_group *group)
3084 isl_map *acc;
3085 isl_union_map *access;
3086 int n_index = group->array->n_index;
3088 access = group_access_relation(group, 1, 1);
3089 if (isl_union_map_is_injective(access)) {
3090 if (group->shared_bound && access_is_coalesced(gen, access)) {
3091 free_bound_list(group->shared_bound, n_index);
3092 group->shared_bound = NULL;
3094 isl_union_map_free(access);
3095 return;
3097 access = isl_union_map_apply_domain(access,
3098 isl_union_map_copy(gen->shared_sched));
3100 acc = isl_map_from_union_map(access);
3102 if (!isl_map_is_bijective(acc)) {
3103 isl_map_free(acc);
3104 return;
3107 group->private_bound = create_bound_list(gen->ctx, n_index);
3108 acc = isl_map_align_params(acc, isl_map_get_space(gen->privatization));
3109 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
3110 if (!can_tile_for_shared_memory(gen, group->array, acc,
3111 group->private_bound)) {
3112 free_bound_list(group->private_bound, n_index);
3113 group->private_bound = NULL;
3116 isl_map_free(acc);
3119 /* Look for the last shared tile loop that affects the offset of the
3120 * shared or private tile and store the result in group->last_shared.
3122 static void set_last_shared(struct cuda_gen *gen,
3123 struct cuda_array_ref_group *group)
3125 int i, j;
3126 struct cuda_array_bound *bounds;
3127 unsigned first_shared = gen->first_shared;
3128 int n_index = group->array->n_index;
3130 bounds = group->private_bound;
3131 if (!bounds)
3132 bounds = group->shared_bound;
3133 if (!bounds)
3134 return;
3136 for (j = gen->shared_len - 1; j >= 0; --j) {
3137 for (i = 0; i < n_index; ++i) {
3138 isl_aff *lb;
3139 isl_aff *shift;
3141 lb = bounds[i].lb;
3142 if (isl_aff_involves_dims(lb, isl_dim_param,
3143 first_shared + j, 1))
3144 break;
3146 shift = bounds[i].shift;
3147 if (!shift)
3148 continue;
3149 if (isl_aff_involves_dims(shift, isl_dim_param,
3150 first_shared + j, 1))
3151 break;
3153 if (i < n_index)
3154 break;
3156 group->last_shared = j;
3159 /* Compute the sizes of all private arrays for the current kernel,
3160 * as well as the offsets of the private pieces in the original arrays.
3161 * If we cannot or don't want to privatize a given array group,
3162 * we use the shared memory tile sizes computed in
3163 * compute_group_shared_bound instead.
3165 * If we have been able to find a private or shared tile,
3166 * we also look for the last shared tile loop that affects the offset
3167 * (and therefore the group tile) and store the result in group->last_shared.
3169 * A privatized copy of all access relations from reference groups that
3170 * are mapped to private memory is stored in gen->private_access.
3172 static void compute_private_size(struct cuda_gen *gen)
3174 int i, j;
3175 isl_union_map *private;
3177 if (!gen->options->use_private_memory)
3178 return;
3180 private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));
3182 for (i = 0; i < gen->n_array; ++i) {
3183 struct cuda_array_info *array = &gen->array[i];
3185 if (cuda_array_is_read_only_scalar(array))
3186 continue;
3188 for (j = 0; j < array->n_group; ++j) {
3189 check_private_group_access(gen, array->groups[j]);
3191 if (!array->groups[j]->private_bound)
3192 continue;
3194 private = isl_union_map_union(private,
3195 group_access_relation(array->groups[j], 1, 1));
3198 for (j = 0; j < array->n_group; ++j) {
3199 array->groups[j]->last_shared = gen->shared_len - 1;
3200 array->groups[j]->print_shared_level = -1;
3201 set_last_shared(gen, array->groups[j]);
3205 if (isl_union_map_is_empty(private))
3206 isl_union_map_free(private);
3207 else {
3208 isl_union_map *priv;
3210 private = isl_union_map_apply_domain(private,
3211 isl_union_map_copy(gen->shared_sched));
3212 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
3213 private = isl_union_map_apply_domain(private, priv);
3214 gen->private_access = private;
3218 /* Compute the size of the tile specified by the list "bound" of n_index
3219 * cuda_array_bounds in number of elements and put the result in *size.
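 *
 * For example (illustration only), a two-dimensional tile with bounds
 * of size 32 and 4 contains 32 * 4 = 128 elements.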
3221 static void tile_size(unsigned n_index, struct cuda_array_bound *bound,
3222 isl_int *size)
3224 int i;
3226 isl_int_set_si(*size, 1);
3228 for (i = 0; i < n_index; ++i)
3229 isl_int_mul(*size, *size, bound[i].size);
3232 /* If max_shared_memory is not set to infinity (-1), then make
3233 * sure that the total amount of shared memory required by the
3234 * array reference groups mapped to shared memory is no larger
3235 * than this maximum.
3237 * We apply a greedy approach and discard (keep in global memory)
3238 * those groups that would result in a total memory size that
3239 * is larger than the maximum.
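 *
 * For example (illustration only), with a maximum of 48 * 1024 bytes,
 * a group requiring 40960 bytes would be kept in shared memory, while
 * a subsequent group requiring another 16384 bytes would be discarded
 * since 40960 + 16384 exceeds the maximum.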
3241 static void check_shared_memory_bound(struct cuda_gen *gen)
3243 int i, j;
3244 isl_int left, size;
3246 if (gen->options->max_shared_memory < 0)
3247 return;
3249 isl_int_init(left);
3250 isl_int_init(size);
3251 isl_int_set_si(left, gen->options->max_shared_memory);
3253 for (i = 0; i < gen->n_array; ++i) {
3254 struct cuda_array_info *array = &gen->array[i];
3256 for (j = 0; j < array->n_group; ++j) {
3257 struct cuda_array_ref_group *group;
3259 group = array->groups[j];
3260 if (!group->shared_bound)
3261 continue;
3263 tile_size(array->n_index, group->shared_bound, &size);
3264 isl_int_mul_ui(size, size, array->size);
3266 if (isl_int_le(size, left)) {
3267 isl_int_sub(left, left, size);
3268 continue;
3271 free_bound_list(group->shared_bound, array->n_index);
3272 group->shared_bound = NULL;
3276 isl_int_clear(size);
3277 isl_int_clear(left);
3280 /* Fill up the groups array with singleton groups, i.e., one group
3281 * per reference, initializing the array, access, write and refs fields.
3282 * In particular the access field is initialized to the scheduled
3283 * access relation of the array reference.
3285 * Return the number of elements initialized, i.e., the number of
3286 * active references in the current kernel.
3288 static int populate_array_references(struct cuda_gen *gen,
3289 struct cuda_array_info *array, __isl_keep isl_union_map *sched,
3290 struct cuda_array_ref_group **groups)
3292 int i;
3293 int n;
3294 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3296 n = 0;
3297 for (i = 0; i < array->n_ref; ++i) {
3298 isl_union_map *umap;
3299 isl_map *map;
3300 struct cuda_array_ref_group *group;
3301 struct cuda_stmt_access *access = array->refs[i];
3303 map = isl_map_copy(access->access);
3304 umap = isl_union_map_from_map(map);
3305 umap = isl_union_map_apply_domain(umap,
3306 isl_union_map_copy(sched));
3308 if (isl_union_map_is_empty(umap)) {
3309 isl_union_map_free(umap);
3310 continue;
3313 map = isl_map_from_union_map(umap);
3314 map = isl_map_detect_equalities(map);
3316 group = isl_calloc_type(ctx, struct cuda_array_ref_group);
3317 assert(group);
3318 group->array = array;
3319 group->access = map;
3320 group->write = access->write;
3321 group->refs = &array->refs[i];
3323 groups[n++] = group;
3326 return n;
3329 static void free_array_ref_group(struct cuda_array_ref_group *group,
3330 int n_index)
3332 if (!group)
3333 return;
3334 free_bound_list(group->shared_bound, n_index);
3335 free_bound_list(group->private_bound, n_index);
3336 isl_map_free(group->access);
3337 free(group->refs);
3338 free(group);
3341 /* Given a set where the parameters gen->first_shared up to
3342 * gen->first_shared + gen->shared_len represent the tile loops,
3343 * eliminate the innermost of those that have a fixed value
3344 * until we reach one that does not (obviously) have a fixed value.
3346 static __isl_give isl_set *eliminate_fixed_inner_loops(struct cuda_gen *gen,
3347 __isl_take isl_set *access)
3349 int i;
3351 for (i = gen->shared_len - 1; i >= 0; --i) {
3352 int pos = gen->first_shared + i;
3353 if (!isl_set_plain_is_fixed(access, isl_dim_param, pos, NULL))
3354 break;
3355 access = isl_set_eliminate(access, isl_dim_param, pos, 1);
3357 return access;
3360 /* Check if the accessed sets of group1 and group2 overlap within
3361 * the innermost loop. In particular, ignore any inner dimension
3362 * with a fixed value.
3363 * The copying to and from shared memory will be performed within
3364 * the innermost actual loop so we are only allowed to consider
3365 * the dimensions up to that innermost loop while checking whether
3366 * two access sets overlap.
3368 static int accesses_overlap(struct cuda_gen *gen,
3369 struct cuda_array_ref_group *group1,
3370 struct cuda_array_ref_group *group2)
3372 int empty;
3373 isl_set *access1, *access2;
3375 access1 = isl_map_range(isl_map_copy(group1->access));
3376 access1 = eliminate_fixed_inner_loops(gen, access1);
3377 access2 = isl_map_range(isl_map_copy(group2->access));
3378 access2 = eliminate_fixed_inner_loops(gen, access2);
3379 access1 = isl_set_intersect(access1, access2);
3380 empty = isl_set_is_empty(access1);
3381 isl_set_free(access1);
3383 return !empty;
3386 /* If two groups have overlapping access relations (within the innermost
3387 * loop) and if one of them involves a write, then merge the two groups
3388 * into one.
3390 * We keep track of the grouping in "leader". leader[j] points to
3391 * an earlier group array element that belongs to the same group,
3392 * or the array element j itself if this element is the first in the group.
3394 * Return the number of group leaders.
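 *
 * For example (illustration only), leader = { 0, 0, 2, 2 } expresses that
 * references 0 and 1 form one group (led by 0) and references 2 and 3
 * form another (led by 2), for a total of two group leaders.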
3396 static int group_overlapping_writes(struct cuda_gen *gen, int n,
3397 struct cuda_array_ref_group **groups, int *leader)
3399 int i, j;
3400 int n_group = n;
3402 for (i = 0; i < n; ++i) {
3403 int l = i;
3404 groups[l]->n_ref = 1;
3405 for (j = i - 1; j >= 0; --j) {
3406 if (leader[j] != j)
3407 continue;
3408 if (!groups[l]->write && !groups[j]->write)
3409 continue;
3411 if (!accesses_overlap(gen, groups[l], groups[j]))
3412 continue;
3414 groups[j]->access = isl_map_union(groups[j]->access,
3415 groups[l]->access);
3416 groups[j]->write = 1;
3417 groups[l]->access = NULL;
3418 groups[j]->n_ref += groups[l]->n_ref;
3419 l = leader[l] = j;
3420 n_group--;
3422 leader[i] = l;
3425 return n_group;
3428 /* Compute the size of the shared array corresponding to the given
3429 * array reference group, based on the accesses from the current kernel,
3430 * as well as the offset of the shared piece in the original array.
3432 static void compute_group_shared_bound(struct cuda_gen *gen,
3433 struct cuda_array_info *array, struct cuda_array_ref_group *group)
3435 isl_ctx *ctx = isl_space_get_ctx(array->dim);
3437 if (!gen->options->use_shared_memory)
3438 return;
3439 if (cuda_array_is_read_only_scalar(array))
3440 return;
3442 group->shared_bound = create_bound_list(ctx, array->n_index);
3443 if (!can_tile_for_shared_memory(gen, array, group->access,
3444 group->shared_bound)) {
3445 free_bound_list(group->shared_bound, array->n_index);
3446 group->shared_bound = NULL;
3450 /* Is the size of the tile specified by "bound" smaller than the sum of
3451 * the sizes of the tiles specified by "bound1" and "bound2"?
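 *
 * For example (illustration only), a merged 10 x 10 tile of 100 elements
 * is smaller than two separate 8 x 8 tiles totaling 128 elements.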
3453 static int smaller_tile(unsigned n_index, struct cuda_array_bound *bound,
3454 struct cuda_array_bound *bound1, struct cuda_array_bound *bound2)
3456 int smaller;
3457 isl_int size, size1, size2;
3459 isl_int_init(size);
3460 isl_int_init(size1);
3461 isl_int_init(size2);
3463 tile_size(n_index, bound, &size);
3464 tile_size(n_index, bound1, &size1);
3465 tile_size(n_index, bound2, &size2);
3467 isl_int_sub(size, size, size1);
3468 isl_int_sub(size, size, size2);
3469 smaller = isl_int_is_neg(size);
3471 isl_int_clear(size2);
3472 isl_int_clear(size1);
3473 isl_int_clear(size);
3475 return smaller;
3478 /* Given an initial grouping of array references and shared memory tiles
3479 * for each group that allows for a shared memory tile, merge two groups
3480 * if both have a shared memory tile, the merged group also has
3481 * a shared memory tile and the size of the tile for the merged group
3482 * is smaller than the sum of the tile sizes of the individual groups.
3484 * Return the number of group leaders after merging.
3486 static int group_common_shared_memory_tile(struct cuda_gen *gen,
3487 struct cuda_array_info *array, int n,
3488 struct cuda_array_ref_group **groups, int *leader, int n_group)
3490 int i, j;
3491 isl_ctx *ctx = isl_space_get_ctx(array->dim);
3493 for (i = 0; n_group > 1 && i < n; ++i) {
3494 int l = i;
3495 if (leader[i] != i)
3496 continue;
3497 if (!groups[i]->shared_bound)
3498 continue;
3499 for (j = i - 1; j >= 0; --j) {
3500 isl_map *map;
3501 int empty;
3502 struct cuda_array_bound *shared_bound;
3504 if (leader[j] != j)
3505 continue;
3506 if (!groups[j]->shared_bound)
3507 continue;
3509 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3510 isl_map_copy(groups[j]->access));
3511 empty = isl_map_is_empty(map);
3512 isl_map_free(map);
3514 if (empty)
3515 continue;
3517 map = isl_map_union(isl_map_copy(groups[l]->access),
3518 isl_map_copy(groups[j]->access));
3519 shared_bound = create_bound_list(ctx, array->n_index);
3520 if (!can_tile_for_shared_memory(gen, array, map,
3521 shared_bound) ||
3522 !smaller_tile(array->n_index, shared_bound,
3523 groups[l]->shared_bound,
3524 groups[j]->shared_bound)) {
3525 isl_map_free(map);
3526 free_bound_list(shared_bound, array->n_index);
3527 continue;
3530 free_bound_list(groups[j]->shared_bound,
3531 array->n_index);
3532 groups[j]->shared_bound = shared_bound;
3533 isl_map_free(groups[j]->access);
3534 groups[j]->access = map;
3535 groups[j]->n_ref += groups[l]->n_ref;
3536 l = leader[l] = j;
3537 n_group--;
3541 return n_group;
3544 /* Extract an array of array reference groups from the array of references
3545 * and the grouping information in "leader".
3547 * Store the results in array->n_group and array->groups.
3549 static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
3550 int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
3552 int i, j;
3554 for (i = 2; i < n; ++i)
3555 leader[i] = leader[leader[i]];
3557 array->n_group = n_group;
3558 array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
3559 n_group);
3560 assert(array->groups);
3562 j = 0;
3563 for (i = 0; i < n; ++i) {
3564 int k, l;
3565 struct cuda_stmt_access **refs;
3567 if (leader[i] != i) {
3568 groups[i]->refs = NULL;
3569 free_array_ref_group(groups[i], array->n_index);
3570 continue;
3573 refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
3574 groups[i]->n_ref);
3575 assert(refs);
3576 l = 0;
3577 for (k = i; k < n; ++k)
3578 if (leader[k] == i) {
3579 refs[l++] = *groups[k]->refs;
3580 (*groups[k]->refs)->group = j;
3583 groups[i]->refs = refs;
3584 groups[i]->nr = j;
3585 array->groups[j++] = groups[i];
3589 /* Group array references that should be considered together when
3590 * deciding whether to access them from private, shared or global memory.
3592 * In particular, if two array references overlap and if one of them
3593 * is a write, then the two references are grouped together.
3594 * Furthermore, if two groups admit a shared memory tile and if the
3595 * combination of the two also admits a shared memory tile, we merge
3596 * the two groups.
3598 * During the construction the group->refs field points to a single
3599 * array reference inside the array of array references, while
3600 * group->n_ref contains the number of elements in leader that
3601 * (directly or indirectly) point to this group, provided the group
3602 * is a leader.
3604 static void group_array_references(struct cuda_gen *gen,
3605 struct cuda_array_info *array, __isl_keep isl_union_map *sched)
3607 int i;
3608 int n, n_group;
3609 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3610 struct cuda_array_ref_group **groups;
3611 int *leader;
3613 groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
3614 array->n_ref);
3615 assert(groups);
3617 n = populate_array_references(gen, array, sched, groups);
3619 leader = isl_alloc_array(ctx, int, n);
3620 assert(leader);
3622 n_group = group_overlapping_writes(gen, n, groups, leader);
3624 for (i = 0; i < n; ++i)
3625 if (leader[i] == i)
3626 compute_group_shared_bound(gen, array, groups[i]);
3628 n_group = group_common_shared_memory_tile(gen, array, n, groups,
3629 leader, n_group);
3631 extract_array_groups(ctx, array, n, groups, leader, n_group);
3633 free(leader);
3634 free(groups);
3637 /* Take tiled_sched, project it onto the shared tile loops and
3638 * the loops that will be wrapped over the threads,
3639 * parametrize the shared tile loops and store the result in gen->shared_sched.
3640 * The position of the first of these parameters is stored in gen->first_shared.
3641 * Also compute a projection that projects out the loops that will be
3642 * wrapped over the threads and store this projection in gen->shared_proj.
3644 static void compute_shared_sched(struct cuda_gen *gen)
3646 isl_space *dim;
3647 isl_map *proj;
3648 isl_set *par;
3649 isl_union_map *sched;
3651 sched = isl_union_map_copy(gen->tiled_sched);
3653 dim = isl_union_map_get_space(sched);
3654 gen->first_shared = isl_space_dim(dim, isl_dim_param);
3655 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
3656 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
3658 dim = isl_union_map_get_space(sched);
3659 par = parametrization(dim, gen->shared_len + gen->n_block,
3660 0, gen->shared_len, "g");
3661 sched = isl_union_map_intersect_range(sched,
3662 isl_union_set_from_set(par));
3664 dim = isl_union_map_get_space(sched);
3665 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
3667 gen->shared_sched = sched;
3668 gen->shared_proj = isl_union_map_from_map(proj);
3671 /* Group references of all arrays in the program.
3673 static void group_references(struct cuda_gen *gen)
3675 int i;
3676 isl_union_map *sched;
3678 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3679 isl_union_map_copy(gen->shared_proj));
3681 for (i = 0; i < gen->n_array; ++i)
3682 group_array_references(gen, &gen->array[i], sched);
3684 isl_union_map_free(sched);
3687 /* Free all array information that is local to the current kernel.
3689 static void free_local_array_info(struct cuda_gen *gen)
3691 int i, j;
3693 for (i = 0; i < gen->n_array; ++i) {
3694 struct cuda_array_info *array = &gen->array[i];
3696 for (j = 0; j < array->n_group; ++j)
3697 free_array_ref_group(array->groups[j], array->n_index);
3698 free(array->groups);
3700 if (array->n_group == 0)
3701 continue;
3702 for (j = 0; j < gen->array[i].n_index; ++j) {
3703 isl_pw_aff_free(gen->array[i].local_bound[j]);
3704 gen->array[i].local_bound[j] = NULL;
3709 /* The sizes of the arrays on the host that have been computed by
3710 * extract_array_info may depend on the parameters. Use the extra
3711 * constraints on the parameters that are valid at "host_domain"
3712 * to simplify these expressions.
3714 static void localize_bounds(struct cuda_gen *gen,
3715 __isl_keep isl_set *host_domain)
3717 int i, j;
3718 isl_set *context;
3720 context = isl_set_copy(host_domain);
3721 context = isl_set_params(context);
3723 for (i = 0; i < gen->n_array; ++i) {
3724 struct cuda_array_info *array = &gen->array[i];
3726 if (array->n_group == 0)
3727 continue;
3729 for (j = 0; j < array->n_index; ++j) {
3730 isl_pw_aff *pwaff;
3732 pwaff = isl_pw_aff_copy(array->bound[j]);
3733 pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
3734 array->local_bound[j] = pwaff;
3737 isl_set_free(context);
3740 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3741 * in the statement list u.
3742 * Because of the way the schedule is constructed, the other statements
3743 * in the list, if any, should have the same values for these properties.
3745 static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
3747 int nr;
3748 struct cuda_stmt *stmt;
3750 nr = atoi(u->statement->name + 2);
3751 stmt = &gen->stmts[nr];
3753 gen->tile_len = stmt->tile_len;
3754 gen->n_parallel = stmt->n_parallel;
3757 /* Extract a description of the grid, i.e., the possible values
3758 * of the block ids, from gen->tiled_sched.
3759 * The block ids are parameters in gen->tiled_sched.
3760 * We simply need to change them into set dimensions.
3762 static __isl_give isl_set *extract_grid(struct cuda_gen *gen)
3764 int i;
3765 isl_set *grid;
3767 grid = isl_union_map_params(isl_union_map_copy(gen->tiled_sched));
3768 grid = isl_set_from_params(grid);
3769 grid = isl_set_add_dims(grid, isl_dim_set, gen->n_grid);
3770 for (i = 0; i < gen->n_grid; ++i) {
3771 int pos;
3772 char name[20];
3774 snprintf(name, sizeof(name), "b%d", i);
3775 pos = isl_set_find_dim_by_name(grid, isl_dim_param, name);
3776 assert(pos >= 0);
3777 grid = isl_set_equate(grid, isl_dim_param, pos, isl_dim_set, i);
3778 grid = isl_set_project_out(grid, isl_dim_param, pos, 1);
3781 return grid;
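/* Illustrative only: for n_grid = 2, a parameter domain of the form
 *
 *	[n, b0, b1] -> { : n >= 1 and 0 <= b0 <= 7 and 0 <= b1 <= 3 }
 *
 * is turned into the grid
 *
 *	[n] -> { [b0, b1] : n >= 1 and 0 <= b0 <= 7 and 0 <= b1 <= 3 }
 */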
3784 /* Print the effective grid size as a list of the sizes in each
3785 * dimension, from innermost to outermost.
3787 * The grid size specified by the user or set by default
3788 * in read_grid_sizes() and applied in tile_schedule(),
3789 * may be too large for the given code in the sense that
3790 * it may contain blocks that don't need to execute anything.
3791 * We therefore don't print this grid size, but instead the
3792 * smallest grid size that ensures that all blocks that actually
3793 * execute code are included in the grid.
3795 * For each block dimension, we compute the maximal value of the block id
3796 * and add one.
3798 static void print_grid_size(struct cuda_gen *gen, __isl_take isl_set *context)
3800 int i;
3801 isl_printer *prn;
3802 isl_set *grid;
3804 if (gen->n_grid == 0) {
3805 isl_set_free(context);
3806 return;
3809 grid = extract_grid(gen);
3811 prn = isl_printer_to_file(gen->ctx, gen->cuda.host_c);
3812 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3814 prn = isl_printer_print_str(prn, "(");
3815 for (i = gen->n_grid - 1; i >= 0; --i) {
3816 isl_space *space;
3817 isl_aff *one;
3818 isl_pw_aff *bound = isl_set_dim_max(isl_set_copy(grid), i);
3820 bound = isl_pw_aff_coalesce(bound);
3821 bound = isl_pw_aff_gist(bound, isl_set_copy(context));
3823 space = isl_pw_aff_get_domain_space(bound);
3824 one = isl_aff_zero_on_domain(isl_local_space_from_space(space));
3825 one = isl_aff_add_constant_si(one, 1);
3826 bound = isl_pw_aff_add(bound, isl_pw_aff_from_aff(one));
3827 prn = isl_printer_print_pw_aff(prn, bound);
3828 isl_pw_aff_free(bound);
3830 if (i > 0)
3831 prn = isl_printer_print_str(prn, ", ");
3833 prn = isl_printer_print_str(prn, ")");
3835 isl_printer_free(prn);
3836 isl_set_free(grid);
3837 isl_set_free(context);
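/* A minimal standalone sketch (not part of ppcg) of the "max + 1"
 * computation performed above, using only isl calls that also appear
 * in this file.  The set string and the parameter name "n" are made up.
 */
#if 0
#include <stdio.h>
#include <isl/ctx.h>
#include <isl/space.h>
#include <isl/local_space.h>
#include <isl/aff.h>
#include <isl/set.h>
#include <isl/printer.h>

int main(void)
{
	isl_ctx *ctx = isl_ctx_alloc();
	isl_set *grid = isl_set_read_from_str(ctx,
		"[n] -> { [b0] : 0 <= b0 and 512 * b0 < n }");
	isl_pw_aff *bound = isl_set_dim_max(isl_set_copy(grid), 0);
	isl_space *space = isl_pw_aff_get_domain_space(bound);
	isl_aff *one = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	isl_printer *p;

	/* Add one to the maximal block id to obtain the grid size. */
	one = isl_aff_add_constant_si(one, 1);
	bound = isl_pw_aff_add(bound, isl_pw_aff_from_aff(one));

	/* Print the resulting size expression in C syntax. */
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_set_output_format(p, ISL_FORMAT_C);
	p = isl_printer_print_pw_aff(p, bound);
	isl_printer_free(p);

	isl_pw_aff_free(bound);
	isl_set_free(grid);
	isl_ctx_free(ctx);
	return 0;
}
#endif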
3840 /* This function is called for each leaf in the clast of the host code.
3841 * We first specialize the schedule to the site of the leaf, compute
3842 * the size of shared memory and then print the body of host code
3843 * and the associated kernel (through a call to print_kernel_body).
3845 static void print_host_user(struct clast_printer_info *code,
3846 struct clast_user_stmt *u)
3848 struct cuda_gen *gen = code->user;
3849 isl_space *dim;
3850 isl_set *par;
3851 isl_set *host_domain;
3852 isl_union_map *access;
3853 isl_union_map *local_sched;
3854 isl_union_set *arrays;
3856 set_tile_len(gen, u);
3857 read_sizes(gen);
3859 host_domain = extract_entire_host_domain(&u->stmt);
3861 local_sched = isl_union_map_intersect_range(
3862 isl_union_map_copy(gen->sched),
3863 isl_union_set_from_set(extend(isl_set_copy(host_domain),
3864 gen->untiled_len)));
3865 access = isl_union_map_union(isl_union_map_copy(gen->read),
3866 isl_union_map_copy(gen->write));
3867 access = isl_union_map_apply_domain(access,
3868 isl_union_map_copy(local_sched));
3869 arrays = isl_union_map_range(access);
3871 print_indent(code->dst, code->indent);
3872 fprintf(code->dst, "dim3 k%d_dimBlock", gen->kernel_id);
3873 print_reverse_list(code->dst, gen->n_block, gen->block_dim);
3874 fprintf(code->dst, ";\n");
3876 gen->tiled_sched = tile_schedule(gen, local_sched);
3877 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
3878 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
3880 print_indent(code->dst, code->indent);
3881 fprintf(code->dst, "dim3 k%d_dimGrid", gen->kernel_id);
3882 print_grid_size(gen, isl_set_params(isl_set_copy(host_domain)));
3883 fprintf(code->dst, ";\n");
3885 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
3887 dim = isl_union_map_get_space(gen->local_sched);
3888 par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
3889 gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
3890 isl_union_set_from_set(par));
3892 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
3893 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
3895 gen->private_access = NULL;
3896 compute_shared_sched(gen);
3897 gen->privatization = compute_privatization(gen);
3898 group_references(gen);
3899 compute_private_size(gen);
3900 check_shared_memory_bound(gen);
3901 localize_bounds(gen, host_domain);
3903 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
3905 print_kernel_launch(gen, arrays);
3907 fprintf(gen->cuda.kernel_c, "{\n");
3909 print_kernel_body(gen, host_domain, gen->tiled_sched);
3911 fprintf(gen->cuda.kernel_c, "}\n");
3913 free_local_array_info(gen);
3914 isl_map_free(gen->privatization);
3915 isl_union_map_free(gen->private_access);
3916 isl_union_map_free(gen->local_sched);
3917 isl_union_map_free(gen->tiled_sched);
3918 isl_union_map_free(gen->shared_sched);
3919 isl_union_map_free(gen->shared_proj);
3920 isl_union_set_free(arrays);
3921 isl_set_free(host_domain);
3923 free(gen->tile_size);
3924 gen->kernel_id++;
3927 /* Use CLooG to generate code for the outer gen->tile_first loops
3928 * of the global schedule in gen->sched.
3929 * The pretty printing of this code is handled by print_clast,
3930 * which calls print_host_user for each kernel invocation location.
3932 static void print_cloog_host_code(struct cuda_gen *gen)
3934 int i;
3935 isl_set *context;
3936 isl_union_map *sched;
3937 CloogOptions *options;
3938 CloogDomain *cloog_context;
3939 CloogUnionDomain *ud;
3940 CloogInput *input;
3941 struct clast_stmt *stmt;
3942 char name[20];
3944 options = cloog_options_malloc(gen->state);
3945 options->language = CLOOG_LANGUAGE_C;
3946 options->otl = 0;
3947 options->strides = 1;
3948 options->stop = gen->tile_first;
3949 options->f = gen->untiled_len;
3950 options->l = gen->untiled_len;
3951 options->save_domains = 1;
3952 options->noscalars = 1;
3954 sched = isl_union_map_copy(gen->sched);
3955 ud = cloog_union_domain_from_isl_union_map(sched);
3956 for (i = 0; i < options->stop; ++i) {
3957 snprintf(name, sizeof(name), "h%d", i);
3958 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
3960 context = isl_set_copy(gen->context);
3961 cloog_context = cloog_domain_from_isl_set(context);
3962 input = cloog_input_alloc(cloog_context, ud);
3964 stmt = cloog_clast_create_from_input(input, options);
3966 gen->code.indent = 0;
3967 gen->code.dst = gen->cuda.host_c;
3968 gen->code.print_user_stmt = NULL;
3969 gen->code.print_user_stmt_list = &print_host_user;
3970 gen->code.print_for_head = NULL;
3971 gen->code.print_for_foot = NULL;
3972 gen->code.user = gen;
3973 print_clast(&gen->code, stmt);
3975 cloog_clast_free(stmt);
3976 cloog_options_free(options);
3977 fprintf(gen->cuda.host_c, "\n");
3980 void print_cuda_macros(struct cuda_gen *gen)
3982 const char *macros =
3983 "#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
3984 "#define cudaCheckKernel()"
3985 " assert(cudaGetLastError() == cudaSuccess)\n\n";
3986 fputs(macros, gen->cuda.host_c);
3989 void print_host_code(struct cuda_gen *gen)
3991 fprintf(gen->cuda.host_c, "{\n");
3992 print_cloog_macros(gen->cuda.host_c);
3993 print_cloog_macros(gen->cuda.kernel_c);
3995 print_cuda_macros(gen);
3997 declare_device_arrays(gen);
3999 allocate_device_arrays(gen);
4000 copy_arrays_to_device(gen);
4002 gen->kernel_id = 0;
4003 print_cloog_host_code(gen);
4005 copy_arrays_from_device(gen);
4006 free_device_arrays(gen);
4008 fprintf(gen->cuda.host_c, "}\n");
4011 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
4012 const char *str)
4014 isl_ctx *ctx;
4015 isl_set *context;
4017 if (!str)
4018 return set;
4020 ctx = isl_set_get_ctx(set);
4021 context = isl_set_read_from_str(ctx, str);
4022 context = isl_set_align_params(context, isl_set_get_space(set));
4023 set = isl_set_intersect(set, context);
4025 return set;
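/* Illustrative only: with the context string "[n] -> { : n >= 1024 }"
 * supplied through options->ctx, the constraint n >= 1024 is added to
 * the context set of the scop.
 */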
4028 __isl_give isl_union_map *extract_sizes_from_str(isl_ctx *ctx, const char *str)
4030 if (!str)
4031 return NULL;
4032 return isl_union_map_read_from_str(ctx, str);
4035 /* Return the union of all iteration domains of the statements gen->stmts[i].
4037 static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
4039 int i;
4040 isl_union_set *domain;
4042 domain = isl_union_set_empty(isl_set_get_space(gen->context));
4043 for (i = 0; i < gen->n_stmts; ++i) {
4044 isl_set *domain_i;
4046 domain_i = isl_set_copy(gen->stmts[i].domain);
4047 domain = isl_union_set_union(domain,
4048 isl_union_set_from_set(domain_i));
4051 return domain;
4054 /* Information about the outermost tilable bands in the forest of bands.
4056 * tile_len and n_parallel are only set on band_info structures
4057 * that correspond to outermost bands. For other bands (in particular,
4058 * ancestors of the outermost bands), n_parallel is set to 0.
4060 * prefix is the (padded) schedule leading up to the outermost tilable bands.
4062 * tile_first is the number of schedule dimensions in prefix.
4064 * suffix is the schedule of the outermost tilable bands and their descendants.
4066 struct band_info {
4067 struct cuda_gen *gen;
4068 int tile_first;
4069 int tile_len;
4070 int n_parallel;
4071 isl_union_map *prefix;
4072 isl_union_map *suffix;
4075 /* Set the tile_len and n_parallel of each statement to those of
4076 * its outermost band, as recorded in the band_info.
4078 static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
4080 struct band_info *info = user;
4081 int nr;
4082 struct cuda_stmt *stmt;
4084 nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
4085 stmt = &info->gen->stmts[nr];
4087 stmt->tile_len = info->tile_len;
4088 stmt->n_parallel = info->n_parallel;
4090 isl_map_free(map);
4092 return 0;
4095 static void list_select_outer_band(struct cuda_gen *gen,
4096 __isl_take isl_band_list *list, int pos, struct band_info *list_info);
4098 /* Check if this band has any parallel loops. If so, take it as
4099 * the outermost tilable band. If not, continue looking for the
4100 * outermost tilable band in the children of the current band.
4102 static void band_select_outer_band(struct cuda_gen *gen,
4103 __isl_take isl_band *band, int pos, struct band_info *info)
4105 int n = isl_band_n_member(band);
4106 int n_parallel;
4108 for (n_parallel = 0; n_parallel < n; ++n_parallel)
4109 if (!isl_band_member_is_zero_distance(band, n_parallel))
4110 break;
4112 info->n_parallel = n_parallel;
4113 if (n_parallel) {
4114 info->gen = gen;
4115 info->tile_first = pos;
4116 info->tile_len = n;
4117 info->prefix = isl_band_get_prefix_schedule(band);
4118 info->suffix = isl_union_map_flat_range_product(
4119 isl_band_get_partial_schedule(band),
4120 isl_band_get_suffix_schedule(band));
4121 isl_union_map_foreach_map(info->prefix,
4122 &set_stmt_tile_len, info);
4123 } else if (isl_band_has_children(band)) {
4124 isl_band_list *children;
4125 children = isl_band_get_children(band);
4126 list_select_outer_band(gen, children, pos + n, info);
4127 } else {
4128 info->gen = gen;
4129 info->tile_first = pos + n;
4130 info->tile_len = 0;
4131 info->prefix = isl_union_map_flat_range_product(
4132 isl_band_get_prefix_schedule(band),
4133 isl_band_get_partial_schedule(band));
4134 info->suffix = isl_band_get_suffix_schedule(band);
4135 isl_union_map_foreach_map(info->prefix,
4136 &set_stmt_tile_len, info);
4139 isl_band_free(band);
4142 /* Comparison function that returns a non-zero value for band_infos
4143 * with different tile_len fields or different n_parallel fields.
4145 static int cmp_band(const void *p1, const void *p2)
4147 const struct band_info *info1 = p1;
4148 const struct band_info *info2 = p2;
4150 if (info1->tile_len != info2->tile_len)
4151 return info1->tile_len - info2->tile_len;
4153 return info1->n_parallel - info2->n_parallel;
4156 /* Extend "umap" with coordinates with fixed value "val"
4157 * to a total length of "dst_len", assuming the original dimension is "src_len".
4159 static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
4160 int src_len, int dst_len, int val)
4162 isl_space *dim;
4163 isl_map *map;
4164 int i;
4166 dim = isl_union_map_get_space(umap);
4167 map = isl_map_reverse(projection(dim, dst_len, src_len));
4168 for (i = src_len; i < dst_len; ++i)
4169 map = isl_map_fix_si(map, isl_dim_out, i, val);
4171 umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
4173 return umap;
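/* For example (tuple name and values made up), extending
 *
 *	{ A[i] -> [i] }
 *
 * from src_len = 1 to dst_len = 3 with val = 7 yields
 *
 *	{ A[i] -> [i, 7, 7] }
 */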
4176 /* Group bands with the same values for tile_len and n_parallel.
4177 * The prefix schedule is then extended with a fixed coordinate that
4178 * is different for each such group.
4179 * Note that the actual values for this coordinate are not important.
4180 * The bands have already been effectively separated at a higher level
4181 * or they are independent and may be executed in parallel.
4182 * The list of band_info has been sorted before this function is called.
4184 static void separate_bands(struct band_info *info, int n)
4186 int i;
4187 int j = 0;
4189 for (i = 0; i < n; ++i) {
4190 int l = info[i].tile_first;
4192 if (i &&
4193 (info[i].tile_len != info[i - 1].tile_len ||
4194 info[i].n_parallel != info[i - 1].n_parallel))
4195 j++;
4197 info[i].prefix = extend_range(info[i].prefix,
4198 l, l + 1, j);
4199 info[i].tile_first = l + 1;
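/* Illustrative only: for three (already sorted) band_infos with
 * (tile_len, n_parallel) equal to (2, 1), (2, 1) and (3, 2), the
 * prefixes of the first two are extended with the coordinate 0 and
 * the prefix of the third with the coordinate 1.
 */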
4203 /* Select the outermost bands in the elements of the list, align
4204 * their prefix schedules, separate bands with different values
4205 * for tile_len and/or n_parallel and then combine the resulting
4206 * prefix and suffix schedules into a single pair of prefix and
4207 * suffix schedules for the entire list.
4209 static void list_select_outer_band(struct cuda_gen *gen,
4210 __isl_take isl_band_list *list, int pos, struct band_info *list_info)
4212 isl_band *band;
4213 int i;
4214 int n = isl_band_list_n_band(list);
4215 isl_ctx *ctx = isl_band_list_get_ctx(list);
4216 struct band_info *info;
4217 int max_tile_first;
4218 isl_union_map *prefix;
4219 isl_union_map *suffix;
4221 assert(n >= 1);
4222 info = isl_calloc_array(ctx, struct band_info, n);
4223 assert(info);
4225 max_tile_first = 0;
4226 for (i = 0; i < n; ++i) {
4227 band = isl_band_list_get_band(list, i);
4228 band_select_outer_band(gen, band, pos, &info[i]);
4229 if (info[i].tile_first > max_tile_first)
4230 max_tile_first = info[i].tile_first;
4233 for (i = 0; i < n; ++i) {
4234 if (info[i].tile_first == max_tile_first)
4235 continue;
4236 info[i].prefix = extend_range(info[i].prefix,
4237 info[i].tile_first, max_tile_first, 0);
4238 info[i].tile_first = max_tile_first;
4241 qsort(info, n, sizeof(struct band_info), &cmp_band);
4243 for (i = 0; i < n - 1; ++i)
4244 if (info[i].tile_len != info[i + 1].tile_len ||
4245 info[i].n_parallel != info[i + 1].n_parallel)
4246 break;
4248 if (i < n - 1)
4249 separate_bands(info, n);
4251 prefix = info[0].prefix;
4252 suffix = info[0].suffix;
4254 for (i = 1; i < n; ++i) {
4255 prefix = isl_union_map_union(prefix, info[i].prefix);
4256 suffix = isl_union_map_union(suffix, info[i].suffix);
4259 list_info->tile_first = info[0].tile_first;
4260 list_info->tile_len = -1;
4261 list_info->prefix = prefix;
4262 list_info->suffix = suffix;
4264 isl_band_list_free(list);
4265 free(info);
4268 /* Set max_out to the maximal number of output dimensions over
4269 * all maps.
4271 static int update_max_out(__isl_take isl_map *map, void *user)
4273 int *max_out = user;
4274 int n_out = isl_map_dim(map, isl_dim_out);
4276 if (n_out > *max_out)
4277 *max_out = n_out;
4279 isl_map_free(map);
4280 return 0;
4283 struct align_range_data {
4284 int max_out;
4285 isl_union_map *res;
4288 /* Extend the dimension of the range of the given map to data->max_out and
4289 * then add the result to data->res.
4291 static int map_align_range(__isl_take isl_map *map, void *user)
4293 struct align_range_data *data = user;
4294 int i;
4295 isl_space *dim;
4296 isl_map *proj;
4297 int n_out = isl_map_dim(map, isl_dim_out);
4299 dim = isl_union_map_get_space(data->res);
4300 proj = isl_map_reverse(projection(dim, data->max_out, n_out));
4301 for (i = n_out; i < data->max_out; ++i)
4302 proj = isl_map_fix_si(proj, isl_dim_out, i, 0);
4304 map = isl_map_apply_range(map, proj);
4306 data->res = isl_union_map_add_map(data->res, map);
4308 return 0;
4311 /* Extend the ranges of the maps in the union map such that they all have
4312 * the same dimension.
4314 static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
4316 struct align_range_data data;
4318 data.max_out = 0;
4319 isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);
4321 data.res = isl_union_map_empty(isl_union_map_get_space(umap));
4322 isl_union_map_foreach_map(umap, &map_align_range, &data);
4324 isl_union_map_free(umap);
4325 return data.res;
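/* For example (names made up), { A[i] -> [0, i]; B[i, j] -> [1, i, j] }
 * is aligned to { A[i] -> [0, i, 0]; B[i, j] -> [1, i, j] }.
 */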
4328 /* Select the outermost tilable band that (by construction)
4329 * has at least one parallel loop.
4330 * The starting position of the aligned band is stored in
4331 * gen->tile_first.
4332 * The sizes and number of parallel loops may be different in different
4333 * parts of the band forest and are therefore stored in the cuda_stmts.
4335 * Return the complete schedule, with the tilable bands aligned
4336 * at gen->tile_first and padded with zero, if needed.
4338 static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
4339 __isl_keep isl_schedule *schedule)
4341 isl_band_list *list;
4342 struct band_info info;
4344 gen->n_parallel = 0;
4345 gen->tile_len = -1;
4347 list = isl_schedule_get_band_forest(schedule);
4349 list_select_outer_band(gen, list, 0, &info);
4351 gen->tile_first = info.tile_first;
4352 info.suffix = align_range(info.suffix);
4354 return isl_union_map_flat_range_product(info.prefix, info.suffix);
4357 /* Set gen->untiled_len to the number of scheduling dimensions
4358 * for the schedule of the first domain.
4359 * We assume here that this number is the same for all domains.
4361 static int set_untiled_len(__isl_take isl_map *map, void *user)
4363 unsigned *untiled_len = user;
4365 *untiled_len = isl_map_dim(map, isl_dim_out);
4367 isl_map_free(map);
4368 return -1;
4371 /* Compute an appropriate schedule based on the accesses in
4372 * gen->read and gen->write.
4374 * We first compute dependences and then use those to compute
4375 * a schedule that has a parallel loop in each tilable band.
4376 * Finally, we select the outermost tilable band.
4378 static void compute_schedule(struct cuda_gen *gen,
4379 __isl_take isl_union_map *sched)
4381 isl_union_set *domain;
4382 isl_union_map *empty;
4383 isl_union_map *dep_raw, *dep2, *dep3, *dep;
4384 isl_union_map *uninitialized;
4385 isl_schedule *schedule;
4387 empty = isl_union_map_empty(isl_union_map_get_space(sched));
4389 isl_union_map_compute_flow(isl_union_map_copy(gen->read),
4390 isl_union_map_copy(gen->write), empty,
4391 isl_union_map_copy(sched),
4392 &dep_raw, NULL, &uninitialized, NULL);
4393 isl_union_map_compute_flow(isl_union_map_copy(gen->write),
4394 isl_union_map_copy(gen->write),
4395 isl_union_map_copy(gen->read),
4396 isl_union_map_copy(sched),
4397 &dep2, &dep3, NULL, NULL);
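/* That is, dep_raw holds the flow (read-after-write) dependences and
 * "uninitialized" the reads without a corresponding write, while dep2
 * and dep3 hold the write-after-write and write-after-read dependences,
 * respectively.
 */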
4398 isl_union_map_free(sched);
4400 gen->copy_in = isl_union_map_range(uninitialized);
4402 dep = isl_union_map_union(dep2, dep3);
4403 dep = isl_union_map_union(dep, dep_raw);
4404 dep = isl_union_map_coalesce(dep);
4406 domain = extract_domain(gen);
4407 schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
4408 isl_union_map_copy(dep), dep);
4410 sched = select_outer_tilable_band(gen, schedule);
4412 isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
4413 sched = isl_union_map_intersect_domain(sched, domain);
4414 gen->sched = sched;
4416 isl_schedule_free(schedule);
4419 static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
4420 struct cuda_stmt_access **next_access)
4422 struct cuda_stmt_access *access;
4423 isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);
4425 access = isl_alloc_type(ctx, struct cuda_stmt_access);
4426 assert(access);
4427 access->next = NULL;
4428 access->read = expr->acc.read;
4429 access->write = expr->acc.write;
4430 access->access = isl_map_copy(expr->acc.access);
4432 *next_access = access;
4433 next_access = &(*next_access)->next;
4434 return next_access;
4437 static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
4438 struct cuda_stmt_access **next_access)
4440 int i;
4442 for (i = 0; i < expr->n_arg; ++i)
4443 next_access = expr_extract_accesses(expr->args[i],
4444 next_access);
4446 if (expr->type == pet_expr_access)
4447 next_access = expr_extract_access(expr, next_access);
4449 return next_access;
4452 static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
4454 struct cuda_stmt_access **next_access = &stmt->accesses;
4456 stmt->accesses = NULL;
4457 expr_extract_accesses(stmt->body, next_access);
4460 /* Return an array of cuda_stmt representing the statements in "scop".
4462 static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
4463 __isl_keep isl_set *context)
4465 int i;
4466 struct cuda_stmt *stmts;
4468 stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
4469 assert(stmts);
4471 for (i = 0; i < scop->n_stmt; ++i) {
4472 struct cuda_stmt *s = &stmts[i];
4474 s->domain = isl_set_copy(scop->stmts[i]->domain);
4475 s->domain = isl_set_intersect_params(s->domain,
4476 isl_set_copy(context));
4477 s->body = scop->stmts[i]->body;
4478 pet_stmt_extract_accesses(s);
4481 return stmts;
4484 /* Replace the scop in the "input" file by equivalent code
4485 * that uses the GPU. "scop" is assumed to correspond to this scop.
4487 * We first compute a schedule that respects the dependences
4488 * of the original program and select the outermost band
4489 * of tilable dimensions that has at least one parallel loop.
4490 * We then have three blocks of dimensions
4492 * H B G
4494 * The tilable band "B" is first tiled according to "tile" sizes, resulting
4495 * in
4497 * H T P G
4499 * For each iteration of the T loop and for each array, we compute
4500 * the array elements accessed by that iteration, construct a rectangular
4501 * box around it and shift it to the origin. The result is used
4502 * as shared memory for the array.
4504 * We then split off at most 2 parallel loops from the T loops and
4505 * at most 3 parallel loops from the P loops
4507 * H T1 T2 P1 P2 G
4509 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4510 * according to "grid"/"block" sizes.
4512 * H T1T T1P T2 P1T P1P P2 G
4514 * Finally, the T1P and P1P iterators are equated to the block and
4515 * thread dimensions respectively and so are effectively removed.
4516 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4517 * are run on the GPU.
4519 * Code is generated in three stages. We first generate code for the
4520 * host (the H loops), with iterators h%d. Then, for each leaf node
4521 * of the resulting AST, we generate code for the shared loops (up to
4522 * and including T2), with iterators g%d and after equating the H loops
4523 * to h%d parameters and the T1P loops to the block dimensions.
4524 * Finally, we generate code for the remaining loops in a similar fashion.
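*
* Purely illustrative (sizes made up): with two parallel tile loops,
* "grid" sizes {32, 16} and "block" sizes {16, 8}, the two T1P iterators
* are equated to the block ids b0 and b1 and the two P1P iterators to the
* thread ids, so neither group appears as an explicit loop in the
* generated code.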
4526 int generate_cuda(isl_ctx *ctx, struct pet_scop *scop,
4527 struct ppcg_options *options, const char *input)
4529 isl_union_map *sched;
4530 struct cuda_gen gen;
4532 if (!scop)
4533 return -1;
4535 scop = pet_scop_align_params(scop);
4537 gen.ctx = ctx;
4538 gen.context = isl_set_copy(scop->context);
4539 gen.context = add_context_from_str(gen.context, options->ctx);
4540 gen.sizes = extract_sizes_from_str(ctx, options->sizes);
4541 gen.n_stmts = scop->n_stmt;
4542 gen.stmts = extract_stmts(ctx, scop, gen.context);
4543 gen.read = pet_scop_collect_reads(scop);
4544 gen.write = pet_scop_collect_writes(scop);
4545 gen.options = options;
4546 gen.state = cloog_isl_state_malloc(gen.ctx);
4547 gen.scop = scop;
4549 cuda_open_files(&gen.cuda, input);
4551 collect_array_info(&gen);
4553 sched = pet_scop_collect_schedule(scop);
4555 compute_schedule(&gen, sched);
4557 print_host_code(&gen);
4559 cloog_state_free(gen.state);
4560 clear_cuda_gen(&gen);
4562 cuda_close_files(&gen.cuda);
4564 return 0;