Add option to disable the use of private and/or shared memory.
[ppcg.git] / cuda.c
1 /*
2 * Copyright 2010-2011 INRIA Saclay
4 * Use of this software is governed by the GNU LGPLv2.1 license
6 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
7 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
8 * 91893 Orsay, France
9 */
11 #include <assert.h>
12 #include <stdlib.h>
14 #include <isl/polynomial.h>
15 #include <isl/union_set.h>
16 #include <isl/aff.h>
17 #include <isl/ilp.h>
18 #include <isl/flow.h>
19 #include <isl/band.h>
20 #include <isl/schedule.h>
21 #include <isl/options.h>
22 #include <cloog/isl/cloog.h>
24 #include "cuda.h"
25 #include "cuda_common.h"
26 #include "gpucode.h"
27 #include "schedule.h"
28 #include "ppcg_options.h"
30 /* The fields stride, shift and shift_map only contain valid information
31 * if shift != NULL.
32 * If so, they express that current index is such that if you add shift,
33 * then the result is always a multiple of stride.
34 * shift_map contains the mapping
36 * i -> (i + shift)/stride
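 * For example (an illustration, not from the original source): if every
 * accessed index expression has the form 4*i + 2, then shift = 2, stride = 4
 * and shift_map is i -> (i + 2)/4.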
38 struct cuda_array_bound {
39 isl_int size;
40 isl_aff *lb;
42 isl_int stride;
43 isl_qpolynomial *shift;
44 isl_basic_map *shift_map;
47 struct cuda_array_info;
49 /* A group of array references in a kernel that should be handled together.
50 * If private_bound is not NULL, then it is mapped to registers.
51 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
52 * Otherwise, it is accessed from global memory.
54 struct cuda_array_ref_group {
55 /* The references in this group access this array. */
56 struct cuda_array_info *array;
57 /* Position of this group in the list of reference groups of array. */
58 int nr;
60 /* The following fields are used during the construction of the groups.
61 * access is the combined access relation relative to the shared
62 * memory tiling.
63 * write is set if any access in the group is a write.
65 isl_map *access;
66 int write;
68 /* For each index, size and offset of piece in shared memory. */
69 struct cuda_array_bound *shared_bound;
71 /* For each index, size and offset of piece in private memory. */
72 struct cuda_array_bound *private_bound;
74 /* References in this group; point to elements of a linked list. */
75 int n_ref;
76 struct cuda_stmt_access **refs;
79 struct cuda_array_info {
80 isl_dim *dim;
81 /* Element type. */
82 char *type;
83 /* Name of the array. */
84 char *name;
85 /* Number of indices. */
86 unsigned n_index;
87 /* For each index, a bound on the array in that direction. */
88 isl_pw_aff **bound;
89 /* For each index, bound[i] specialized to the current kernel. */
90 isl_pw_aff **local_bound;
92 /* All references to this array; point to elements of a linked list. */
93 int n_ref;
94 struct cuda_stmt_access **refs;
96 /* The reference groups associated to this array. */
97 int n_group;
98 struct cuda_array_ref_group **groups;
100 /* Last shared memory tile dimension that affects the tile of this array. */
101 int last_shared;
102 /* Dimension at which copying to/from shared memory is printed.
103 * if >= 0, then the value is >= last_shared
104 * if -1, then the copying is done at the leaf level.
106 int print_shared_level;
109 /* Print the name of the local copy of a given group of array references.
111 static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
113 int global = 0;
115 if (group->private_bound)
116 fprintf(out, "private_");
117 else if (group->shared_bound)
118 fprintf(out, "shared_");
119 else
120 global = 1;
121 fprintf(out, "%s", group->array->name);
122 if (!global && group->array->n_group > 1)
123 fprintf(out, "_%d", group->nr);
126 /* Collect all references to the given array and store pointers to them
127 * in array->refs.
129 static void collect_references(struct cuda_gen *gen,
130 struct cuda_array_info *array)
132 int i;
133 int n;
135 n = 0;
136 for (i = 0; i < gen->n_stmts; ++i) {
137 struct cuda_stmt *stmt = &gen->stmts[i];
138 struct cuda_stmt_access *access;
140 for (access = stmt->accesses; access; access = access->next) {
141 const char *name;
142 name = isl_map_get_tuple_name(access->access,
143 isl_dim_out);
144 if (name && !strcmp(array->name, name))
145 n++;
149 array->n_ref = n;
150 array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
151 assert(array->refs);
153 n = 0;
154 for (i = 0; i < gen->n_stmts; ++i) {
155 struct cuda_stmt *stmt = &gen->stmts[i];
156 struct cuda_stmt_access *access;
158 for (access = stmt->accesses; access; access = access->next) {
159 const char *name;
160 name = isl_map_get_tuple_name(access->access,
161 isl_dim_out);
162 if (!name || strcmp(array->name, name))
163 continue;
165 array->refs[n++] = access;
170 static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
172 int i;
173 struct cuda_array_bound *bound;
175 bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
176 assert(bound);
178 for (i = 0; i < n_index; ++i) {
179 isl_int_init(bound[i].size);
180 bound[i].lb = NULL;
181 isl_int_init(bound[i].stride);
182 bound[i].shift = NULL;
183 bound[i].shift_map = NULL;
186 return bound;
189 static void free_bound_list(struct cuda_array_bound *bound, int n_index)
191 int j;
193 if (!bound)
194 return;
196 for (j = 0; j < n_index; ++j) {
197 isl_int_clear(bound[j].size);
198 isl_int_clear(bound[j].stride);
199 isl_aff_free(bound[j].lb);
200 isl_qpolynomial_free(bound[j].shift);
201 isl_basic_map_free(bound[j].shift_map);
203 free(bound);
206 static struct pet_array *find_array(struct pet_scop *scop,
207 __isl_keep isl_set *accessed)
209 int i;
210 isl_id *id;
212 id = isl_set_get_tuple_id(accessed);
214 for (i = 0; i < scop->n_array; ++i) {
215 isl_id *id_i;
217 id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
218 isl_id_free(id_i);
219 if (id == id_i)
220 break;
222 isl_id_free(id);
224 return i < scop->n_array ? scop->arrays[i] : NULL;
227 /* Compute bounds on the host arrays based on the accessed elements
228 * and collect all references to the array.
230 static int extract_array_info(__isl_take isl_set *array, void *user)
232 int i;
233 struct cuda_gen *gen = (struct cuda_gen *)user;
234 const char *name;
235 int n_index;
236 isl_pw_aff **bounds;
237 isl_pw_aff **local_bounds;
238 struct pet_array *pa;
240 n_index = isl_set_dim(array, isl_dim_set);
241 name = isl_set_get_tuple_name(array);
242 bounds = isl_alloc_array(isl_set_get_ctx(array),
243 isl_pw_aff *, n_index);
244 assert(bounds);
245 local_bounds = isl_calloc_array(isl_set_get_ctx(array),
246 isl_pw_aff *, n_index);
247 assert(local_bounds);
248 gen->array[gen->n_array].dim = isl_set_get_dim(array);
249 gen->array[gen->n_array].name = strdup(name);
250 gen->array[gen->n_array].n_index = n_index;
251 gen->array[gen->n_array].bound = bounds;
252 gen->array[gen->n_array].local_bound = local_bounds;
254 pa = find_array(gen->scop, array);
255 assert(pa);
257 gen->array[gen->n_array].type = strdup(pa->element_type);
259 for (i = 0; i < n_index; ++i) {
260 isl_set *dom;
261 isl_local_space *ls;
262 isl_aff *one;
263 isl_pw_aff *bound;
264 isl_set *size = i == 0 ? array : pa->extent;
266 bound = isl_set_dim_max(isl_set_copy(size), i);
267 assert(bound);
268 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
269 ls = isl_local_space_from_dim(isl_set_get_dim(dom));
270 one = isl_aff_zero(ls);
271 one = isl_aff_add_constant_si(one, 1);
272 bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
273 bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));
275 bounds[i] = bound;
278 collect_references(gen, &gen->array[gen->n_array]);
280 gen->n_array++;
282 isl_set_free(array);
283 return 0;
286 void collect_array_info(struct cuda_gen *gen)
288 isl_union_set *arrays;
290 arrays = isl_union_map_range(isl_union_map_copy(gen->read));
291 arrays = isl_union_set_union(arrays,
292 isl_union_map_range(isl_union_map_copy(gen->write)));
293 arrays = isl_union_set_coalesce(arrays);
295 gen->n_array = isl_union_set_n_set(arrays);
296 gen->array = isl_alloc_array(gen->ctx,
297 struct cuda_array_info, gen->n_array);
298 assert(gen->array);
299 gen->n_array = 0;
300 isl_union_set_foreach_set(arrays, &extract_array_info, gen);
301 isl_union_set_free(arrays);
304 static void free_array_info(struct cuda_gen *gen)
306 int i, j;
308 for (i = 0; i < gen->n_array; ++i) {
309 int n_index = gen->array[i].n_index;
310 free(gen->array[i].type);
311 free(gen->array[i].name);
312 for (j = 0; j < n_index; ++j) {
313 isl_pw_aff_free(gen->array[i].bound[j]);
314 isl_pw_aff_free(gen->array[i].local_bound[j]);
316 isl_dim_free(gen->array[i].dim);
317 free(gen->array[i].bound);
318 free(gen->array[i].local_bound);
319 free(gen->array[i].refs);
321 free(gen->array);
324 static void declare_device_arrays(struct cuda_gen *gen)
326 int i;
328 for (i = 0; i < gen->n_array; ++i)
329 fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
330 gen->array[i].type, gen->array[i].name);
331 fprintf(gen->cuda.host_c, "\n");
334 static void print_array_size(struct cuda_gen *gen, FILE *out,
335 struct cuda_array_info *array)
337 int i;
338 isl_printer *prn;
340 prn = isl_printer_to_file(gen->ctx, out);
341 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
342 for (i = 0; i < array->n_index; ++i) {
343 prn = isl_printer_print_str(prn, "(");
344 prn = isl_printer_print_pw_aff(prn, array->bound[i]);
345 prn = isl_printer_print_str(prn, ") * ");
347 prn = isl_printer_print_str(prn, "sizeof(");
348 prn = isl_printer_print_str(prn, array->type);
349 prn = isl_printer_print_str(prn, ")");
350 isl_printer_free(prn);
353 static void allocate_device_arrays(struct cuda_gen *gen)
355 int i;
357 for (i = 0; i < gen->n_array; ++i) {
358 fprintf(gen->cuda.host_c,
359 "cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
360 gen->array[i].name);
361 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
362 fprintf(gen->cuda.host_c, "));\n");
364 fprintf(gen->cuda.host_c, "\n");
367 static void free_device_arrays(struct cuda_gen *gen)
369 int i;
371 for (i = 0; i < gen->n_array; ++i)
372 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaFree(dev_%s));\n",
373 gen->array[i].name);
376 /* Check if a cuda array is a scalar. A scalar is a value that is not stored
377 * as an array or through a pointer reference, but as a single data element. At
378 * the moment, scalars are represented as zero-dimensional arrays.
380 static int cuda_array_is_scalar(struct cuda_array_info *array)
382 return (array->n_index == 0);
385 static void copy_arrays_to_device(struct cuda_gen *gen)
387 int i;
389 for (i = 0; i < gen->n_array; ++i) {
390 isl_dim *dim;
391 isl_set *read_i;
392 int empty;
394 dim = isl_dim_copy(gen->array[i].dim);
395 read_i = isl_union_set_extract_set(gen->copy_in, dim);
396 empty = isl_set_fast_is_empty(read_i);
397 isl_set_free(read_i);
398 if (empty)
399 continue;
401 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(dev_%s,",
402 gen->array[i].name);
404 if (cuda_array_is_scalar(&(gen->array[i])))
405 fprintf(gen->cuda.host_c, " &%s, ",
406 gen->array[i].name);
407 else
408 fprintf(gen->cuda.host_c, " %s, ", gen->array[i].name);
410 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
411 fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice));\n");
413 fprintf(gen->cuda.host_c, "\n");
416 static void copy_arrays_from_device(struct cuda_gen *gen)
418 int i;
419 isl_union_set *write;
420 write = isl_union_map_range(isl_union_map_copy(gen->write));
422 for (i = 0; i < gen->n_array; ++i) {
423 isl_dim *dim;
424 isl_set *write_i;
425 int empty;
427 dim = isl_dim_copy(gen->array[i].dim);
428 write_i = isl_union_set_extract_set(write, dim);
429 empty = isl_set_fast_is_empty(write_i);
430 isl_set_free(write_i);
431 if (empty)
432 continue;
434 fprintf(gen->cuda.host_c,
435 "cudaCheckReturn(cudaMemcpy(%s, dev_%s, ",
436 gen->array[i].name, gen->array[i].name);
437 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
438 fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost));\n");
441 isl_union_set_free(write);
442 fprintf(gen->cuda.host_c, "\n");
445 static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
446 int *sizes, int len)
448 int i;
449 FILE *file;
451 file = fopen(filename, "r");
452 if (!file)
453 return;
455 for (i = 0; i < len; ++i)
456 if (fscanf(file, "%d", &sizes[i]) < 1)
457 break;
459 fclose(file);
462 static void reverse_list(int *list, int len)
464 int i;
465 int t;
467 for (i = 0; 2 * i < len; ++i) {
468 t = list[i];
469 list[i] = list[len - 1 - i];
470 list[len - 1 - i] = t;
474 /* Read user-specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
475 * after filling in some potentially useful defaults.
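 * As an illustration (the file format is inferred from read_sizes_from_file):
 * each file simply contains whitespace-separated integers, e.g. a "tile.sizes"
 * file containing "32 16" requests tile sizes 32 and 16 for the first two
 * tiled loops.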
477 static void read_sizes(struct cuda_gen *gen)
479 int n;
481 gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
482 assert(gen->tile_size);
483 for (n = 0; n < gen->tile_len; ++n)
484 gen->tile_size[n] = gen->options->tile_size;
485 read_sizes_from_file(gen, "tile.sizes", gen->tile_size, gen->tile_len);
487 n = gen->n_parallel;
488 gen->n_block = (n <= 3) ? n : 3;
489 switch (gen->n_block) {
490 case 1:
491 gen->block_dim[0] = 512;
492 break;
493 case 2:
494 gen->block_dim[0] = 32;
495 gen->block_dim[1] = 16;
496 break;
497 default:
498 gen->block_dim[0] = 32;
499 gen->block_dim[1] = 4;
500 gen->block_dim[2] = 4;
501 break;
503 read_sizes_from_file(gen, "block.sizes", gen->block_dim, gen->n_block);
504 reverse_list(gen->block_dim, gen->n_block);
506 gen->n_grid = (n <= 2) ? n : 2;
507 switch (gen->n_grid) {
508 case 1:
509 gen->grid_dim[0] = 32768;
510 break;
511 default:
512 gen->grid_dim[0] = 256;
513 gen->grid_dim[1] = 256;
514 break;
516 read_sizes_from_file(gen, "grid.sizes", gen->grid_dim, gen->n_grid);
517 reverse_list(gen->grid_dim, gen->n_grid);
520 static void free_stmts(struct cuda_stmt *stmts, int n)
522 int i;
524 for (i = 0; i < n; ++i) {
525 struct cuda_stmt_access *access, *next;
527 for (access = stmts[i].accesses; access; access = next) {
528 next = access->next;
529 isl_map_free(access->access);
530 free(access);
533 isl_set_free(stmts[i].domain);
535 free(stmts);
538 void clear_cuda_gen(struct cuda_gen *gen)
540 free_stmts(gen->stmts, gen->n_stmts);
541 free_array_info(gen);
542 isl_set_free(gen->context);
543 isl_union_set_free(gen->copy_in);
544 isl_union_map_free(gen->sched);
545 isl_union_map_free(gen->read);
546 isl_union_map_free(gen->write);
549 static void print_reverse_list(FILE *out, int len, int *list)
551 int i;
553 for (i = 0; i < len; ++i) {
554 if (i)
555 fprintf(out, ", ");
556 fprintf(out, "%d", list[len - 1 - i]);
560 static void print_kernel_launch(struct cuda_gen *gen,
561 __isl_keep isl_union_set *arrays)
563 int i;
564 int first = 1;
565 unsigned nparam;
566 isl_dim *dim;
568 print_indent(gen->code.dst, gen->code.indent);
569 fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
570 gen->kernel_id, gen->kernel_id, gen->kernel_id);
571 fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
572 gen->kernel_id);
573 fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
574 gen->kernel_id);
576 for (i = 0; i < gen->n_array; ++i) {
577 isl_dim *dim;
578 isl_set *arr;
579 int empty;
581 dim = isl_dim_copy(gen->array[i].dim);
582 arr = isl_union_set_extract_set(arrays, dim);
583 empty = isl_set_fast_is_empty(arr);
584 isl_set_free(arr);
585 if (empty)
586 continue;
588 if (!first) {
589 fprintf(gen->code.dst, ", ");
590 fprintf(gen->cuda.kernel_c, ", ");
591 fprintf(gen->cuda.kernel_h, ", ");
594 fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
595 fprintf(gen->cuda.kernel_c, "%s *%s",
596 gen->array[i].type, gen->array[i].name);
597 fprintf(gen->cuda.kernel_h, "%s *%s",
598 gen->array[i].type, gen->array[i].name);
600 first = 0;
603 dim = isl_union_set_get_dim(arrays);
604 nparam = isl_dim_size(dim, isl_dim_param);
605 for (i = 0; i < nparam; ++i) {
606 const char *name = isl_dim_get_name(dim, isl_dim_param, i);
607 if (!first) {
608 fprintf(gen->code.dst, ", ");
609 fprintf(gen->cuda.kernel_c, ", ");
610 fprintf(gen->cuda.kernel_h, ", ");
612 fprintf(gen->code.dst, "%s", name);
613 fprintf(gen->cuda.kernel_c, "int %s", name);
614 fprintf(gen->cuda.kernel_h, "int %s", name);
615 first = 0;
617 isl_dim_free(dim);
619 for (i = 0; i < gen->tile_first; ++i) {
620 if (!first) {
621 fprintf(gen->code.dst, ", ");
622 fprintf(gen->cuda.kernel_c, ", ");
623 fprintf(gen->cuda.kernel_h, ", ");
625 fprintf(gen->code.dst, "h%d", i);
626 fprintf(gen->cuda.kernel_c, "int h%d", i);
627 fprintf(gen->cuda.kernel_h, "int h%d", i);
628 first = 0;
631 fprintf(gen->code.dst, ");\n");
632 fprintf(gen->cuda.kernel_c, ")\n");
633 fprintf(gen->cuda.kernel_h, ");\n");
635 fprintf(gen->code.dst, "cudaCheckKernel();\n");
638 /* Construct a map from a domain of dimensionality "len"
639 * to a domain of dimensionality "len" + "tile_len" that tiles
640 * the "tile_len" coordinates starting at "first".
641 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
642 * "dim" prescribes the parameters.
644 static __isl_give isl_map *tile(__isl_take isl_dim *dim, int len,
645 int first, int tile_len, int *tile_size)
647 int i;
648 isl_int v;
649 isl_basic_map *bmap;
650 isl_constraint *c;
652 isl_int_init(v);
654 dim = isl_dim_add(dim, isl_dim_in, len);
655 dim = isl_dim_add(dim, isl_dim_out, len + tile_len);
656 bmap = isl_basic_map_universe(isl_dim_copy(dim));
658 for (i = 0; i < len - tile_len; ++i) {
659 int j = i < first ? i : i + tile_len;
660 int k = i < first ? i : i + 2 * tile_len;
662 c = isl_equality_alloc(isl_dim_copy(dim));
663 isl_int_set_si(v, -1);
664 isl_constraint_set_coefficient(c, isl_dim_in, j, v);
665 isl_int_set_si(v, 1);
666 isl_constraint_set_coefficient(c, isl_dim_out, k, v);
667 bmap = isl_basic_map_add_constraint(bmap, c);
670 for (i = 0; i < tile_len; ++i) {
671 c = isl_equality_alloc(isl_dim_copy(dim));
672 isl_int_set_si(v, -1);
673 isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
674 isl_int_set_si(v, tile_size[i]);
675 isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
676 isl_int_set_si(v, 1);
677 isl_constraint_set_coefficient(c, isl_dim_out,
678 first + i + tile_len, v);
679 bmap = isl_basic_map_add_constraint(bmap, c);
681 c = isl_inequality_alloc(isl_dim_copy(dim));
682 isl_int_set_si(v, 1);
683 isl_constraint_set_coefficient(c, isl_dim_out,
684 first + i + tile_len, v);
685 bmap = isl_basic_map_add_constraint(bmap, c);
687 c = isl_inequality_alloc(isl_dim_copy(dim));
688 isl_int_set_si(v, -1);
689 isl_constraint_set_coefficient(c, isl_dim_out,
690 first + i + tile_len, v);
691 isl_int_set_si(v, tile_size[i] - 1);
692 isl_constraint_set_constant(c, v);
693 bmap = isl_basic_map_add_constraint(bmap, c);
696 isl_dim_free(dim);
697 isl_int_clear(v);
699 return isl_map_from_basic_map(bmap);
702 /* Construct a map from a domain of dimensionality "len"
703 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
704 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
705 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
706 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
707 * that are projected out at the end.
708 * "dim" prescribes the parameters.
710 static __isl_give isl_map *wrap(__isl_take isl_dim *dim, int len,
711 int first, int wrap_len, int *wrap_size)
713 int i;
714 isl_basic_map *bmap;
715 isl_constraint *c;
717 dim = isl_dim_add(dim, isl_dim_in, len);
718 dim = isl_dim_add(dim, isl_dim_out, len + 2 * wrap_len);
719 bmap = isl_basic_map_universe(isl_dim_copy(dim));
721 for (i = 0; i < len; ++i) {
722 int k = i < first + wrap_len ? i : i + 2 * wrap_len;
724 c = isl_equality_alloc(isl_dim_copy(dim));
725 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
726 isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
727 bmap = isl_basic_map_add_constraint(bmap, c);
730 for (i = 0; i < wrap_len; ++i) {
731 c = isl_equality_alloc(isl_dim_copy(dim));
732 isl_constraint_set_coefficient_si(c, isl_dim_out,
733 first + i, -1);
734 isl_constraint_set_coefficient_si(c, isl_dim_out,
735 first + wrap_len + i, 1);
736 isl_constraint_set_coefficient_si(c, isl_dim_out,
737 first + 2 * wrap_len + i, wrap_size[i]);
738 bmap = isl_basic_map_add_constraint(bmap, c);
740 c = isl_inequality_alloc(isl_dim_copy(dim));
741 isl_constraint_set_coefficient_si(c, isl_dim_out,
742 first + wrap_len + i, 1);
743 bmap = isl_basic_map_add_constraint(bmap, c);
745 c = isl_inequality_alloc(isl_dim_copy(dim));
746 isl_constraint_set_coefficient_si(c, isl_dim_out,
747 first + wrap_len + i, -1);
748 isl_constraint_set_constant_si(c, wrap_size[i] - 1);
749 bmap = isl_basic_map_add_constraint(bmap, c);
752 isl_dim_free(dim);
754 bmap = isl_basic_map_project_out(bmap, isl_dim_out,
755 first + 2 * wrap_len, wrap_len);
757 return isl_map_from_basic_map(bmap);
760 /* Add "n" parameters named prefix%d.
762 static __isl_give isl_set *add_params( __isl_take isl_set *set,
763 int n, const char *prefix)
765 int i;
766 unsigned nparam;
767 char name[20];
769 nparam = isl_set_dim(set, isl_dim_param);
770 set = isl_set_add_dims(set, isl_dim_param, n);
772 for (i = 0; i < n; ++i) {
773 snprintf(name, sizeof(name), "%s%d", prefix, i);
774 set = isl_set_set_dim_name(set, isl_dim_param,
775 nparam + i, name);
778 return set;
781 /* Equate the "n" dimensions of "set" starting at "first" to
782 * freshly created parameters named prefix%d.
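 * For example (an illustration): parametrize(set, 0, 2, "h") adds parameters
 * h0 and h1 and intersects "set" with the constraints h0 = s_0 and h1 = s_1,
 * where s_0 and s_1 are the first two set dimensions.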
784 static __isl_give isl_set *parametrize(__isl_take isl_set *set,
785 int first, int n, const char *prefix)
787 int i;
788 unsigned nparam;
789 isl_int v;
790 isl_dim *dim;
791 isl_basic_set *bset;
792 isl_constraint *c;
794 nparam = isl_set_dim(set, isl_dim_param);
796 set = add_params(set, n, prefix);
798 dim = isl_set_get_dim(set);
799 bset = isl_basic_set_universe(isl_dim_copy(dim));
801 isl_int_init(v);
803 for (i = 0; i < n; ++i) {
804 c = isl_equality_alloc(isl_dim_copy(dim));
805 isl_int_set_si(v, -1);
806 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
807 isl_int_set_si(v, 1);
808 isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
809 bset = isl_basic_set_add_constraint(bset, c);
812 isl_int_clear(v);
813 isl_dim_free(dim);
815 return isl_set_intersect(set, isl_set_from_basic_set(bset));
818 static __isl_give isl_set *parametrization(__isl_take isl_dim *dim,
819 int len, int first, int n, const char *prefix)
821 isl_set *set;
823 dim = isl_dim_add(dim, isl_dim_set, len);
824 set = isl_set_universe(dim);
826 return parametrize(set, first, n, prefix);
829 /* Tile the B loops over the tile sizes and then tile/wrap
830 * the T1 loops over the blocks.
832 static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
833 __isl_take isl_union_map *sched)
835 isl_dim *dim;
836 isl_map *tiling, *block_tiling;
838 dim = isl_union_map_get_dim(sched);
839 tiling = tile(isl_dim_copy(dim), gen->untiled_len,
840 gen->tile_first, gen->tile_len, gen->tile_size);
842 if (gen->options->wrap)
843 block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
844 gen->tile_first, gen->n_grid, gen->grid_dim);
845 else
846 block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
847 gen->tile_first, gen->n_grid, gen->grid_dim);
849 gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;
851 tiling = isl_map_apply_range(tiling, block_tiling);
853 sched = isl_union_map_apply_range(sched,
854 isl_union_map_from_map(tiling));
856 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
858 return sched;
861 static __isl_give isl_union_map *parametrize_tiled_schedule(
862 struct cuda_gen *gen, __isl_take isl_union_map *sched)
864 isl_dim *dim;
865 isl_set *par;
867 dim = isl_union_map_get_dim(sched);
868 par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
869 sched = isl_union_map_intersect_range(sched,
870 isl_union_set_from_set(par));
872 dim = isl_union_map_get_dim(sched);
873 par = parametrization(dim, gen->tiled_len,
874 gen->tile_first + gen->n_grid, gen->n_grid, "b");
875 sched = isl_union_map_intersect_range(sched,
876 isl_union_set_from_set(par));
878 return sched;
881 /* Tile/wrap the P1 loops over the threads.
883 static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
884 __isl_take isl_union_map *sched)
886 isl_dim *dim;
887 isl_map *tiling;
888 isl_set *par;
890 dim = isl_union_map_get_dim(sched);
892 if (gen->options->wrap)
893 tiling = wrap(isl_dim_copy(dim), gen->tiled_len,
894 gen->shared_len, gen->n_block, gen->block_dim);
895 else
896 tiling = tile(isl_dim_copy(dim), gen->tiled_len,
897 gen->shared_len, gen->n_block, gen->block_dim);
898 gen->thread_tiled_len = gen->tiled_len + gen->n_block;
900 sched = isl_union_map_apply_range(sched,
901 isl_union_map_from_map(tiling));
903 par = parametrization(dim, gen->thread_tiled_len,
904 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
905 gen->n_block, "t");
906 sched = isl_union_map_intersect_range(sched,
907 isl_union_set_from_set(par));
909 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
911 return sched;
914 /* If the user asked for it, scale the shared memory tile loops
915 * (T1P and T2) of "sched" by gen->tile_size[i].
916 * If we are not performing "wrapping", then additionally scale the T1P
917 * loops by gen->grid_dim[i].
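 * As an illustration (not part of the original comment): with
 * tile_size[i] = 32 and grid_dim[i] = 256, a T1P iterator is scaled by 32
 * when wrapping is used and by 32 * 256 = 8192 otherwise.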
919 static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
920 __isl_take isl_union_map *sched)
922 int i;
923 isl_dim *dim;
924 isl_basic_map *scale;
925 isl_constraint *c;
927 if (!gen->options->scale_tile_loops)
928 return sched;
930 dim = isl_union_map_get_dim(sched);
931 dim = isl_dim_add(dim, isl_dim_in, gen->tiled_len);
932 dim = isl_dim_add(dim, isl_dim_out, gen->tiled_len);
933 scale = isl_basic_map_universe(isl_dim_copy(dim));
935 for (i = 0; i < gen->tiled_len; ++i) {
936 int f = 1;
938 if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
939 f = gen->tile_size[i - gen->tile_first];
940 if (!gen->options->wrap)
941 f *= gen->grid_dim[i - gen->tile_first];
942 } else if (i >= gen->tile_first + gen->n_grid &&
943 i < gen->tile_first + gen->n_grid + gen->tile_len) {
944 f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
947 c = isl_equality_alloc(isl_dim_copy(dim));
948 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
949 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
950 scale = isl_basic_map_add_constraint(scale, c);
953 isl_dim_free(dim);
955 sched = isl_union_map_apply_range(sched,
956 isl_union_map_from_map(isl_map_from_basic_map(scale)));
958 return sched;
961 /* If we are not performing "wrapping" and if the user asked for it,
962 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
964 static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
965 __isl_take isl_union_map *sched)
967 int i;
968 isl_dim *dim;
969 isl_basic_map *scale;
970 isl_constraint *c;
972 if (gen->options->wrap)
973 return sched;
974 if (!gen->options->scale_tile_loops)
975 return sched;
977 dim = isl_union_map_get_dim(sched);
978 dim = isl_dim_add(dim, isl_dim_in, gen->thread_tiled_len);
979 dim = isl_dim_add(dim, isl_dim_out, gen->thread_tiled_len);
980 scale = isl_basic_map_universe(isl_dim_copy(dim));
982 for (i = 0; i < gen->thread_tiled_len; ++i) {
983 int f = 1;
985 if (i >= gen->shared_len &&
986 i < gen->shared_len + gen->n_block)
987 f = gen->block_dim[i - gen->shared_len];
989 c = isl_equality_alloc(isl_dim_copy(dim));
990 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
991 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
992 scale = isl_basic_map_add_constraint(scale, c);
995 isl_dim_free(dim);
997 sched = isl_union_map_apply_range(sched,
998 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1000 return sched;
1003 /* If we are not performing "wrapping" and if the user asked for it,
1004 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
1006 static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
1007 __isl_take isl_union_map *sched, int len, int first, int n_tile)
1009 int i;
1010 isl_dim *dim;
1011 isl_basic_map *scale;
1012 isl_constraint *c;
1014 if (gen->options->wrap)
1015 return sched;
1016 if (!gen->options->scale_tile_loops)
1017 return sched;
1019 dim = isl_union_map_get_dim(sched);
1020 dim = isl_dim_add(dim, isl_dim_in, len);
1021 dim = isl_dim_add(dim, isl_dim_out, len);
1022 scale = isl_basic_map_universe(isl_dim_copy(dim));
1024 for (i = 0; i < len; ++i) {
1025 int f = 1;
1027 if (i >= first && i < first + n_tile)
1028 f = gen->block_dim[i - first];
1030 c = isl_equality_alloc(isl_dim_copy(dim));
1031 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1032 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1033 scale = isl_basic_map_add_constraint(scale, c);
1036 isl_dim_free(dim);
1038 sched = isl_union_map_apply_range(sched,
1039 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1041 return sched;
1044 /* If print_user_stmt is set, we want to print the statements ourselves,
1045 * instead of relying on the C preprocessor. If so, we need to use
1046 * the stop option so that the domains will be saved on the statement
1047 * nodes.
1049 static void print_cloog_shared_body(struct cuda_gen *gen,
1050 __isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
1051 void (*print_user_stmt)(struct gpucode_info *info,
1052 struct clast_user_stmt *s),
1053 int first_unroll)
1055 int i;
1056 CloogOptions *options;
1057 CloogDomain *cloog_context;
1058 CloogUnionDomain *ud;
1059 CloogInput *input;
1060 struct clast_stmt *stmt;
1061 char name[20];
1063 sched = isl_union_map_copy(sched);
1064 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
1066 options = cloog_options_malloc(gen->state);
1067 options->language = LANGUAGE_C;
1068 options->strides = 1;
1069 options->sh = 1;
1070 options->f = len;
1071 options->l = -1;
1072 options->override = 1;
1073 options->save_domains = 1;
1074 options->noscalars = 1;
1075 options->first_unroll = first_unroll;
1077 ud = cloog_union_domain_from_isl_union_map(sched);
1078 for (i = 0; i < len; ++i) {
1079 snprintf(name, sizeof(name), "c%d", i);
1080 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
1082 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
1083 input = cloog_input_alloc(cloog_context, ud);
1085 stmt = cloog_clast_create_from_input(input, options);
1087 gen->stmt_code.indent = gen->kernel_code.indent;
1088 gen->stmt_code.dst = gen->cuda.kernel_c;
1089 gen->stmt_code.print_user_stmt = print_user_stmt;
1090 gen->stmt_code.print_user_stmt_list = NULL;
1091 gen->stmt_code.print_for_head = NULL;
1092 gen->stmt_code.print_for_foot = NULL;
1093 gen->stmt_code.user = gen;
1094 gpu_print_host_stmt(&gen->stmt_code, stmt);
1096 cloog_clast_free(stmt);
1097 cloog_options_free(options);
1100 /* Add "len" parameters p[i] called prefix%d,
1101 * with bounds 0 <= p[i] < size[i].
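 * For example, print_shared_body() below calls
 * add_bounded_parameters(context, gen->n_block, gen->block_dim, "t")
 * to introduce thread identifiers t0, t1, ... with 0 <= t_i < block_dim[i].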
1103 __isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
1104 int len, int *size, const char *prefix)
1106 int i;
1107 unsigned nparam;
1108 isl_int v;
1109 isl_dim *dim;
1110 isl_basic_set *bset;
1111 isl_constraint *c;
1112 char name[20];
1114 nparam = isl_set_dim(set, isl_dim_param);
1115 set = isl_set_add_dims(set, isl_dim_param, len);
1117 for (i = 0; i < len; ++i) {
1118 snprintf(name, sizeof(name), "%s%d", prefix, i);
1119 set = isl_set_set_dim_name(set, isl_dim_param,
1120 nparam + i, name);
1123 dim = isl_set_get_dim(set);
1124 bset = isl_basic_set_universe(isl_dim_copy(dim));
1126 isl_int_init(v);
1128 for (i = 0; i < len; ++i) {
1129 c = isl_inequality_alloc(isl_dim_copy(dim));
1130 isl_int_set_si(v, 1);
1131 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1132 bset = isl_basic_set_add_constraint(bset, c);
1134 c = isl_inequality_alloc(isl_dim_copy(dim));
1135 isl_int_set_si(v, -1);
1136 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1137 isl_int_set_si(v, size[i] - 1);
1138 isl_constraint_set_constant(c, v);
1139 bset = isl_basic_set_add_constraint(bset, c);
1142 isl_int_clear(v);
1143 isl_dim_free(dim);
1145 return isl_set_intersect(set, isl_set_from_basic_set(bset));
1148 static void print_shared_body(struct cuda_gen *gen,
1149 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
1150 int len, void (*print_user_stmt)(struct gpucode_info *info,
1151 struct clast_user_stmt *s),
1152 int first_unroll)
1154 isl_set *context;
1156 context = isl_set_copy(shared_domain);
1157 context = parametrize(context, 0, gen->shared_len, "g");
1158 context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
1159 context = add_bounded_parameters(context,
1160 gen->n_block, gen->block_dim, "t");
1162 print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
1163 first_unroll);
1165 isl_set_free(context);
1168 /* Given a tile of an array, construct a map that maps each element
1169 * of the tile to a copy of the tile shifted to the origin
1170 * (based on the lower bounds in group->private_bound or group->shared_bound).
1171 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1172 * is applied to the index first.
1173 * The domain of the resulting map is "access",
1174 * while the range space is anonymous.
1176 static __isl_give isl_map *shift_access(__isl_take isl_set *access,
1177 struct cuda_array_ref_group *group)
1179 int i;
1180 isl_dim *dim;
1181 isl_basic_set *bset;
1182 isl_basic_map *bmap;
1183 isl_aff *lb;
1184 isl_basic_set *offset;
1185 isl_basic_map *shift;
1186 isl_basic_map *pre_shift;
1187 isl_map *sched;
1188 const char *name;
1189 struct cuda_array_bound *bounds;
1190 int n_index = group->array->n_index;
1192 bounds = group->private_bound;
1193 if (!bounds)
1194 bounds = group->shared_bound;
1196 dim = isl_set_get_dim(access);
1197 dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
1198 offset = isl_basic_set_universe(dim);
1199 for (i = 0; i < n_index; ++i) {
1200 lb = isl_aff_copy(bounds[i].lb);
1201 bmap = isl_basic_map_from_aff(lb);
1202 bset = isl_basic_map_range(bmap);
1203 offset = isl_basic_set_flat_product(offset, bset);
1205 offset = isl_basic_set_neg(offset);
1207 dim = isl_dim_map_from_set(isl_set_get_dim(access));
1208 shift = isl_basic_map_identity(dim);
1209 shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);
1211 bset = isl_basic_set_universe(isl_set_get_dim(access));
1212 bmap = isl_basic_map_from_domain_and_range(bset, offset);
1214 shift = isl_basic_map_sum(shift, bmap);
1216 dim = isl_set_get_dim(access);
1217 dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
1218 dim = isl_dim_map_from_set(dim);
1219 pre_shift = isl_basic_map_universe(isl_dim_copy(dim));
1220 dim = isl_dim_add(dim, isl_dim_in, 1);
1221 dim = isl_dim_add(dim, isl_dim_out, 1);
1222 for (i = 0; i < n_index; ++i) {
1223 if (!bounds[i].shift_map)
1224 bmap = isl_basic_map_identity(isl_dim_copy(dim));
1225 else
1226 bmap = isl_basic_map_copy(bounds[i].shift_map);
1227 pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
1229 isl_dim_free(dim);
1230 name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
1231 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
1232 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
1233 shift = isl_basic_map_apply_range(pre_shift, shift);
1235 sched = isl_map_from_basic_map(shift);
1236 sched = isl_map_intersect_domain(sched, access);
1238 return sched;
1241 /* Construct a schedule for iterating over all elements in the given
1242 * piece of an array. The schedule iterates over a copy of the piece
1243 * that is shifted to the origin.
1244 * We subsequently also perform the tiling/wrapping over the threads.
1246 * In particular, we tile the final iterators so that the final thread
1247 * dimension runs over the final array dimension.
1248 * However, if those final iterators have only a single iteration,
1249 * we try to tile earlier iterators instead.
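 * As an illustration (not part of the original comment): with gen->n_block = 2
 * and a 3-dimensional tile, the two thread dimensions are mapped to the last
 * two array dimensions, so that the last thread dimension runs over the
 * innermost array dimension.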
1251 static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
1252 __isl_take isl_set *access, struct cuda_array_ref_group *group)
1254 isl_dim *dim;
1255 isl_map *sched;
1256 isl_union_map *usched;
1257 isl_map *tiling;
1258 isl_set *par;
1259 unsigned nvar = isl_set_dim(access, isl_dim_set);
1260 int n_tile;
1261 int first;
1263 sched = shift_access(access, group);
1265 n_tile = gen->n_block;
1266 if (n_tile > nvar) {
1267 int i;
1268 sched = isl_map_insert_dims(sched,
1269 isl_dim_out, 0, n_tile - nvar);
1270 for (i = 0; i < n_tile - nvar; ++i)
1271 sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
1272 nvar = n_tile;
1275 first = nvar - n_tile;
1277 for (; first > 0; first --)
1278 if (!isl_map_plain_is_fixed(sched, isl_dim_out,
1279 first + n_tile - 1, NULL))
1280 break;
1282 dim = isl_map_get_dim(sched);
1283 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
1284 dim = isl_dim_drop(dim, isl_dim_out, 0, nvar);
1285 if (gen->options->wrap)
1286 tiling = wrap(isl_dim_copy(dim), nvar, first,
1287 n_tile, gen->block_dim);
1288 else
1289 tiling = tile(isl_dim_copy(dim), nvar, first,
1290 n_tile, gen->block_dim);
1291 sched = isl_map_apply_range(sched, tiling);
1293 par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
1294 usched = isl_union_map_from_map(sched);
1295 usched = isl_union_map_intersect_range(usched,
1296 isl_union_set_from_set(par));
1298 usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
1299 first, n_tile);
1301 return usched;
1304 static void print_shared_access(struct cuda_gen *gen,
1305 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1306 const char *type, struct cuda_array_ref_group *group)
1308 const char *array_name;
1309 char *name;
1310 isl_ctx *ctx;
1311 isl_union_map *sched;
1312 unsigned nvar = isl_set_dim(access, isl_dim_set);
1313 int n_tile;
1315 ctx = isl_set_get_ctx(access);
1316 array_name = isl_set_get_tuple_name(access);
1317 name = isl_alloc_array(ctx, char,
1318 strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
1319 if (group->array->n_group > 1)
1320 sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
1321 else
1322 sprintf(name, "%s_shared_%s", type, array_name);
1323 access = isl_set_set_tuple_name(access, name);
1324 free(name);
1326 sched = access_schedule(gen, access, group);
1328 n_tile = gen->n_block;
1329 if (n_tile > nvar)
1330 n_tile = nvar;
1332 print_shared_body(gen, shared_domain, sched, nvar + n_tile, NULL, -1);
1334 isl_union_map_free(sched);
1337 /* Return the union of all read (read = 1) and/or write (write = 1)
1338 * access relations in the group.
1340 static __isl_give isl_union_map *group_access_relation(
1341 struct cuda_array_ref_group *group, int read, int write)
1343 int i;
1344 isl_union_map *access;
1346 access = isl_union_map_empty(isl_map_get_dim(group->access));
1347 for (i = 0; i < group->n_ref; ++i) {
1348 isl_map *map_i;
1350 if (!((read && group->refs[i]->read) ||
1351 (write && group->refs[i]->write)))
1352 continue;
1353 map_i = isl_map_copy(group->refs[i]->access);
1354 access = isl_union_map_union(access,
1355 isl_union_map_from_map(map_i));
1358 return access;
1361 /* Check that none of the shared memory tiles involve any strides.
1363 static int no_strides(struct cuda_array_ref_group *group)
1365 int i;
1366 int n_index = group->array->n_index;
1368 for (i = 0; i < n_index; ++i)
1369 if (group->shared_bound[i].shift)
1370 return 0;
1372 return 1;
1375 /* Return a set containing the values of the given index i
1376 * of the elements in the array tile in global memory that corresponds
1377 * to the shared memory copy.
1378 * In particular, if a is the index, we return a set with constraints
1380 * tile_offset <= a <= tile_offset + tile_size - 1
1382 * and
1384 * 0 <= a <= array_size - 1
1387 static __isl_give isl_set *group_tile_dim(struct cuda_array_ref_group *group,
1388 int i)
1390 isl_basic_set *tile;
1391 isl_aff *aff;
1392 isl_constraint *c;
1393 isl_local_space *ls;
1394 isl_pw_aff *bound;
1395 isl_set *dom;
1396 isl_set *tile_set;
1398 aff = isl_aff_copy(group->shared_bound[i].lb);
1399 aff = isl_aff_add_dims(aff, isl_dim_set, 1);
1400 ls = isl_aff_get_local_space(aff);
1401 aff = isl_aff_neg(aff);
1402 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1403 c = isl_inequality_from_aff(isl_aff_copy(aff));
1404 tile = isl_basic_set_from_constraint(c);
1406 aff = isl_aff_neg(aff);
1407 aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
1408 aff = isl_aff_add_constant_si(aff, -1);
1409 c = isl_inequality_from_aff(aff);
1410 tile = isl_basic_set_add_constraint(tile, c);
1412 aff = isl_aff_zero(ls);
1413 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1414 c = isl_inequality_from_aff(aff);
1415 tile = isl_basic_set_add_constraint(tile, c);
1417 bound = isl_pw_aff_copy(group->array->bound[i]);
1418 bound = isl_pw_aff_add_dims(bound, isl_dim_set, 1);
1419 ls = isl_local_space_from_dim(isl_pw_aff_get_dim(bound));
1420 aff = isl_aff_zero(ls);
1421 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1422 aff = isl_aff_add_constant_si(aff, 1);
1423 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
1425 tile_set = isl_pw_aff_ge_set(bound, isl_pw_aff_alloc(dom, aff));
1426 tile_set = isl_set_align_params(tile_set, isl_basic_set_get_dim(tile));
1427 tile_set = isl_set_intersect(tile_set, isl_set_from_basic_set(tile));
1429 return tile_set;
1432 /* Return a set containing the elements in the array tile in
1433 * global memory that corresponds to the shared memory copy.
1435 static __isl_give isl_set *group_tile(struct cuda_array_ref_group *group)
1437 int i;
1438 int n_index = group->array->n_index;
1439 isl_set *tile;
1441 tile = group_tile_dim(group, 0);
1442 for (i = 1; i < n_index; ++i) {
1443 isl_set *tile_i;
1445 tile_i = group_tile_dim(group, i);
1446 tile = isl_set_flat_product(tile, tile_i);
1449 tile = isl_set_set_tuple_name(tile, group->array->name);
1451 return tile;
1454 /* Print code for reading into or writing from shared memory
1455 * the given array reference group.
1457 * sched maps the original iteration domains to the shared memory tile loops.
1459 * If we are performing a read from global memory to shared memory,
1460 * if the array involved is not a scalar and if the definition of the
1461 * shared memory tiles does not involve any strides, then we copy
1462 * the entire tile to shared memory. This may result in some extra
1463 * elements getting copied, but it should lead to simpler code
1464 * (which means that fewer registers may be needed) and less divergence.
1466 * Otherwise, we only copy the elements that will be read or have been written
1467 * in the kernel.
1469 * Note that the requirement that there be no strides can easily be lifted.
1470 * We would just need to add constraints of the form
1472 * shift + a = stride * alpha
1474 static int print_group_shared_accesses(struct cuda_gen *gen,
1475 struct cuda_array_ref_group *group, const char *type,
1476 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
1478 int read;
1479 isl_union_map *access;
1480 isl_union_set *uset;
1481 isl_set *access_set;
1483 if (group->private_bound)
1484 return 0;
1485 if (!group->shared_bound)
1486 return 0;
1488 read = !strcmp(type, "read");
1490 access = group_access_relation(group, read, !read);
1491 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
1492 uset = isl_union_map_range(access);
1494 if (isl_union_set_is_empty(uset)) {
1495 isl_union_set_free(uset);
1496 return 0;
1499 if (read && group->array->n_index > 0 && no_strides(group)) {
1500 isl_union_set_free(uset);
1501 access_set = group_tile(group);
1502 print_shared_access(gen, shared_domain, access_set,
1503 type, group);
1504 return 1;
1507 access_set = isl_set_from_union_set(uset);
1508 access_set = isl_set_coalesce(access_set);
1510 print_shared_access(gen, shared_domain, access_set, type, group);
1512 return 1;
1515 /* Print code for reading into or writing from shared memory at
1516 * the given level (-1 for innermost).
1518 * If we are not printing at the innermost level, then the dimensionality
1519 * of shared_domain may be smaller than gen->shared_len.
1520 * As the rest of the code assumes that the domain of access has
1521 * gen->shared_len dimensions, we therefore may need to embed this domain
1522 * in a higher dimensional space after intersection with shared_domain.
1524 static void print_shared_accesses(struct cuda_gen *gen,
1525 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
1526 const char *type, int level)
1528 int i, j;
1529 isl_dim *dim;
1530 isl_map *proj;
1531 isl_set *par;
1532 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
1533 int sync = 0;
1534 isl_union_map *sched;
1536 shared_domain = isl_set_copy(shared_domain);
1537 sched = isl_union_map_copy(gen->tiled_sched);
1538 dim = isl_union_map_get_dim(sched);
1539 proj = projection(dim, gen->tiled_len, shared_len);
1540 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
1541 sched = isl_union_map_intersect_range(sched,
1542 isl_union_set_from_set(isl_set_copy(shared_domain)));
1543 if (shared_len != gen->shared_len) {
1544 dim = isl_union_map_get_dim(sched);
1545 proj = projection(dim, gen->shared_len, shared_len);
1546 proj = isl_map_reverse(proj);
1547 shared_domain = isl_set_apply(shared_domain,
1548 isl_map_copy(proj));
1549 sched = isl_union_map_apply_range(sched,
1550 isl_union_map_from_map(proj));
1553 dim = isl_union_map_get_dim(sched);
1554 par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
1555 sched = isl_union_map_intersect_range(sched,
1556 isl_union_set_from_set(par));
1558 for (i = 0; i < gen->n_array; ++i) {
1559 struct cuda_array_info *array = &gen->array[i];
1561 if (gen->array[i].print_shared_level != level)
1562 continue;
1564 for (j = 0; j < array->n_group; ++j) {
1565 if (print_group_shared_accesses(gen, array->groups[j],
1566 type, shared_domain, sched))
1567 sync = 1;
1571 isl_union_map_free(sched);
1572 isl_set_free(shared_domain);
1574 if (sync) {
1575 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
1576 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
1580 /* Given an index expression into a tile of an array, adjust the expression
1581 * to a shift of the tile to the origin
1582 * (based on the lower bounds in array->shared_bound).
1583 * If the index is strided, then we first add
1584 * bound->shift and divide by bound->stride.
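 * For example (an illustration): for an index expression 4*i + 2 with
 * shift = 2 and stride = 4, the adjusted expression is
 * (4*i + 2 + 2)/4 - lb = i + 1 - lb, with lb the lower bound of the tile.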
1586 static __isl_give isl_qpolynomial *shift_index(__isl_take isl_qpolynomial *qp,
1587 struct cuda_array_info *array,
1588 struct cuda_array_bound *bound, __isl_take isl_set *domain)
1590 isl_qpolynomial *lb;
1592 if (bound->shift) {
1593 isl_qpolynomial *shift, *t;
1594 isl_int one;
1595 isl_dim *dim;
1596 shift = bound->shift;
1597 shift = isl_qpolynomial_copy(shift);
1598 shift = isl_qpolynomial_drop_dims(shift, isl_dim_set, 0,
1599 isl_qpolynomial_dim(shift, isl_dim_set));
1600 shift = isl_qpolynomial_align_params(shift,
1601 isl_qpolynomial_get_dim(qp));
1602 qp = isl_qpolynomial_add(qp, shift);
1603 dim = isl_qpolynomial_get_dim(qp);
1604 isl_int_init(one);
1605 isl_int_set_si(one, 1);
1606 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
1607 isl_int_clear(one);
1608 qp = isl_qpolynomial_mul(qp, t);
1611 lb = isl_qpolynomial_from_aff(isl_aff_copy(bound->lb));
1612 lb = isl_qpolynomial_drop_dims(lb, isl_dim_set, 0,
1613 isl_qpolynomial_dim(lb, isl_dim_set));
1615 lb = isl_qpolynomial_align_params(lb, isl_qpolynomial_get_dim(qp));
1617 qp = isl_qpolynomial_sub(qp, lb);
1618 qp = isl_qpolynomial_gist(qp, domain);
1620 return qp;
1623 /* This function is called for each access to an array in some statement
1624 * in the original code.
1625 * Replace that access by an access to shared or (linearized) global memory.
1626 * Since the array in shared memory is just
1627 * a shifted copy of part of the original array, we simply need
1628 * to subtract the lower bound, which was computed
1629 * in can_tile_for_shared_memory.
1630 * If any of the indices is strided, then we first add
1631 * shared_bound[i].shift and divide by shared_bound[i].stride.
1633 * If the given array is accessed directly from global memory,
1634 * we don't need to perform any shifting and simply simplify
1635 * the expression in the context of the domain instead.
1637 * If the array space (range of access) has no name, then we are
1638 * accessing an iterator in the original program.
1640 static void print_access(struct cuda_gen *gen, __isl_take isl_map *access,
1641 int group_nr)
1643 int i;
1644 const char *name;
1645 unsigned n_index;
1646 struct cuda_array_info *array = NULL;
1647 isl_printer *prn;
1648 isl_basic_set *aff;
1649 isl_set *data_set;
1650 isl_set *domain;
1651 struct cuda_array_bound *bounds = NULL;
1653 access = isl_map_align_params(access,
1654 isl_set_get_dim(gen->stmt_domain));
1656 data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);
1658 name = isl_set_get_tuple_name(data_set);
1660 if (!name)
1661 fprintf(gen->cuda.kernel_c, "(");
1662 else {
1663 struct cuda_array_ref_group *group;
1665 for (i = 0; i < gen->n_array; ++i) {
1666 if (strcmp(name, gen->array[i].name))
1667 continue;
1668 array = &gen->array[i];
1670 assert(array);
1671 group = array->groups[group_nr];
1672 bounds = group->private_bound;
1673 if (!bounds)
1674 bounds = group->shared_bound;
1676 print_array_name(gen->cuda.kernel_c, group);
1678 if (cuda_array_is_scalar(array)) {
1679 isl_set_free(data_set);
1680 return;
1683 fprintf(gen->cuda.kernel_c, "[");
1687 n_index = isl_set_dim(data_set, isl_dim_set);
1688 aff = isl_set_affine_hull(data_set);
1690 prn = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
1691 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1693 if (!bounds)
1694 for (i = 0; i + 1 < n_index; ++i)
1695 prn = isl_printer_print_str(prn, "(");
1697 for (i = 0; i < n_index; ++i) {
1698 isl_constraint *c;
1699 isl_qpolynomial *qp;
1700 int ok;
1702 ok = isl_basic_set_has_defining_equality(aff,
1703 isl_dim_out, i, &c);
1704 assert(ok);
1705 qp = isl_qpolynomial_from_constraint(c, isl_dim_out, i);
1706 qp = isl_qpolynomial_drop_dims(qp, isl_dim_set, 0,
1707 isl_qpolynomial_dim(qp, isl_dim_set));
1709 if (!array) {
1710 prn = isl_printer_print_qpolynomial(prn, qp);
1711 isl_qpolynomial_free(qp);
1712 continue;
1715 domain = isl_set_copy(gen->stmt_domain);
1716 domain = isl_set_project_out(domain, isl_dim_set, 0,
1717 isl_set_dim(domain, isl_dim_set));
1718 if (!bounds)
1719 qp = isl_qpolynomial_gist(qp, domain);
1720 else
1721 qp = shift_index(qp, array, &bounds[i], domain);
1723 if (i) {
1724 if (!bounds) {
1725 prn = isl_printer_print_str(prn, ") * (");
1726 prn = isl_printer_print_pw_aff(prn,
1727 array->local_bound[i]);
1728 prn = isl_printer_print_str(prn, ") + ");
1729 } else
1730 prn = isl_printer_print_str(prn, "][");
1732 prn = isl_printer_print_qpolynomial(prn, qp);
1733 isl_qpolynomial_free(qp);
1735 if (!name)
1736 prn = isl_printer_print_str(prn, ")");
1737 else
1738 prn = isl_printer_print_str(prn, "]");
1739 isl_printer_free(prn);
1741 isl_basic_set_free(aff);
1744 static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
1745 struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
1747 int i;
1749 switch (expr->type) {
1750 case pet_expr_double:
1751 fprintf(out, "%g", expr->d);
1752 break;
1753 case pet_expr_access:
1754 print_access(gen, isl_map_copy(access->access), access->group);
1755 access = access->next;
1756 break;
1757 case pet_expr_unary:
1758 if (!outer)
1759 fprintf(out, "(");
1760 fprintf(out, " %s ", pet_op_str(expr->op));
1761 access = print_expr(gen, out, expr->args[pet_un_arg],
1762 access, 0);
1763 if (!outer)
1764 fprintf(out, ")");
1765 break;
1766 case pet_expr_binary:
1767 if (!outer)
1768 fprintf(out, "(");
1769 access = print_expr(gen, out, expr->args[pet_bin_lhs],
1770 access, 0);
1771 fprintf(out, " %s ", pet_op_str(expr->op));
1772 access = print_expr(gen, out, expr->args[pet_bin_rhs],
1773 access, 0);
1774 if (!outer)
1775 fprintf(out, ")");
1776 break;
1777 case pet_expr_ternary:
1778 if (!outer)
1779 fprintf(out, "(");
1780 access = print_expr(gen, out, expr->args[pet_ter_cond],
1781 access, 0);
1782 fprintf(out, " ? ");
1783 access = print_expr(gen, out, expr->args[pet_ter_true],
1784 access, 0);
1785 fprintf(out, " : ");
1786 access = print_expr(gen, out, expr->args[pet_ter_false],
1787 access, 0);
1788 if (!outer)
1789 fprintf(out, ")");
1790 break;
1791 case pet_expr_call:
1792 fprintf(out, "%s(", expr->name);
1793 for (i = 0; i < expr->n_arg; ++i) {
1794 if (i)
1795 fprintf(out, ", ");
1796 access = print_expr(gen, out, expr->args[i],
1797 access, 1);
1799 fprintf(out, ")");
1801 return access;
1804 static void print_stmt_body(struct cuda_gen *gen,
1805 FILE *out, struct cuda_stmt *stmt)
1807 print_expr(gen, out, stmt->body, stmt->accesses, 1);
1808 fprintf(out, ";\n");
1811 /* This function is called for each leaf in the innermost clast,
1812 * i.e., for each statement.
1813 * We print the statement body, simplifying the accesses based
1814 * on the schedule.
1816 static void print_statement(struct gpucode_info *code,
1817 struct clast_user_stmt *u)
1819 struct cuda_gen *gen = code->user;
1820 isl_dim *dim;
1821 isl_set *par;
1822 isl_set *stmt_domain;
1823 isl_union_map *stmt_sched;
1824 isl_union_set *uset;
1825 int nr;
1826 struct cuda_stmt *stmt;
1828 nr = atoi(u->statement->name + 2);
1829 stmt = &gen->stmts[nr];
1831 stmt_domain = extract_host_domain(u);
1833 stmt_sched = isl_union_map_intersect_range(
1834 isl_union_map_copy(gen->local_sched),
1835 isl_union_set_from_set(extend(stmt_domain,
1836 gen->thread_tiled_len)));
1837 dim = isl_union_map_get_dim(stmt_sched);
1838 par = parametrization(dim, gen->thread_tiled_len, 0,
1839 gen->thread_tiled_len, "c");
1840 stmt_sched = isl_union_map_intersect_range(stmt_sched,
1841 isl_union_set_from_set(par));
1843 uset = isl_union_map_domain(stmt_sched);
1844 dim = isl_union_set_get_dim(uset);
1845 dim = isl_dim_add(dim, isl_dim_set,
1846 isl_set_dim(stmt->domain, isl_dim_set));
1847 dim = isl_dim_set_tuple_name(dim, isl_dim_set, u->statement->name);
1848 gen->stmt_domain = isl_union_set_extract_set(uset, dim);
1849 isl_union_set_free(uset);
1851 print_indent(code->dst, code->indent);
1852 print_stmt_body(gen, code->dst, stmt);
1854 isl_set_free(gen->stmt_domain);
1857 /* Print an access to the element in the global memory copy of the
1858 * given array that corresponds to element [qp[0]][qp[1]]...
1859 * of the original array.
1860 * The copy in global memory has been linearized, so we need to take
1861 * the array size into account.
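 * As an illustration (not part of the original comment): for a two-dimensional
 * array A with index bounds N0 and N1, element A[i][j] is printed as
 *
 *	A[(i) * (N1) + j]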
1863 static void print_private_global_index(isl_ctx *ctx, FILE *out,
1864 struct cuda_array_info *array, __isl_keep isl_qpolynomial **qp)
1866 int i;
1867 isl_printer *prn;
1869 fprintf(out, "%s[", array->name);
1870 prn = isl_printer_to_file(ctx, out);
1871 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1872 for (i = 0; i + 1 < array->n_index; ++i)
1873 prn = isl_printer_print_str(prn, "(");
1874 for (i = 0; i < array->n_index; ++i) {
1875 if (i) {
1876 prn = isl_printer_print_str(prn, ") * (");
1877 prn = isl_printer_print_pw_aff(prn,
1878 array->local_bound[i]);
1879 prn = isl_printer_print_str(prn, ") + ");
1881 prn = isl_printer_print_qpolynomial(prn, qp[i]);
1883 isl_printer_free(prn);
1884 fprintf(out, "]");
1887 /* Print an access to the element in the shared memory copy of the
1888 * given array reference group that corresponds to element [qps[0]][qps[1]]...
1889 * of the original array.
1890 * Since the array in shared memory is just a shifted copy of part
1891 * of the original array, we simply need to subtract the lower bound,
1892 * which was computed in can_tile_for_shared_memory.
1893 * If any of the indices is strided, then we first add
1894 * shared_bound[i].shift and divide by shared_bound[i].stride.
1896 static void print_private_local_index(isl_ctx *ctx, FILE *out,
1897 struct cuda_array_ref_group *group,
1898 __isl_keep isl_qpolynomial **qps, __isl_keep isl_set *domain)
1900 int i;
1901 isl_printer *prn;
1902 struct cuda_array_info *array = group->array;
1903 struct cuda_array_bound *bounds = group->private_bound;
1905 print_array_name(out, group);
1906 for (i = 0; i < array->n_index; ++i) {
1907 isl_qpolynomial *qp = isl_qpolynomial_copy(qps[i]);
1909 qp = shift_index(qp, array, &bounds[i], isl_set_copy(domain));
1911 fprintf(out, "[");
1912 prn = isl_printer_to_file(ctx, out);
1913 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1914 prn = isl_printer_print_qpolynomial(prn, qp);
1915 isl_printer_free(prn);
1916 fprintf(out, "]");
1917 isl_qpolynomial_free(qp);
1921 /* This function is called for each leaf in the clast of the code
1922 * for copying to or from private memory.
1923 * The statement name is read_private_<array> or write_private_<array>.
1925 * The schedule iterates over the array elements, so we can use
1926 * the domain of private_sched at the current scheduling position
1927 * as the index of the array.
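 *
 * For a hypothetical array "A", a "read_private_A" leaf thus produces a
 * statement of the form
 *
 *	private_A[...] = A[...];
 *
 * while a "write_private_A" leaf produces the reverse assignment.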
1929 static void print_private_copy_statement(struct gpucode_info *code,
1930 struct clast_user_stmt *u)
1932 struct cuda_gen *gen = code->user;
1933 isl_set *domain;
1934 isl_map *sched;
1935 struct cuda_array_ref_group *group = gen->private_group;
1936 int i;
1937 unsigned n_in;
1938 unsigned n_out;
1939 isl_dim *dim;
1940 isl_set *param;
1941 isl_set *index;
1942 isl_basic_set *aff;
1943 isl_ctx *ctx;
1944 isl_qpolynomial **qp;
1945 int read;
1947 read = !strncmp(u->statement->name, "read", 4);
1949 domain = extract_host_domain(u);
1950 assert(domain);
1952 sched = isl_map_copy(gen->private_sched);
1953 sched = isl_map_reverse(sched);
1954 sched = isl_map_intersect_domain(sched, domain);
1955 n_in = isl_map_dim(sched, isl_dim_in);
1956 n_out = isl_map_dim(sched, isl_dim_out);
1957 dim = isl_map_get_dim(sched);
1958 dim = isl_dim_drop(dim, isl_dim_in, 0, n_in);
1959 dim = isl_dim_drop(dim, isl_dim_out, 0, n_out);
1960 param = parametrization(dim, n_in, 0, n_in, "c");
1961 sched = isl_map_align_params(sched, isl_set_get_dim(param));
1962 sched = isl_map_intersect_domain(sched, param);
1963 index = isl_map_range(sched);
1964 domain = isl_set_copy(index);
1965 aff = isl_set_affine_hull(index);
1966 domain = isl_set_project_out(domain, isl_dim_set, 0, n_out);
1968 ctx = isl_basic_set_get_ctx(aff);
1969 qp = isl_alloc_array(ctx, isl_qpolynomial *, n_out);
1970 assert(qp);
1972 for (i = 0; i < n_out; ++i) {
1973 isl_constraint *c;
1974 int ok;
1976 ok = isl_basic_set_has_defining_equality(aff,
1977 isl_dim_set, i, &c);
1978 assert(ok);
1979 qp[i] = isl_qpolynomial_from_constraint(c, isl_dim_set, i);
1980 qp[i] = isl_qpolynomial_drop_dims(qp[i], isl_dim_set, 0, n_out);
1983 print_indent(code->dst, code->indent);
1984 if (read) {
1985 print_private_local_index(ctx, code->dst, group, qp, domain);
1986 fprintf(code->dst, " = ");
1987 print_private_global_index(ctx, code->dst, group->array, qp);
1988 } else {
1989 print_private_global_index(ctx, code->dst, group->array, qp);
1990 fprintf(code->dst, " = ");
1991 print_private_local_index(ctx, code->dst, group, qp, domain);
1993 fprintf(code->dst, ";\n");
1995 for (i = 0; i < n_out; ++i)
1996 isl_qpolynomial_free(qp[i]);
1997 free(qp);
1999 isl_basic_set_free(aff);
2000 isl_set_free(domain);
2003 static void print_private_access(struct cuda_gen *gen,
2004 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
2005 const char *type, struct cuda_array_ref_group *group)
2007 const char *array_name;
2008 char *name;
2009 isl_ctx *ctx;
2010 unsigned nvar = isl_set_dim(access, isl_dim_set);
2011 isl_union_map *usched;
2013 if (isl_set_fast_is_empty(access)) {
2014 isl_set_free(access);
2015 return;
2018 ctx = isl_set_get_ctx(access);
2019 array_name = isl_set_get_tuple_name(access);
2020 name = isl_alloc_array(ctx, char,
2021 strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
2022 if (group->array->n_group > 1)
2023 sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
2024 else
2025 sprintf(name, "%s_private_%s", type, array_name);
2026 access = isl_set_set_tuple_name(access, name);
2027 free(name);
2029 gen->private_sched = shift_access(access, group);
2030 gen->private_group = group;
2032 usched = isl_union_map_from_map(isl_map_copy(gen->private_sched));
2033 print_shared_body(gen, shared_domain, usched, nvar,
2034 &print_private_copy_statement, 1);
2035 isl_union_map_free(usched);
2037 isl_map_free(gen->private_sched);
2040 /* Print code for reading into or writing from private memory
2041 * the given array reference group.
2043 * sched maps the original iteration domains to the shared memory tile loops.
2045 static void print_group_private_accesses(struct cuda_gen *gen,
2046 struct cuda_array_ref_group *group,
2047 const char *type, __isl_keep isl_set *shared_domain,
2048 unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
2050 int read;
2051 isl_union_map *access;
2052 isl_union_set *uset;
2053 isl_set *access_set;
2055 if (!group->private_bound)
2056 return;
2058 read = !strcmp(type, "read");
2060 access = group_access_relation(group, read, !read);
2061 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
2062 access = isl_union_map_intersect(access,
2063 isl_union_map_copy(gen->private_access));
2064 uset = isl_union_map_range(access);
2066 if (isl_union_set_is_empty(uset)) {
2067 isl_union_set_free(uset);
2068 return;
2071 access_set = isl_set_from_union_set(uset);
2072 access_set = isl_set_coalesce(access_set);
2073 access_set = isl_set_eliminate(access_set, isl_dim_param,
2074 first_shared + shared_len,
2075 gen->shared_len - shared_len);
2077 print_private_access(gen, shared_domain, access_set, type, group);
2080 /* Print code for reading into or writing from private memory at
2081 * the given level (-1 for innermost).
2083 * If we are not printing at the innermost level, then the dimensionality
2084 * of shared_domain may be smaller than gen->shared_len.
2085 * As the rest of the code assumes that the domain of access has
2086 * gen->shared_len dimensions, we therefore may need to embed this domain
2087 * in a higher dimensional space after intersection with shared_domain.
2089 * This code is very similar to print_shared_accesses.
2090 * The main difference is that we need to take into account gen->private_access.
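 *
 * As an illustration, if shared_domain only has two dimensions while
 * gen->shared_len is three, then the reverse of the projection
 * [s0, s1, s2] -> [s0, s1] embeds the domain as
 * { [s0, s1, s2] : [s0, s1] in shared_domain }, leaving s2 unconstrained.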
2092 static void print_private_accesses(struct cuda_gen *gen,
2093 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
2094 const char *type, int level)
2096 int i, j;
2097 isl_dim *dim;
2098 isl_map *proj;
2099 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
2100 unsigned first_shared;
2101 isl_union_map *sched;
2103 shared_domain = isl_set_copy(shared_domain);
2104 sched = isl_union_map_copy(gen->tiled_sched);
2105 dim = isl_union_map_get_dim(sched);
2106 first_shared = isl_dim_size(dim, isl_dim_param);
2107 proj = projection(dim, gen->tiled_len, shared_len);
2108 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
2109 sched = isl_union_map_intersect_range(sched,
2110 isl_union_set_from_set(isl_set_copy(shared_domain)));
2111 if (shared_len != gen->shared_len) {
2112 dim = isl_union_map_get_dim(sched);
2113 proj = projection(dim, gen->shared_len, shared_len);
2114 proj = isl_map_reverse(proj);
2115 shared_domain = isl_set_apply(shared_domain,
2116 isl_map_copy(proj));
2117 sched = isl_union_map_apply_range(sched,
2118 isl_union_map_from_map(proj));
2121 for (i = 0; i < gen->n_array; ++i) {
2122 struct cuda_array_info *array = &gen->array[i];
2124 if (gen->array[i].print_shared_level != level)
2125 continue;
2127 for (j = 0; j < array->n_group; ++j)
2128 print_group_private_accesses(gen, array->groups[j],
2129 type, shared_domain,
2130 first_shared, shared_len, sched);
2133 isl_union_map_free(sched);
2134 isl_set_free(shared_domain);
2137 /* Set unroll[j] if the input dimension j is involved in
2138 * the index expression represented by bmap.
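 *
 * For example, for a (hypothetical) index expression
 * { [i0, i1, i2] -> [i0 + i2] }, unroll[0] and unroll[2] get set,
 * while unroll[1] is left untouched.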
2140 static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
2142 int i, j;
2143 int n_in = isl_basic_map_dim(bmap, isl_dim_in);
2144 int n_out = isl_basic_map_dim(bmap, isl_dim_out);
2145 int *unroll = user;
2147 for (i = 0; i < n_out; ++i) {
2148 isl_constraint *c;
2149 int ok;
2151 ok = isl_basic_map_has_defining_equality(bmap,
2152 isl_dim_out, i, &c);
2153 assert(ok);
2154 for (j = 0; j < n_in; ++j)
2155 if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
2156 unroll[j] = 1;
2157 isl_constraint_free(c);
2160 isl_basic_map_free(bmap);
2161 return 0;
2164 /* Given an array pos mapping input dimensions to the corresponding
2165 * output dimension, construct the corresponding map.
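 *
 * For instance, for pos = { 2, 0, 1 } and len = 3, the constructed map is
 *
 *	{ [i0, i1, i2] -> [o0, o1, o2] : o2 = i0 and o0 = i1 and o1 = i2 }
 *
 * i.e., input dimension i ends up at output position pos[i].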
2167 static __isl_give isl_map *permutation(__isl_take isl_dim *dim,
2168 int *pos, int len)
2170 int i;
2171 isl_constraint *c;
2172 isl_basic_map *bmap;
2174 dim = isl_dim_add(dim, isl_dim_in, len);
2175 dim = isl_dim_add(dim, isl_dim_out, len);
2176 bmap = isl_basic_map_universe(isl_dim_copy(dim));
2178 for (i = 0; i < len; ++i) {
2179 c = isl_equality_alloc(isl_dim_copy(dim));
2180 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
2181 isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
2182 bmap = isl_basic_map_add_constraint(bmap, c);
2184 isl_dim_free(dim);
2186 return isl_map_from_basic_map(bmap);
2189 /* Find all loops involved in any of the index expressions for any of
2190 * the private accesses, move them innermost and then mark them as
2191 * requiring unrolling by setting gen->first_unroll.
2192 * The loops involved should all be parallel because of the checks
2193 * we performed in check_private_group_access. Moving them innermost
2194 * is therefore a valid transformation.
2196 static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
2197 __isl_take isl_union_map *sched)
2199 int i, j;
2200 int unroll[gen->thread_tiled_len];
2201 int perm[gen->thread_tiled_len];
2202 isl_dim *dim;
2203 isl_map *permute;
2204 int len = gen->shared_len + gen->n_parallel + gen->n_block;
2206 gen->first_unroll = -1;
2208 for (i = 0; i < gen->thread_tiled_len; ++i)
2209 unroll[i] = 0;
2210 for (i = 0; i < gen->n_array; ++i) {
2211 struct cuda_array_info *array = &gen->array[i];
2213 for (j = 0; j < array->n_group; ++j) {
2214 isl_union_map *access;
2215 isl_map *acc;
2217 if (!array->groups[j]->private_bound)
2218 continue;
2220 access = group_access_relation(array->groups[j], 1, 1);
2221 access = isl_union_map_apply_domain(access,
2222 isl_union_map_copy(sched));
2224 acc = isl_map_from_union_map(access);
2225 isl_map_foreach_basic_map(acc, &check_unroll, unroll);
2227 isl_map_free(acc);
2231 for (i = 0; i < gen->shared_len; ++i)
2232 if (unroll[i])
2233 return sched;
2235 for (i = gen->shared_len; i < len; ++i)
2236 if (unroll[i])
2237 break;
2239 if (i >= len)
2240 return sched;
2242 for (i = len; i < gen->thread_tiled_len; ++i)
2243 if (unroll[i])
2244 return sched;
2246 j = 0;
2247 for (i = 0; i < gen->thread_tiled_len; ++i)
2248 if (!unroll[i])
2249 perm[i] = j++;
2250 gen->first_unroll = 1 + j;
2251 for (i = 0; i < len; ++i)
2252 if (unroll[i])
2253 perm[i] = j++;
2255 dim = isl_union_map_get_dim(sched);
2256 permute = permutation(dim, perm, gen->thread_tiled_len);
2257 sched = isl_union_map_apply_range(sched,
2258 isl_union_map_from_map(permute));
2260 return sched;
2263 /* This function is called for each leaf in the clast of the kernel code.
2264 * We first specialize the schedule to the site of the leaf and
2265 * print code for reading into shared memory, performing the actual
2266 * computations and writing from shared memory, with the required
2267 * synchronizations.
2269 static void print_kernel_user(struct gpucode_info *code,
2270 struct clast_user_stmt *u)
2272 struct cuda_gen *gen = code->user;
2273 isl_set *shared_domain;
2275 shared_domain = extract_entire_host_domain(u);
2277 print_shared_accesses(gen, shared_domain, gen->read, "read", -1);
2279 print_private_accesses(gen, shared_domain, gen->read, "read", -1);
2281 print_shared_body(gen, shared_domain, gen->local_sched,
2282 gen->thread_tiled_len, &print_statement,
2283 gen->first_unroll);
2285 print_private_accesses(gen, shared_domain, gen->write, "write", -1);
2287 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
2288 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
2290 print_shared_accesses(gen, shared_domain, gen->write, "write", -1);
2292 isl_set_free(shared_domain);
2295 /* Check if we need to perform any copying to shared memory at this level
2296 * and if so, print the copying instructions.
2297 * Any array for which we are allowed to print copying instructions at
2298 * this level, but haven't done so already, is printed.
2300 static void print_kernel_for_head(struct gpucode_info *code,
2301 struct clast_for *f)
2303 int i;
2304 struct cuda_gen *gen = code->user;
2305 isl_set *domain;
2306 int level;
2307 int print = 0;
2309 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2310 level = isl_set_dim(domain, isl_dim_set) - 1;
2312 for (i = 0; i < gen->n_array; ++i) {
2313 if (gen->array[i].print_shared_level >= 0)
2314 continue;
2315 if (gen->array[i].last_shared > level)
2316 continue;
2317 gen->array[i].print_shared_level = level;
2318 print = 1;
2321 if (print) {
2322 print_shared_accesses(gen, domain, gen->read, "read", level);
2323 print_private_accesses(gen, domain, gen->read, "read", level);
2326 isl_set_free(domain);
2329 /* Print instructions for copying from shared memory for each array
2330 * for which print_kernel_for_head has added copying instructions
2331 * to shared memory.
2333 static void print_kernel_for_foot(struct gpucode_info *code,
2334 struct clast_for *f)
2336 int i;
2337 struct cuda_gen *gen = code->user;
2338 isl_set *domain;
2339 int level;
2340 int print = 0;
2342 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2343 level = isl_set_dim(domain, isl_dim_set) - 1;
2345 for (i = 0; i < gen->n_array; ++i) {
2346 if (gen->array[i].print_shared_level != level)
2347 continue;
2348 print = 1;
2349 break;
2352 if (print) {
2353 print_private_accesses(gen, domain, gen->write, "write", level);
2354 print_shared_accesses(gen, domain, gen->write, "write", level);
2357 isl_set_free(domain);
2360 /* Use CLooG to generate code for the outer gen->shared_first loops
2361 * of the local schedule "sched".
2362 * The pretty printing of this code is handled by gpu_print_host_stmt,
2363 * which calls print_kernel_user for each iteration of the shared tile loops.
2365 static void print_cloog_kernel_body(struct cuda_gen *gen,
2366 __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
2368 int i;
2369 CloogOptions *options;
2370 CloogDomain *cloog_context;
2371 CloogUnionDomain *ud;
2372 CloogInput *input;
2373 struct clast_stmt *stmt;
2374 char name[20];
2376 sched = isl_union_map_copy(sched);
2377 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
2379 options = cloog_options_malloc(gen->state);
2380 options->language = LANGUAGE_C;
2381 options->strides = 1;
2382 options->sh = 1;
2383 options->stop = gen->shared_len;
2384 options->f = gen->tiled_len;
2385 options->l = gen->tiled_len;
2386 options->save_domains = 1;
2387 options->noscalars = 1;
2389 ud = cloog_union_domain_from_isl_union_map(sched);
2390 for (i = 0; i < gen->shared_len; ++i) {
2391 snprintf(name, sizeof(name), "g%d", i);
2392 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
2394 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
2395 input = cloog_input_alloc(cloog_context, ud);
2397 stmt = cloog_clast_create_from_input(input, options);
2399 gen->kernel_code.indent = 4;
2400 gen->kernel_code.dst = gen->cuda.kernel_c;
2401 gen->kernel_code.print_user_stmt = NULL;
2402 gen->kernel_code.print_user_stmt_list = &print_kernel_user;
2403 gen->kernel_code.print_for_head = &print_kernel_for_head;
2404 gen->kernel_code.print_for_foot = &print_kernel_for_foot;
2405 gen->kernel_code.user = gen;
2406 gpu_print_host_stmt(&gen->kernel_code, stmt);
2408 cloog_clast_free(stmt);
2409 cloog_options_free(options);
2412 static void print_kernel_iterators(struct cuda_gen *gen)
2414 int i;
2415 const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
2416 const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
2417 "threadIdx.z" };
2419 if (gen->n_grid > 0) {
2420 print_indent(gen->cuda.kernel_c, 4);
2421 fprintf(gen->cuda.kernel_c, "int ");
2422 for (i = 0; i < gen->n_grid; ++i) {
2423 if (i)
2424 fprintf(gen->cuda.kernel_c, ", ");
2425 fprintf(gen->cuda.kernel_c, "b%d = %s",
2426 i, block_dims[gen->n_grid - 1 - i]);
2428 fprintf(gen->cuda.kernel_c, ";\n");
2431 if (gen->n_block > 0) {
2432 print_indent(gen->cuda.kernel_c, 4);
2433 fprintf(gen->cuda.kernel_c, "int ");
2434 for (i = 0; i < gen->n_block; ++i) {
2435 if (i)
2436 fprintf(gen->cuda.kernel_c, ", ");
2437 fprintf(gen->cuda.kernel_c, "t%d = %s",
2438 i, thread_dims[gen->n_block - 1 - i]);
2440 fprintf(gen->cuda.kernel_c, ";\n");
2444 static void print_group_shared_array(struct cuda_gen *gen,
2445 struct cuda_array_ref_group *group)
2447 int j;
2448 struct cuda_array_bound *bounds;
2450 bounds = group->private_bound;
2451 if (!bounds)
2452 bounds = group->shared_bound;
2453 if (!bounds)
2454 return;
2456 print_indent(gen->cuda.kernel_c, 4);
2457 fprintf(gen->cuda.kernel_c, "%s%s ",
2458 group->private_bound ? "" : "__shared__ ", group->array->type);
2459 print_array_name(gen->cuda.kernel_c, group);
2460 for (j = 0; j < group->array->n_index; ++j) {
2461 fprintf(gen->cuda.kernel_c, "[");
2462 isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
2463 fprintf(gen->cuda.kernel_c, "]");
2465 fprintf(gen->cuda.kernel_c, ";\n");
2468 static void print_shared_arrays(struct cuda_gen *gen)
2470 int i, j;
2472 for (i = 0; i < gen->n_array; ++i) {
2473 struct cuda_array_info *array = &gen->array[i];
2475 for (j = 0; j < array->n_group; ++j)
2476 print_group_shared_array(gen, array->groups[j]);
2480 static void print_kernel_body(struct cuda_gen *gen,
2481 __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
2483 isl_set *context;
2485 context = isl_set_copy(host_domain);
2486 context = parametrize(context, 0, gen->tile_first, "h");
2487 context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
2488 context = add_bounded_parameters(context,
2489 gen->n_grid, gen->grid_dim, "b");
2491 print_kernel_iterators(gen);
2492 print_shared_arrays(gen);
2494 fprintf(gen->cuda.kernel_c, "\n");
2496 print_cloog_kernel_body(gen, context, sched);
2498 isl_set_free(context);
2501 /* Given a constraint
2503 * a(p,i) + j = g f(e)
2505 * or -a(p,i) - j = g f(e) if sign < 0,
2506 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2507 * a(p,i) is assumed to be an expression in only the parameters.
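 *
 * For example, given the (hypothetical) constraint
 *
 *	n + j = 4 e0
 *
 * with parameter n and existentially quantified e0, the shift n and
 * the stride 4 are recorded, expressing that n + j is always a
 * multiple of 4.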
2509 static void extract_stride(__isl_keep isl_constraint *c,
2510 struct cuda_array_bound *bound, isl_int stride, int sign)
2512 int i;
2513 isl_int v;
2514 isl_int one;
2515 isl_dim *dim;
2516 unsigned nparam;
2517 isl_qpolynomial *qp;
2519 isl_int_set(bound->stride, stride);
2521 dim = isl_constraint_get_dim(c);
2522 dim = isl_dim_drop(dim, isl_dim_out, 0, 1);
2523 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2524 dim = isl_dim_domain(dim);
2526 nparam = isl_dim_size(dim, isl_dim_param);
2528 isl_int_init(v);
2529 isl_int_init(one);
2530 isl_int_set_si(one, 1);
2532 isl_constraint_get_constant(c, &v);
2533 if (sign < 0)
2534 isl_int_neg(v, v);
2535 qp = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2537 for (i = 0; i < nparam; ++i) {
2538 isl_qpolynomial *t, *p;
2540 isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
2541 if (isl_int_is_zero(v))
2542 continue;
2543 if (sign < 0)
2544 isl_int_neg(v, v);
2545 t = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2546 p = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_param, i);
2547 t = isl_qpolynomial_mul(t, p);
2548 qp = isl_qpolynomial_add(qp, t);
2551 isl_dim_free(dim);
2552 isl_int_clear(one);
2553 isl_int_clear(v);
2555 bound->shift = qp;
2558 /* Given an equality constraint of a map with a single output dimension j,
2559 * check if the constraint is of the form
2561 * a(p,i) + j = g f(e)
2563 * with a(p,i) an expression in the parameters and input dimensions
2564 * and f(e) an expression in the existentially quantified variables.
2565 * If so, and if g is larger than any such g from a previously considered
2566 * constraint, then call extract_stride to record the stride information
2567 * in bound.
2569 static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
2571 int i;
2572 isl_int v, stride;
2573 unsigned n_div;
2574 struct cuda_array_bound *bound = user;
2576 isl_int_init(v);
2577 isl_int_init(stride);
2579 n_div = isl_constraint_dim(c, isl_dim_div);
2580 isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);
2582 if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
2583 int s = isl_int_sgn(v);
2584 isl_int_set_si(stride, 0);
2585 for (i = 0; i < n_div; ++i) {
2586 isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
2587 isl_int_gcd(stride, stride, v);
2589 if (!isl_int_is_zero(stride) &&
2590 isl_int_gt(stride, bound->stride))
2591 extract_stride(c, bound, stride, s);
2594 isl_int_clear(stride);
2595 isl_int_clear(v);
2597 isl_constraint_free(c);
2598 return 0;
2601 /* Given constraints on an array index i, check if we can find
2602 * a shift a(p) and a stride g such that
2604 * a(p) + i = 0 mod g
2606 * If so, record the information in bound and apply the mapping
2607 * i -> (i + a(p))/g to the array index in bounds and return
2608 * the new constraints.
2609 * If not, simply return the original constraints.
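 *
 * For example, if the accessed indices (hypothetically) satisfy
 *
 *	n + i = 0 mod 4
 *
 * then shift n and stride 4 are recorded and the index is remapped as
 *
 *	i -> (i + n)/4
 *
 * compressing the strided footprint to a dense range.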
2611 static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
2612 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2614 isl_dim *dim;
2615 isl_basic_map *aff;
2616 isl_basic_map *shift;
2617 isl_qpolynomial *qp, *t;
2618 isl_int one;
2620 isl_int_set_si(bound->stride, -1);
2622 aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));
2624 isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);
2626 isl_basic_map_free(aff);
2628 if (isl_int_is_neg(bound->stride))
2629 return bounds;
2631 qp = isl_qpolynomial_copy(bound->shift);
2632 qp = isl_qpolynomial_add_dims(qp, isl_dim_set, 1);
2633 dim = isl_qpolynomial_get_dim(qp);
2634 t = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_set, 0);
2635 qp = isl_qpolynomial_add(qp, t);
2636 isl_int_init(one);
2637 isl_int_set_si(one, 1);
2638 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
2639 isl_int_clear(one);
2640 qp = isl_qpolynomial_mul(qp, t);
2641 shift = isl_basic_map_from_qpolynomial(qp);
2643 bound->shift_map = isl_basic_map_copy(shift);
2644 bounds = isl_basic_map_apply_range(bounds, shift);
2646 return bounds;
2649 struct cuda_size_info {
2650 isl_basic_set *bset;
2651 struct cuda_array_bound *bound;
2652 int pos;
2655 /* Given a constraint from the basic set describing the bounds on
2656 * an array index, check if it is a lower bound, say m i >= b(x), and,
2657 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2658 * upper bound. If so, and if this bound is smaller than any bound
2659 * derived from earlier constraints, set the size to this bound on
2660 * the expression and the lower bound to ceil(b(x)/m).
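 *
 * As a worked (hypothetical) example, for the constraints
 *
 *	32 b0 <= i <= 32 b0 + 31
 *
 * the lower bound i >= 32 b0 yields lb = 32 b0, and the expression
 * i - 32 b0 + 1 has constant upper bound 32, so the size in this
 * direction would be set to 32.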
2662 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
2664 struct cuda_size_info *size = user;
2665 unsigned nparam;
2666 unsigned n_div;
2667 isl_int v;
2669 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
2670 n_div = isl_constraint_dim(c, isl_dim_div);
2672 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
2673 isl_constraint_free(c);
2674 return 0;
2677 isl_int_init(v);
2679 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
2681 if (isl_int_is_pos(v)) {
2682 isl_aff *aff;
2683 isl_aff *lb;
2684 enum isl_lp_result res;
2686 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
2687 aff = isl_aff_ceil(aff);
2689 lb = isl_aff_copy(aff);
2691 aff = isl_aff_neg(aff);
2692 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, size->pos, 1);
2694 res = isl_basic_set_max(size->bset, aff, &v);
2695 isl_aff_free(aff);
2697 if (res == isl_lp_ok) {
2698 isl_int_add_ui(v, v, 1);
2699 if (isl_int_is_neg(size->bound->size) ||
2700 isl_int_lt(v, size->bound->size)) {
2701 isl_int_set(size->bound->size, v);
2702 lb = isl_aff_drop_dims(lb, isl_dim_set,
2703 0, size->pos + 1);
2704 isl_aff_free(size->bound->lb);
2705 size->bound->lb = isl_aff_copy(lb);
2708 isl_aff_free(lb);
2711 isl_int_clear(v);
2712 isl_constraint_free(c);
2714 return 0;
2717 /* Given a basic map "bounds" that maps parameters and input dimensions
2718 * to a single output dimension, look for an expression in the parameters
2719 * and input dimensions such that the range of the output dimension shifted
2720 * by this expression is a constant.
2722 * In particular, we currently only consider lower bounds on the output
2723 * dimension as candidate expressions.
2725 static int compute_array_dim_size(struct cuda_gen *gen,
2726 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2728 struct cuda_size_info size;
2730 bounds = check_stride(gen, bound, bounds);
2732 isl_int_set_si(bound->size, -1);
2733 bound->lb = NULL;
2735 size.bound = bound;
2736 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
2737 size.bset = isl_basic_map_wrap(bounds);
2738 size.bset = isl_basic_set_flatten(size.bset);
2739 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
2740 &size);
2741 isl_basic_set_free(size.bset);
2743 return isl_int_is_nonneg(bound->size) ? 0 : -1;
2746 /* Check if we can find a shared memory tile for the given array
2747 * based on the given accesses, and if so, put the results
2748 * in array->shared_bound.
2750 * We project the accesses on each index in turn and look for a parametric
2751 * offset such that the size is constant.
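 *
 * As a sketch, for the (hypothetical) access relation
 *
 *	{ [g0, g1] -> A[i0, i1] : 32 g0 <= i0 <= 32 g0 + 31 and
 *				  32 g1 <= i1 <= 32 g1 + 31 }
 *
 * each index admits the parametric lower bound 32 g0 (resp. 32 g1)
 * with constant size 32, so a 32 x 32 tile of A fits in shared memory.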
2753 static int can_tile_for_shared_memory(struct cuda_gen *gen,
2754 struct cuda_array_info *array, __isl_keep isl_map *access,
2755 struct cuda_array_bound *bounds)
2757 int i;
2759 for (i = 0; i < array->n_index; ++i) {
2760 isl_map *access_i;
2761 isl_basic_map *hull;
2763 access_i = isl_map_copy(access);
2764 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2765 access_i = isl_map_project_out(access_i, isl_dim_out,
2766 1, array->n_index - (i + 1));
2767 access_i = isl_map_compute_divs(access_i);
2768 hull = isl_map_simple_hull(access_i);
2769 if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
2770 return 0;
2773 return 1;
2776 /* Construct a map whose input are the shared tile loops and the loops
2777 * that will be wrapped around the threads, which relates these latter
2778 * loops to the thread indices and then projects them out.
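 *
 * As an illustrative sketch with gen->shared_len = 2 and gen->n_block = 1,
 * the result is a map of the shape
 *
 *	{ [s0, s1, w0] -> [s0, s1] : ... }
 *
 * where the constraints relate w0 to the thread identifier parameter t0,
 * either by wrapping or by tiling, depending on the options.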
2780 static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
2782 isl_map *priv;
2783 isl_map *tiling;
2784 isl_map *proj;
2785 isl_set *par;
2786 isl_dim *dim;
2788 dim = isl_union_map_get_dim(gen->shared_sched);
2790 if (gen->options->wrap)
2791 tiling = wrap(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2792 gen->shared_len, gen->n_block, gen->block_dim);
2793 else
2794 tiling = tile(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2795 gen->shared_len, gen->n_block, gen->block_dim);
2797 priv = tiling;
2799 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2800 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2801 gen->n_block, "t");
2803 priv = isl_map_align_params(priv, isl_set_get_dim(par));
2804 priv = isl_map_intersect_range(priv, par);
2806 dim = isl_map_get_dim(priv);
2807 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2808 dim = isl_dim_drop(dim, isl_dim_out, 0, isl_dim_size(dim, isl_dim_out));
2809 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2810 gen->shared_len);
2812 priv = isl_map_apply_range(priv, proj);
2814 return priv;
2817 /* Construct a map from domain_dim to domain_dim that increments
2818 * the dimension at position "pos" and leaves all other dimensions
2819 * constant.
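 *
 * For example, for a 3-dimensional domain and pos = 1, the result is
 *
 *	{ [i0, i1, i2] -> [i0, i1 + 1, i2] }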
2821 static __isl_give isl_map *next(__isl_take isl_dim *domain_dim, int pos)
2823 int i;
2824 int len = isl_dim_size(domain_dim, isl_dim_set);
2825 isl_dim *dim;
2826 isl_basic_map *next;
2828 dim = isl_dim_map_from_set(domain_dim);
2829 next = isl_basic_map_universe(isl_dim_copy(dim));
2831 for (i = 0; i < len; ++i) {
2832 isl_constraint *c;
2834 c = isl_equality_alloc(isl_dim_copy(dim));
2835 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2836 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2837 if (i == pos)
2838 isl_constraint_set_constant_si(c, 1);
2839 next = isl_basic_map_add_constraint(next, c);
2842 isl_dim_free(dim);
2844 return isl_map_from_basic_map(next);
2847 /* Check if the given access is coalesced.
2848 * That is, check whether incrementing the dimension that will get
2849 * wrapped over the last thread index results in incrementing
2850 * the last array index.
2852 * This function is only called for access relations without reuse.
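 *
 * For example (hypothetical accesses), with iterator j wrapped over
 * threadIdx.x, an access A[i][j] is coalesced because consecutive
 * threads touch consecutive elements of the innermost array dimension,
 * whereas A[j][i] is not, since incrementing j jumps by an entire row.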
2854 static int access_is_coalesced(struct cuda_gen *gen,
2855 __isl_keep isl_union_map *access)
2857 isl_dim *dim;
2858 isl_map *access_map;
2859 isl_map *next_thread_x;
2860 isl_map *next_element;
2861 isl_map *map;
2862 int coalesced;
2864 access = isl_union_map_copy(access);
2865 access = isl_union_map_apply_domain(access,
2866 isl_union_map_copy(gen->tiled_sched));
2867 access_map = isl_map_from_union_map(access);
2869 dim = isl_map_get_dim(access_map);
2870 dim = isl_dim_domain(dim);
2871 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
2873 dim = isl_map_get_dim(access_map);
2874 dim = isl_dim_range(dim);
2875 next_element = next(dim, isl_dim_size(dim, isl_dim_set) - 1);
2877 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
2878 map = isl_map_apply_range(map, access_map);
2880 coalesced = isl_map_is_subset(map, next_element);
2882 isl_map_free(next_element);
2883 isl_map_free(map);
2885 return coalesced;
2888 /* For the given array reference group, check whether the access is private
2889 * to the thread. That is, check that any given array element
2890 * is only accessed by a single thread.
2891 * We compute an access relation that maps the shared tile loop iterators
2892 * and the shared point loop iterators that will be wrapped over the
2893 * threads to the array elements.
2894 * We actually check that those iterators that will be wrapped
2895 * partition the array space. This check is stricter than necessary
2896 * since several iterations may be mapped onto the same thread
2897 * and then they could be allowed to access the same memory elements,
2898 * but our check does not allow this situation.
2900 * We also check that the index expression only depends on parallel
2901 * loops. That way, we can move those loops innermost and unroll them.
2902 * Again, we use a test that is stricter than necessary.
2903 * We actually check whether the index expression only depends
2904 * on the iterators that are wrapped over the threads.
2905 * These are necessarily parallel, but there may be more parallel loops.
2907 * Combining the injectivity of the first test with the single-valuedness
2908 * of the second test, we simply test for bijectivity.
2910 * If it turns out we can use registers, we compute the private memory
2911 * tile size using can_tile_for_shared_memory, after introducing a dependence
2912 * on the thread indices.
2914 * Before performing any of the above computations, we first check
2915 * if there is any reuse on the reference group. If not, we simply
2916 * return. If, moreover, the access is coalesced then we also remove
2917 * the shared memory tiling since we should just use global memory instead.
2919 static void check_private_group_access(struct cuda_gen *gen,
2920 struct cuda_array_ref_group *group)
2922 isl_map *acc;
2923 isl_union_map *access;
2924 int n_index = group->array->n_index;
2926 access = group_access_relation(group, 1, 1);
2927 if (isl_union_map_is_injective(access)) {
2928 if (group->shared_bound && access_is_coalesced(gen, access)) {
2929 free_bound_list(group->shared_bound, n_index);
2930 group->shared_bound = NULL;
2932 isl_union_map_free(access);
2933 return;
2935 access = isl_union_map_apply_domain(access,
2936 isl_union_map_copy(gen->shared_sched));
2938 acc = isl_map_from_union_map(access);
2940 if (!isl_map_is_bijective(acc)) {
2941 isl_map_free(acc);
2942 return;
2945 group->private_bound = create_bound_list(gen->ctx, n_index);
2946 acc = isl_map_align_params(acc, isl_map_get_dim(gen->privatization));
2947 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
2948 if (!can_tile_for_shared_memory(gen, group->array, acc,
2949 group->private_bound)) {
2950 free_bound_list(group->private_bound, n_index);
2951 group->private_bound = NULL;
2954 isl_map_free(acc);
2957 /* Look for the last shared tile loop that affects the offset of the
2958 * shared or private tile and store the result in array->last_shared.
2960 static void set_last_shared(struct cuda_gen *gen,
2961 struct cuda_array_ref_group *group)
2963 int i, j;
2964 struct cuda_array_bound *bounds;
2965 unsigned first_shared = gen->first_shared;
2966 int n_index = group->array->n_index;
2968 bounds = group->private_bound;
2969 if (!bounds)
2970 bounds = group->shared_bound;
2971 if (!bounds)
2972 return;
2974 for (j = gen->shared_len - 1; j >= 0; --j) {
2975 for (i = 0; i < n_index; ++i) {
2976 isl_aff *lb;
2977 isl_qpolynomial *shift;
2979 lb = bounds[i].lb;
2980 if (isl_aff_involves_dims(lb, isl_dim_param,
2981 first_shared + j, 1))
2982 break;
2984 shift = bounds[i].shift;
2985 if (!shift)
2986 continue;
2987 if (isl_qpolynomial_involves_dims(shift, isl_dim_param,
2988 first_shared + j, 1))
2989 break;
2991 if (i < n_index)
2992 break;
2994 group->array->last_shared = j;
2997 /* Compute the sizes of all private arrays for the current kernel,
2998 * as well as the offsets of the private pieces in the original arrays.
2999 * If we cannot or don't want to privatize a given array group,
3000 * we use the shared memory tile sizes computed in
3001 * compute_group_shared_bound instead.
3003 * If a given array only has a single reference group and if we have
3004 * been able to find a private or shared tile,
3005 * we also look for the last shared tile loop that affects the offset
3006 * (and therefore the array tile) and store the result in array->last_shared.
3008 * A privatized copy of all access relations from reference groups that
3009 * are mapped to private memory is stored in gen->private_access.
3011 static void compute_private_size(struct cuda_gen *gen)
3013 int i, j;
3014 isl_union_map *private;
3016 if (!gen->options->use_private_memory)
3017 return;
3019 private = isl_union_map_empty(isl_union_map_get_dim(gen->shared_sched));
3021 for (i = 0; i < gen->n_array; ++i) {
3022 struct cuda_array_info *array = &gen->array[i];
3024 for (j = 0; j < array->n_group; ++j) {
3025 check_private_group_access(gen, array->groups[j]);
3027 if (!array->groups[j]->private_bound)
3028 continue;
3030 private = isl_union_map_union(private,
3031 group_access_relation(array->groups[j], 1, 1));
3034 array->last_shared = gen->shared_len - 1;
3035 array->print_shared_level = -1;
3037 if (array->n_group != 1)
3038 continue;
3039 set_last_shared(gen, array->groups[0]);
3042 if (isl_union_map_is_empty(private))
3043 isl_union_map_free(private);
3044 else {
3045 isl_union_map *priv;
3047 private = isl_union_map_apply_domain(private,
3048 isl_union_map_copy(gen->shared_sched));
3049 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
3050 private = isl_union_map_apply_domain(private, priv);
3051 gen->private_access = private;
3055 /* Fill up the groups array with singleton groups, i.e., one group
3056 * per reference, initializing the array, access, write and refs fields.
3057 * In particular the access field is initialized to the scheduled
3058 * access relation of the array reference.
3060 * Return the number of elements initialized, i.e., the number of
3061 * active references in the current kernel.
3063 static int populate_array_references(struct cuda_gen *gen,
3064 struct cuda_array_info *array, __isl_keep isl_union_map *sched,
3065 struct cuda_array_ref_group **groups)
3067 int i;
3068 int n;
3069 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3071 n = 0;
3072 for (i = 0; i < array->n_ref; ++i) {
3073 isl_union_map *umap;
3074 isl_map *map;
3075 struct cuda_array_ref_group *group;
3076 struct cuda_stmt_access *access = array->refs[i];
3078 map = isl_map_copy(access->access);
3079 umap = isl_union_map_from_map(map);
3080 umap = isl_union_map_apply_domain(umap,
3081 isl_union_map_copy(sched));
3083 if (isl_union_map_is_empty(umap)) {
3084 isl_union_map_free(umap);
3085 continue;
3088 map = isl_map_from_union_map(umap);
3090 group = isl_calloc_type(ctx, struct cuda_array_ref_group);
3091 assert(group);
3092 group->array = array;
3093 group->access = map;
3094 group->write = access->write;
3095 group->refs = &array->refs[i];
3097 groups[n++] = group;
3100 return n;
3103 static void free_array_ref_group(struct cuda_array_ref_group *group,
3104 int n_index)
3106 if (!group)
3107 return;
3108 free_bound_list(group->shared_bound, n_index);
3109 free_bound_list(group->private_bound, n_index);
3110 isl_map_free(group->access);
3111 free(group->refs);
3112 free(group);
3115 /* If two groups have overlapping access relations and if one of them
3116 * involves a write, then merge the two groups into one.
3118 * We keep track of the grouping in "leader". leader[j] points to
3119 * an earlier group array element that belongs to the same group,
3120 * or the array element j itself if this element is the first in the group.
3122 * Return the number of group leaders.
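 *
 * As a small (hypothetical) example, for three references where only
 * references 0 and 2 overlap and reference 2 is a write, the result
 * would be leader = { 0, 1, 0 }, i.e., two group leaders (0 and 1).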
3124 static int group_overlapping_writes(int n,
3125 struct cuda_array_ref_group **groups, int *leader)
3127 int i, j;
3128 int n_group = n;
3130 for (i = 0; i < n; ++i) {
3131 int l = i;
3132 groups[l]->n_ref = 1;
3133 for (j = i - 1; j >= 0; --j) {
3134 isl_map *map;
3135 int empty;
3137 if (leader[j] != j)
3138 continue;
3139 if (!groups[l]->write && !groups[j]->write)
3140 continue;
3142 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3143 isl_map_copy(groups[j]->access));
3144 empty = isl_map_is_empty(map);
3145 isl_map_free(map);
3147 if (empty)
3148 continue;
3150 groups[j]->access = isl_map_union(groups[j]->access,
3151 groups[l]->access);
3152 groups[j]->write = 1;
3153 groups[l]->access = NULL;
3154 groups[j]->n_ref += groups[l]->n_ref;
3155 l = leader[l] = j;
3156 n_group--;
3158 leader[i] = l;
3161 return n_group;
3164 /* Compute the size of the shared array corresponding to the given array
3165 * reference group, based on the accesses from the current kernel,
3166 * as well as the offset of the shared piece in the original array.
3168 static void compute_group_shared_bound(struct cuda_gen *gen,
3169 struct cuda_array_info *array, struct cuda_array_ref_group *group)
3171 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
3173 if (!gen->options->use_shared_memory)
3174 return;
3176 group->shared_bound = create_bound_list(ctx, array->n_index);
3177 if (!can_tile_for_shared_memory(gen, array, group->access,
3178 group->shared_bound)) {
3179 free_bound_list(group->shared_bound, array->n_index);
3180 group->shared_bound = NULL;
3184 /* Given an initial grouping of array references and shared memory tiles
3185 * for each group that allows for a shared memory tile, merge two groups
3186 * if both have a shared memory tile and if the merged group also has
3187 * a shared memory tile.
3189 * Return the number of group leaders after merging.
3191 static int group_common_shared_memory_tile(struct cuda_gen *gen,
3192 struct cuda_array_info *array, int n,
3193 struct cuda_array_ref_group **groups, int *leader, int n_group)
3195 int i, j;
3196 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
3198 for (i = 0; n_group > 1 && i < n; ++i) {
3199 int l = i;
3200 if (leader[i] != i)
3201 continue;
3202 if (!groups[i]->shared_bound)
3203 continue;
3204 for (j = i - 1; j >= 0; --j) {
3205 isl_map *map;
3206 int empty;
3207 struct cuda_array_bound *shared_bound;
3209 if (leader[j] != j)
3210 continue;
3211 if (!groups[j]->shared_bound)
3212 continue;
3214 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3215 isl_map_copy(groups[j]->access));
3216 empty = isl_map_is_empty(map);
3217 isl_map_free(map);
3219 if (empty)
3220 continue;
3222 map = isl_map_union(isl_map_copy(groups[l]->access),
3223 isl_map_copy(groups[j]->access));
3224 shared_bound = create_bound_list(ctx, array->n_index);
3225 if (!can_tile_for_shared_memory(gen, array, map,
3226 shared_bound)) {
3227 isl_map_free(map);
3228 free_bound_list(shared_bound, array->n_index);
3229 continue;
3232 free_bound_list(groups[j]->shared_bound,
3233 array->n_index);
3234 groups[j]->shared_bound = shared_bound;
3235 isl_map_free(groups[j]->access);
3236 groups[j]->access = map;
3237 groups[j]->n_ref += groups[l]->n_ref;
3238 l = leader[l] = j;
3239 n_group--;
3243 return n_group;
3246 /* Extract an array of array reference groups from the array of references
3247 * and the grouping information in "leader".
3249 * Store the results in array->n_group and array->groups.
3251 static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
3252 int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
3254 int i, j;
3256 for (i = 2; i < n; ++i)
3257 leader[i] = leader[leader[i]];
3259 array->n_group = n_group;
3260 array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
3261 n_group);
3262 assert(array->groups);
3264 j = 0;
3265 for (i = 0; i < n; ++i) {
3266 int k, l;
3267 struct cuda_stmt_access **refs;
3269 if (leader[i] != i) {
3270 groups[i]->refs = NULL;
3271 free_array_ref_group(groups[i], array->n_index);
3272 continue;
3275 refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
3276 groups[i]->n_ref);
3277 assert(refs);
3278 l = 0;
3279 for (k = i; k < n; ++k)
3280 if (leader[k] == i) {
3281 refs[l++] = *groups[k]->refs;
3282 (*groups[k]->refs)->group = j;
3285 groups[i]->refs = refs;
3286 groups[i]->nr = j;
3287 array->groups[j++] = groups[i];
3291 /* Group array references that should be considered together when
3292 * deciding whether to access them from private, shared or global memory.
3294 * In particular, if two array references overlap and if one of them
3295 * is a write, then the two references are grouped together.
3296 * Furthermore, if two groups admit a shared memory tile and if the
3297 * combination of the two also admits a shared memory tile, we merge
3298 * the two groups.
3300 * During the construction the group->refs field points to a single
3301 * array reference inside the array of array references, while
3302 * group->n_ref contains the number of elements in leader that
3303 * (directly or indirectly) point to this group, provided the group
3304 * is a leader.
3306 static void group_array_references(struct cuda_gen *gen,
3307 struct cuda_array_info *array, __isl_keep isl_union_map *sched)
3309 int i;
3310 int n, n_group;
3311 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3312 struct cuda_array_ref_group **groups;
3313 int *leader;
3315 groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
3316 array->n_ref);
3317 assert(groups);
3319 n = populate_array_references(gen, array, sched, groups);
3321 leader = isl_alloc_array(ctx, int, n);
3322 assert(leader);
3324 n_group = group_overlapping_writes(n, groups, leader);
3326 for (i = 0; i < n; ++i)
3327 if (leader[i] == i)
3328 compute_group_shared_bound(gen, array, groups[i]);
3330 n_group = group_common_shared_memory_tile(gen, array, n, groups,
3331 leader, n_group);
3333 extract_array_groups(ctx, array, n, groups, leader, n_group);
3335 free(leader);
3336 free(groups);
3339 /* Take tiled_sched, project it onto the shared tile loops and
3340 * the loops that will be wrapped over the threads,
3341 * parametrize the shared tile loops and store the result in gen->shared_sched.
3342 * The position of the first of these parameters is stored in gen->first_shared.
3343 * Also compute a projection that projects out the loops that will be
3344 * wrapped over the threads and store this projection in gen->shared_proj.
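 *
 * For example, with gen->shared_len = 2 and gen->n_block = 2, the range
 * of gen->shared_sched has the form [s0, s1, w0, w1] with s0 and s1
 * equated to the newly introduced parameters g0 and g1, while
 * gen->shared_proj projects [s0, s1, w0, w1] onto [s0, s1].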
3346 static void compute_shared_sched(struct cuda_gen *gen)
3348 isl_dim *dim;
3349 isl_map *proj;
3350 isl_set *par;
3351 isl_union_map *sched;
3353 sched = isl_union_map_copy(gen->tiled_sched);
3355 dim = isl_union_map_get_dim(sched);
3356 gen->first_shared = isl_dim_size(dim, isl_dim_param);
3357 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
3358 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
3360 dim = isl_union_map_get_dim(sched);
3361 par = parametrization(dim, gen->shared_len + gen->n_block,
3362 0, gen->shared_len, "g");
3363 sched = isl_union_map_intersect_range(sched,
3364 isl_union_set_from_set(par));
3366 dim = isl_union_map_get_dim(sched);
3367 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
3369 gen->shared_sched = sched;
3370 gen->shared_proj = isl_union_map_from_map(proj);
3373 /* Group references of all arrays in the program.
3375 static void group_references(struct cuda_gen *gen)
3377 int i;
3378 isl_union_map *sched;
3380 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3381 isl_union_map_copy(gen->shared_proj));
3383 for (i = 0; i < gen->n_array; ++i)
3384 group_array_references(gen, &gen->array[i], sched);
3386 isl_union_map_free(sched);
3389 /* Free all array information that is local to the current kernel.
3391 static void free_local_array_info(struct cuda_gen *gen)
3393 int i, j;
3395 for (i = 0; i < gen->n_array; ++i) {
3396 struct cuda_array_info *array = &gen->array[i];
3398 for (j = 0; j < array->n_group; ++j)
3399 free_array_ref_group(array->groups[j], array->n_index);
3400 free(array->groups);
3402 if (array->n_group == 0)
3403 continue;
3404 for (j = 0; j < gen->array[i].n_index; ++j) {
3405 isl_pw_aff_free(gen->array[i].local_bound[j]);
3406 gen->array[i].local_bound[j] = NULL;
3411 static void print_iterator_list(FILE *out, int len, const char *prefix,
3412 int parens)
3414 int i;
3416 fprintf(out, "(");
3417 for (i = 0; i < len; ++i) {
3418 if (i)
3419 fprintf(out, ", ");
3420 if (parens)
3421 fprintf(out, "(%s%d)", prefix, i);
3422 else
3423 fprintf(out, "%s%d", prefix, i);
3425 fprintf(out, ")");
3428 /* Print an access to the element in the global memory copy of the
3429 * given array that corresponds to element [a0][a1]... of the original array.
3430 * The copy in global memory has been linearized, so we need to take
3431 * the array size into account.
3433 static void print_global_index(isl_ctx *ctx, FILE *out,
3434 struct cuda_array_info *array)
3436 int i;
3437 isl_printer *prn;
3439 if (cuda_array_is_scalar(array)) {
3440 fprintf(out, "*%s", array->name);
3441 return;
3444 fprintf(out, "%s[", array->name);
3445 for (i = 0; i + 1 < array->n_index; ++i)
3446 fprintf(out, "(");
3447 for (i = 0; i < array->n_index; ++i) {
3448 if (i) {
3449 prn = isl_printer_to_file(ctx, out);
3450 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3451 prn = isl_printer_print_str(prn, ") * (");
3452 prn = isl_printer_print_pw_aff(prn,
3453 array->local_bound[i]);
3454 prn = isl_printer_print_str(prn, ") + ");
3455 isl_printer_free(prn);
3457 fprintf(out, "a%d", i);
3459 fprintf(out, "]");
3462 /* Print an access to the element in the shared memory copy of the
3463 * given array that corresponds to element [a0][a1]... of the original array.
3464 * Since the array in shared memory is just a shifted copy of part
3465 * of the original array, we simply need to subtract the lower bound,
3466 * which was computed in can_tile_for_shared_memory.
3467 * If any of the indices is strided, then we first add
3468 * shared_bound[i].shift and divide by shared_bound[i].stride.
3470 static void print_local_index(FILE *out, struct cuda_array_ref_group *group)
3472 int i;
3473 isl_ctx *ctx;
3474 isl_printer *prn;
3475 struct cuda_array_bound *bounds = group->shared_bound;
3477 ctx = isl_dim_get_ctx(group->array->dim);
3478 print_array_name(out, group);
3479 for (i = 0; i < group->array->n_index; ++i) {
3480 fprintf(out, "[(a%d", i);
3481 if (bounds[i].shift) {
3482 fprintf(out, " + (");
3483 prn = isl_printer_to_file(ctx, out);
3484 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3485 prn = isl_printer_print_qpolynomial(prn,
3486 bounds[i].shift);
3487 prn = isl_printer_print_str(prn, "))/");
3488 prn = isl_printer_print_isl_int(prn,
3489 bounds[i].stride);
3490 isl_printer_free(prn);
3491 } else
3492 fprintf(out, ")");
3493 fprintf(out, " - (");
3494 prn = isl_printer_to_file(ctx, out);
3495 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3496 prn = isl_printer_print_aff(prn, bounds[i].lb);
3497 isl_printer_free(prn);
3498 fprintf(out, ")]");
3502 /* Print '#define's for copying data from global memory to shared
3503 * memory and back for the given array.
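 *
 * For a hypothetical two-dimensional array "A" mapped to shared memory,
 * the generated macros would look like
 *
 *	#define read_shared_A(a0, a1) read_shared_A_((a0), (a1))
 *	#define read_shared_A_(a0, a1) shared_A[...] = A[...]
 *
 * and similarly for the write direction, with the assignment reversed.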
3505 static void print_array_copy_defines(struct cuda_gen *gen,
3506 struct cuda_array_ref_group *group)
3508 int i;
3509 const char *type[] = { "read", "write" };
3510 struct cuda_array_info *array = group->array;
3511 int n_index = array->n_index;
3513 for (i = 0; i < 2; ++i) {
3514 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3515 print_array_name(gen->cuda.kernel_c, group);
3516 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3517 fprintf(gen->cuda.kernel_c, " %s_", type[i]);
3518 print_array_name(gen->cuda.kernel_c, group);
3519 fprintf(gen->cuda.kernel_c, "_");
3520 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 1);
3521 fprintf(gen->cuda.kernel_c, "\n");
3523 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3524 print_array_name(gen->cuda.kernel_c, group);
3525 fprintf(gen->cuda.kernel_c, "_");
3526 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3527 if (i) {
3528 fprintf(gen->cuda.kernel_c, " ");
3529 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3530 fprintf(gen->cuda.kernel_c, " = ");
3531 print_local_index(gen->cuda.kernel_c, group);
3532 } else {
3533 fprintf(gen->cuda.kernel_c, " ");
3534 print_local_index(gen->cuda.kernel_c, group);
3535 fprintf(gen->cuda.kernel_c, " = ");
3536 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3538 fprintf(gen->cuda.kernel_c, "\n");
3542 static void print_copy_defines(struct cuda_gen *gen)
3544 int i, j;
3546 for (i = 0; i < gen->n_array; ++i) {
3547 struct cuda_array_info *array = &gen->array[i];
3549 for (j = 0; j < array->n_group; ++j) {
3550 if (array->groups[j]->private_bound)
3551 continue;
3552 if (!array->groups[j]->shared_bound)
3553 continue;
3554 print_array_copy_defines(gen, array->groups[j]);
3559 /* The sizes of the arrays on the host that have been computed by
3560 * extract_array_info may depend on the parameters. Use the extra
3561 * constraints on the parameters that are valid at "host_domain"
3562 * to simplify these expressions.
3564 static void localize_bounds(struct cuda_gen *gen,
3565 __isl_keep isl_set *host_domain)
3567 int i, j;
3568 isl_set *context;
3569 unsigned nvar;
3571 context = isl_set_copy(host_domain);
3572 nvar = isl_set_dim(host_domain, isl_dim_set);
3573 context = isl_set_project_out(context, isl_dim_set, 0, nvar);
3575 for (i = 0; i < gen->n_array; ++i) {
3576 struct cuda_array_info *array = &gen->array[i];
3578 if (array->n_group == 0)
3579 continue;
3581 for (j = 0; j < array->n_index; ++j) {
3582 isl_pw_aff *pwaff;
3584 pwaff = isl_pw_aff_copy(array->bound[j]);
3585 pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
3586 array->local_bound[j] = pwaff;
3589 isl_set_free(context);
3592 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3593 * in the statement list u.
3594 * Because of the way the schedule is constructed, the other statements
3595 * in the list, if any, should have the same values for these properties.
3597 static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
3599 int nr;
3600 struct cuda_stmt *stmt;
3602 nr = atoi(u->statement->name + 2);
3603 stmt = &gen->stmts[nr];
3605 gen->tile_len = stmt->tile_len;
3606 gen->n_parallel = stmt->n_parallel;
3609 /* This function is called for each leaf in the clast of the host code.
3610 * We first specialize the schedule to the site of the leaf, compute
3611 * the size of shared memory and then print the body of host code
3612 * and the associated kernel (through a call to print_kernel_body).
3614 static void print_host_user(struct gpucode_info *code,
3615 struct clast_user_stmt *u)
3617 struct cuda_gen *gen = code->user;
3618 isl_dim *dim;
3619 isl_set *par;
3620 isl_set *host_domain;
3621 isl_union_map *access;
3622 isl_union_map *local_sched;
3623 isl_union_set *arrays;
3625 set_tile_len(gen, u);
3626 read_sizes(gen);
3628 host_domain = extract_entire_host_domain(u);
3630 local_sched = isl_union_map_intersect_range(
3631 isl_union_map_copy(gen->sched),
3632 isl_union_set_from_set(extend(isl_set_copy(host_domain),
3633 gen->untiled_len)));
3634 access = isl_union_map_union(isl_union_map_copy(gen->read),
3635 isl_union_map_copy(gen->write));
3636 access = isl_union_map_apply_domain(access,
3637 isl_union_map_copy(local_sched));
3638 arrays = isl_union_map_range(access);
3640 print_indent(code->dst, code->indent);
3641 fprintf(code->dst, "dim3 k%d_dimBlock(", gen->kernel_id);
3642 print_reverse_list(code->dst, gen->n_block, gen->block_dim);
3643 fprintf(code->dst, ");\n");
3645 print_indent(code->dst, code->indent);
3646 fprintf(code->dst, "dim3 k%d_dimGrid(", gen->kernel_id);
3647 print_reverse_list(code->dst, gen->n_grid, gen->grid_dim);
3648 fprintf(code->dst, ");\n");
3650 gen->tiled_sched = tile_schedule(gen, local_sched);
3651 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
3652 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
3654 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
3656 dim = isl_union_map_get_dim(gen->local_sched);
3657 par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
3658 gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
3659 isl_union_set_from_set(par));
3661 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
3662 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
3664 gen->private_access = NULL;
3665 compute_shared_sched(gen);
3666 gen->privatization = compute_privatization(gen);
3667 group_references(gen);
3668 compute_private_size(gen);
3669 localize_bounds(gen, host_domain);
3671 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
3673 print_copy_defines(gen);
3674 print_kernel_launch(gen, arrays);
3676 fprintf(gen->cuda.kernel_c, "{\n");
3678 print_kernel_body(gen, host_domain, gen->tiled_sched);
3680 fprintf(gen->cuda.kernel_c, "}\n");
3682 free_local_array_info(gen);
3683 isl_map_free(gen->privatization);
3684 isl_union_map_free(gen->private_access);
3685 isl_union_map_free(gen->local_sched);
3686 isl_union_map_free(gen->tiled_sched);
3687 isl_union_map_free(gen->shared_sched);
3688 isl_union_map_free(gen->shared_proj);
3689 isl_union_set_free(arrays);
3690 isl_set_free(host_domain);
3692 free(gen->tile_size);
3693 gen->kernel_id++;
3696 /* Use CLooG to generate code for the outer gen->tile_first loops
3697 * of the global schedule in gen->sched.
3698 * The pretty printing of this code is handled by gpu_print_host_stmt,
3699 * which calls print_host_user for each kernel invocation location.
3701 static void print_cloog_host_code(struct cuda_gen *gen)
3703 int i;
3704 isl_set *context;
3705 isl_union_map *sched;
3706 CloogOptions *options;
3707 CloogDomain *cloog_context;
3708 CloogUnionDomain *ud;
3709 CloogInput *input;
3710 struct clast_stmt *stmt;
3711 char name[20];
3713 options = cloog_options_malloc(gen->state);
3714 options->language = LANGUAGE_C;
3715 options->otl = 0;
3716 options->strides = 1;
3717 options->stop = gen->tile_first;
3718 options->f = gen->untiled_len;
3719 options->l = gen->untiled_len;
3720 options->save_domains = 1;
3721 options->noscalars = 1;
3723 sched = isl_union_map_copy(gen->sched);
3724 ud = cloog_union_domain_from_isl_union_map(sched);
3725 for (i = 0; i < options->stop; ++i) {
3726 snprintf(name, sizeof(name), "h%d", i);
3727 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
3729 context = isl_set_copy(gen->context);
3730 cloog_context = cloog_domain_from_isl_set(context);
3731 input = cloog_input_alloc(cloog_context, ud);
3733 stmt = cloog_clast_create_from_input(input, options);
3735 gen->code.indent = 0;
3736 gen->code.dst = gen->cuda.host_c;
3737 gen->code.print_user_stmt = NULL;
3738 gen->code.print_user_stmt_list = &print_host_user;
3739 gen->code.print_for_head = NULL;
3740 gen->code.print_for_foot = NULL;
3741 gen->code.user = gen;
3742 gpu_print_host_stmt(&gen->code, stmt);
3744 cloog_clast_free(stmt);
3745 cloog_options_free(options);
3746 fprintf(gen->cuda.host_c, "\n");
3749 void print_cuda_macros(struct cuda_gen *gen)
3751 const char *macros =
3752 "#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
3753 "#define cudaCheckKernel()"
3754 " assert(cudaGetLastError() == cudaSuccess)\n\n";
3755 fputs(macros, gen->cuda.host_c);
3758 void print_host_code(struct cuda_gen *gen)
3760 fprintf(gen->cuda.host_c, "{\n");
3761 print_cloog_macros(gen->cuda.host_c);
3762 print_cloog_macros(gen->cuda.kernel_c);
3764 print_cuda_macros(gen);
3766 declare_device_arrays(gen);
3768 allocate_device_arrays(gen);
3769 copy_arrays_to_device(gen);
3771 gen->kernel_id = 0;
3772 print_cloog_host_code(gen);
3774 copy_arrays_from_device(gen);
3775 free_device_arrays(gen);
3777 fprintf(gen->cuda.host_c, "}\n");
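/* Intersect "set" with the context described by "str", if any,
 * after aligning the parameters of both sets.
 */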
3780 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
3781 const char *str)
3783 isl_ctx *ctx;
3784 isl_set *context;
3786 if (!str)
3787 return set;
3789 ctx = isl_set_get_ctx(set);
3790 context = isl_set_read_from_str(ctx, str, -1);
3791 context = isl_set_align_params(context, isl_set_get_dim(set));
3792 set = isl_set_intersect(set, context);
3794 return set;
3797 /* Return the union of the iteration domains of the statements gen->stmts[i].
3799 static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
3801 int i;
3802 isl_union_set *domain;
3804 domain = isl_union_set_empty(isl_set_get_dim(gen->context));
3805 for (i = 0; i < gen->n_stmts; ++i) {
3806 isl_set *domain_i;
3808 domain_i = isl_set_copy(gen->stmts[i].domain);
3809 domain = isl_union_set_union(domain,
3810 isl_union_set_from_set(domain_i));
3813 return domain;
3816 /* Information about the outermost tilable bands in the forest of bands.
3818 * tile_len and n_parallel are only set on band_info structures
3819 * that correspond to outermost bands. For other bands (in particular,
3820 * ancestors of the outermost bands), n_parallel is set to 0.
3822 * prefix is the (padded) schedule leading up to the outermost tilable bands.
3824 * tile_first is the number of schedule dimensions in prefix.
3826 * suffix is the schedule of the outermost tilable bands and their descendants.
3828 struct band_info {
3829 struct cuda_gen *gen;
3830 int tile_first;
3831 int tile_len;
3832 int n_parallel;
3833 isl_union_map *prefix;
3834 isl_union_map *suffix;
3837 /* Set the tile_len and n_parallel of the statement to those of
3838 * its outermost band, as recorded in the band_info.
3840 static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
3842 struct band_info *info = user;
3843 int nr;
3844 struct cuda_stmt *stmt;
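/* The input tuple of "map" is assumed to be named "S_<nr>";
 * skip the two-character prefix to recover the statement number.
 */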
3846 nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
3847 stmt = &info->gen->stmts[nr];
3849 stmt->tile_len = info->tile_len;
3850 stmt->n_parallel = info->n_parallel;
3852 isl_map_free(map);
3854 return 0;
3857 static void list_select_outer_band(struct cuda_gen *gen,
3858 __isl_take isl_band_list *list, int pos, struct band_info *list_info);
3860 /* Check if this band has any parallel loops. If so, take it as
3861 * the outermost tilable band. If not, continue looking for the
3862 * outermost tilable band in the children of the current band.
3864 static void band_select_outer_band(struct cuda_gen *gen,
3865 __isl_take isl_band *band, int pos, struct band_info *info)
3867 int n = isl_band_n_member(band);
3868 int n_parallel;
3870 for (n_parallel = 0; n_parallel < n; ++n_parallel)
3871 if (!isl_band_member_is_zero_distance(band, n_parallel))
3872 break;
3874 info->n_parallel = n_parallel;
3875 if (n_parallel) {
3876 info->gen = gen;
3877 info->tile_first = pos;
3878 info->tile_len = n;
3879 info->prefix = isl_band_get_prefix_schedule(band);
3880 info->suffix = isl_union_map_flat_range_product(
3881 isl_band_get_partial_schedule(band),
3882 isl_band_get_suffix_schedule(band));
3883 isl_union_map_foreach_map(info->prefix,
3884 &set_stmt_tile_len, info);
3885 } else {
3886 isl_band_list *children;
3887 if (!isl_band_has_children(band))
3888 isl_die(isl_band_get_ctx(band), isl_error_unknown,
3889 "unable to detect any parallelism", abort());
3890 children = isl_band_get_children(band);
3891 list_select_outer_band(gen, children, pos + n, info);
3894 isl_band_free(band);
3897 /* Comparison function that returns a non-zero value for band_infos
3898 * with different tile_len fields or different n_parallel fields.
3900 static int cmp_band(const void *p1, const void *p2)
3902 const struct band_info *info1 = p1;
3903 const struct band_info *info2 = p2;
3905 if (info1->tile_len != info2->tile_len)
3906 return info1->tile_len - info2->tile_len;
3908 return info1->n_parallel - info2->n_parallel;
3911 /* Extend the range of "umap" with coordinates that have the fixed value "val"
3912 * to a total length of "dst_len", assuming the original dimension is "src_len".
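 * For example, with "src_len" 1, "dst_len" 3 and "val" 0, a (hypothetical)
 * schedule { S_0[i] -> [i] } becomes { S_0[i] -> [i, 0, 0] }.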
3914 static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
3915 int src_len, int dst_len, int val)
3917 isl_dim *dim;
3918 isl_map *map;
3919 int i;
3921 dim = isl_union_map_get_dim(umap);
3922 map = isl_map_reverse(projection(dim, dst_len, src_len));
3923 for (i = src_len; i < dst_len; ++i)
3924 map = isl_map_fix_si(map, isl_dim_out, i, val);
3926 umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
3928 return umap;
3931 /* Group bands with the same values for tile_len and n_parallel.
3932 * The prefix schedule is then extended with a fixed coordinate that
3933 * is different for each such group.
3934 * Note that the actual values for this coordinate are not important.
3935 * The bands have already been effectively separated at a higher level
3936 * or they are independent and may be executed in parallel.
3937 * The list of band_info has been sorted before this function is called.
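 * For example (with hypothetical values), if the sorted list contains
 * bands with (tile_len, n_parallel) equal to (2, 1), (2, 1) and (3, 2),
 * then the first two prefix schedules are extended with the coordinate 0
 * and the third one with the coordinate 1.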
3939 static void separate_bands(struct band_info *info, int n)
3941 int i;
3942 int j = 0;
3944 for (i = 0; i < n; ++i) {
3945 int l = info[i].tile_first;
3947 if (i &&
3948 (info[i].tile_len != info[i - 1].tile_len ||
3949 info[i].n_parallel != info[i - 1].n_parallel))
3950 j++;
3952 info[i].prefix = extend_range(info[i].prefix,
3953 l, l + 1, j);
3954 info[i].tile_first = l + 1;
3958 /* Select the outermost bands in the elements of the list, align
3959 * their prefix schedules, separate bands with different values
3960 * for tile_len and/or n_parallel and then combine the resulting
3961 * prefix and suffix schedules into a single pair of prefix and
3962 * suffix schedules for the entire list.
3964 static void list_select_outer_band(struct cuda_gen *gen,
3965 __isl_take isl_band_list *list, int pos, struct band_info *list_info)
3967 isl_band *band;
3968 int i;
3969 int n = isl_band_list_n_band(list);
3970 isl_ctx *ctx = isl_band_list_get_ctx(list);
3971 struct band_info *info;
3972 int max_tile_first;
3973 isl_union_map *prefix;
3974 isl_union_map *suffix;
3976 assert(n >= 1);
3977 info = isl_calloc_array(ctx, struct band_info, n);
3978 assert(info);
3980 max_tile_first = 0;
3981 for (i = 0; i < n; ++i) {
3982 band = isl_band_list_get_band(list, i);
3983 band_select_outer_band(gen, band, pos, &info[i]);
3984 if (info[i].tile_first > max_tile_first)
3985 max_tile_first = info[i].tile_first;
3988 for (i = 0; i < n; ++i) {
3989 if (info[i].tile_first == max_tile_first)
3990 continue;
3991 info[i].prefix = extend_range(info[i].prefix,
3992 info[i].tile_first, max_tile_first, 0);
3995 qsort(info, n, sizeof(struct band_info), &cmp_band);
3997 for (i = 0; i < n - 1; ++i)
3998 if (info[i].tile_len != info[i + 1].tile_len ||
3999 info[i].n_parallel != info[i + 1].n_parallel)
4000 break;
4002 if (i < n - 1)
4003 separate_bands(info, n);
4005 prefix = info[0].prefix;
4006 suffix = info[0].suffix;
4008 for (i = 1; i < n; ++i) {
4009 prefix = isl_union_map_union(prefix, info[i].prefix);
4010 suffix = isl_union_map_union(suffix, info[i].suffix);
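/* The bands in the list may have different tile_len and n_parallel,
 * so only tile_first is meaningful for the list as a whole;
 * the per-statement values were recorded by set_stmt_tile_len.
 */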
4013 list_info->tile_first = info[0].tile_first;
4014 list_info->tile_len = -1;
4015 list_info->prefix = prefix;
4016 list_info->suffix = suffix;
4018 isl_band_list_free(list);
4019 free(info);
4022 /* Set max_out to the maximal number of output dimensions over
4023 * all maps.
4025 static int update_max_out(__isl_take isl_map *map, void *user)
4027 int *max_out = user;
4028 int n_out = isl_map_dim(map, isl_dim_out);
4030 if (n_out > *max_out)
4031 *max_out = n_out;
4033 isl_map_free(map);
4034 return 0;
4037 struct align_range_data {
4038 int max_out;
4039 isl_union_map *res;
4042 /* Extend the dimension of the range of the given map to data->max_out and
4043 * then add the result to data->res.
4045 static int map_align_range(__isl_take isl_map *map, void *user)
4047 struct align_range_data *data = user;
4048 int i;
4049 isl_dim *dim;
4050 isl_map *proj;
4051 int n_out = isl_map_dim(map, isl_dim_out);
4053 dim = isl_union_map_get_dim(data->res);
4054 proj = isl_map_reverse(projection(dim, data->max_out, n_out));
4055 for (i = n_out; i < data->max_out; ++i)
4056 proj = isl_map_fix_si(proj, isl_dim_out, i, 0);
4058 map = isl_map_apply_range(map, proj);
4060 data->res = isl_union_map_add_map(data->res, map);
4062 return 0;
4065 /* Extend the ranges of the maps in the union map such that they all have
4066 * the same dimension.
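 * For example, the (hypothetical) schedule { S_0[i] -> [i]; S_1[i, j] -> [i, j] }
 * would become { S_0[i] -> [i, 0]; S_1[i, j] -> [i, j] }.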
4068 static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
4070 struct align_range_data data;
4072 data.max_out = 0;
4073 isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);
4075 data.res = isl_union_map_empty(isl_union_map_get_dim(umap));
4076 isl_union_map_foreach_map(umap, &map_align_range, &data);
4078 isl_union_map_free(umap);
4079 return data.res;
4082 /* Select the outermost tilable band that (by construction)
4083 * has at least one parallel loop.
4084 * The starting position of the aligned band is stored in
4085 * gen->tile_first.
4086 * The sizes and number of parallel loops may be different in different
4087 * parts of the band forest and are therefore stored in the cuda_stmts.
4089 * Return the complete schedule, with the tilable bands aligned
4090 * at gen->tile_first and padded with zero, if needed.
4092 static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
4093 __isl_keep isl_schedule *schedule)
4095 isl_band_list *list;
4096 struct band_info info;
4098 gen->n_parallel = 0;
4099 gen->tile_len = -1;
4101 list = isl_schedule_get_band_forest(schedule);
4103 list_select_outer_band(gen, list, 0, &info);
4105 gen->tile_first = info.tile_first;
4106 info.suffix = align_range(info.suffix);
4108 return isl_union_map_flat_range_product(info.prefix, info.suffix);
4111 /* Set gen->untiled_len to the number of scheduling dimensions
4112 * for the schedule of the first domain.
4113 * We assume here that this number is the same for all domains.
4115 static int set_untiled_len(__isl_take isl_map *map, void *user)
4117 unsigned *untiled_len = user;
4119 *untiled_len = isl_map_dim(map, isl_dim_out);
4121 isl_map_free(map);
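/* Return a negative value to stop the iteration after the first map. */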
4122 return -1;
4125 /* Compute an appropriate schedule based on the accesses in
4126 * gen->read and gen->write.
4128 * We first compute dependences and then use those to compute
4129 * a schedule that has a parallel loop in each tilable band.
4130 * Finally, we select the outermost tilable band.
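 * In particular, the flow dependences are computed with the reads as
 * sinks and the false (anti and output) dependences with the writes as
 * sinks; the union of all these dependences is then used both as
 * validity and as proximity constraints for the scheduler.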
4132 static void compute_schedule(struct cuda_gen *gen,
4133 __isl_take isl_union_map *sched)
4135 isl_ctx *ctx = isl_union_map_get_ctx(sched);
4136 isl_union_set *domain;
4137 isl_union_map *empty;
4138 isl_union_map *dep_raw, *dep2, *dep3, *dep;
4139 isl_union_map *uninitialized;
4140 isl_schedule *schedule;
4141 struct isl_options *options;
4143 empty = isl_union_map_empty(isl_union_map_get_dim(sched));
4145 isl_union_map_compute_flow(isl_union_map_copy(gen->read),
4146 isl_union_map_copy(gen->write), empty,
4147 isl_union_map_copy(sched),
4148 &dep_raw, NULL, &uninitialized, NULL);
4149 isl_union_map_compute_flow(isl_union_map_copy(gen->write),
4150 isl_union_map_copy(gen->write),
4151 isl_union_map_copy(gen->read),
4152 isl_union_map_copy(sched),
4153 &dep2, &dep3, NULL, NULL);
4154 isl_union_map_free(sched);
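/* The array elements that may be read before they are written
 * (the "uninitialized" reads) need to be copied to the device
 * before any kernel is launched.
 */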
4156 gen->copy_in = isl_union_map_range(uninitialized);
4158 dep = isl_union_map_union(dep2, dep3);
4159 dep = isl_union_map_union(dep, dep_raw);
4160 dep = isl_union_map_coalesce(dep);
4162 domain = extract_domain(gen);
4163 options = isl_ctx_peek_options(ctx, isl_options_arg);
4164 options->schedule_outer_zero_distance = 1;
4165 schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
4166 isl_union_map_copy(dep), dep);
4168 sched = select_outer_tilable_band(gen, schedule);
4170 isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
4171 sched = isl_union_map_intersect_domain(sched, domain);
4172 gen->sched = sched;
4174 isl_schedule_free(schedule);
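/* Extract a cuda_stmt_access for the access in "expr",
 * append it to the list pointed to by "next_access" and
 * return a pointer to the new end of the list.
 */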
4177 static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
4178 struct cuda_stmt_access **next_access)
4180 struct cuda_stmt_access *access;
4181 isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);
4183 access = isl_alloc_type(ctx, struct cuda_stmt_access);
4184 assert(access);
4185 access->next = NULL;
4186 access->read = expr->acc.read;
4187 access->write = expr->acc.write;
4188 access->access = isl_map_copy(expr->acc.access);
4190 *next_access = access;
4191 next_access = &(*next_access)->next;
4192 return next_access;
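/* Recursively extract all accesses in "expr" and append them to
 * the list pointed to by "next_access".
 * Return a pointer to the end of the list.
 */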
4195 static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
4196 struct cuda_stmt_access **next_access)
4198 int i;
4200 for (i = 0; i < expr->n_arg; ++i)
4201 next_access = expr_extract_accesses(expr->args[i],
4202 next_access);
4204 if (expr->type == pet_expr_access)
4205 next_access = expr_extract_access(expr, next_access);
4207 return next_access;
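/* Collect all accesses in the body of "stmt" in stmt->accesses.
 */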
4210 static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
4212 struct cuda_stmt_access **next_access = &stmt->accesses;
4214 stmt->accesses = NULL;
4215 expr_extract_accesses(stmt->body, next_access);
4218 /* Return an array of cuda_stmt representing the statements in "scop".
4220 static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
4221 __isl_keep isl_set *context)
4223 int i;
4224 struct cuda_stmt *stmts;
4226 stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
4227 assert(stmts);
4229 for (i = 0; i < scop->n_stmt; ++i) {
4230 struct cuda_stmt *s = &stmts[i];
4232 s->domain = isl_set_copy(scop->stmts[i]->domain);
4233 s->domain = isl_set_intersect(s->domain, isl_set_copy(context));
4234 s->body = scop->stmts[i]->body;
4235 pet_stmt_extract_accesses(s);
4238 return stmts;
4241 /* Replace the scop in the "input" file by equivalent code
4242 * that uses the GPU. "scop" is assumed to correspond to this scop.
4244 * We first compute a schedule that respects the dependences
4245 * of the original program and select the outermost band
4246 * of tilable dimensions that has at least one parallel loop.
4247 * We then have three blocks of dimensions
4249 * H B G
4251 * The tilable band "B" is first tiled according to "tile.sizes", resulting
4252 * in
4254 * H T P G
4256 * For each iteration of the T loop and for each array, we compute
4257 * the array elements accessed by that iteration, construct a rectangular
4258 * box around them and shift it to the origin. The result is used
4259 * as shared memory for the array.
4261 * We then split off at most 2 parallel loops from the T loops and
4262 * at most 3 parallel loops from the P loops
4264 * H T1 T2 P1 P2 G
4266 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4267 * according to "grid.sizes"/"block.sizes".
4269 * H T1T T1P T2 P1T P1P P2 G
4271 * Finally, the T1P and P1P iterators are equated to the block and
4272 * thread dimensions respectively and so are effectively removed.
4273 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4274 * are run on the GPU.
4276 * Code is generated in three stages. We first generate code for the
4277 * host (the H loops), with iterators h%d. Then, for each leaf node
4278 * of the resulting AST, we generate code for the shared loops (up to
4279 * and including T2), with iterators g%d and after equating the H loops
4280 * to h%d parameters and the T1P loops to the block dimensions.
4281 * Finally, we generate code for the remaining loops in a similar fashion.
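 * As a purely illustrative example, for a band of two parallel loops
 * tiled with tile.sizes "32 32", both T loops would end up in T1 and
 * both P loops in P1, so that the T loops are wrapped over the blocks
 * of the grid ("grid.sizes") and the P loops over the threads of each
 * block ("block.sizes").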
4283 int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
4284 const char *input)
4286 isl_union_map *sched;
4287 struct cuda_gen gen;
4289 if (!scop)
4290 return -1;
4292 scop = pet_scop_align_params(scop);
4294 gen.ctx = ctx;
4295 gen.context = isl_set_copy(scop->context);
4296 gen.context = add_context_from_str(gen.context, options->ctx);
4297 gen.n_stmts = scop->n_stmt;
4298 gen.stmts = extract_stmts(ctx, scop, gen.context);
4299 gen.read = pet_scop_collect_reads(scop);
4300 gen.write = pet_scop_collect_writes(scop);
4301 gen.options = options;
4302 gen.state = cloog_isl_state_malloc(gen.ctx);
4303 gen.scop = scop;
4305 cuda_open_files(&gen.cuda, input);
4307 collect_array_info(&gen);
4309 sched = pet_scop_collect_schedule(scop);
4311 compute_schedule(&gen, sched);
4313 print_host_code(&gen);
4315 cloog_state_free(gen.state);
4316 clear_cuda_gen(&gen);
4318 cuda_close_files(&gen.cuda);
4320 return 0;