Explicitly cast to void** when allocating memory
[ppcg.git] / cuda.c
1 /*
2 * Copyright 2010-2011 INRIA Saclay
4 * Use of this software is governed by the GNU LGPLv2.1 license
6 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
7 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
8 * 91893 Orsay, France
9 */
11 #include <assert.h>
12 #include <stdlib.h>
14 #include <isl/polynomial.h>
15 #include <isl/union_set.h>
16 #include <isl/aff.h>
17 #include <isl/ilp.h>
18 #include <isl/flow.h>
19 #include <isl/band.h>
20 #include <isl/schedule.h>
21 #include <isl/options.h>
22 #include <cloog/isl/cloog.h>
24 #include "cuda.h"
25 #include "cuda_common.h"
26 #include "gpucode.h"
27 #include "schedule.h"
28 #include "ppcg_options.h"
30 /* The fields stride, shift and shift_map only contain valid information
31 * if shift != NULL.
32 * If so, they express that the current index is such that if you add shift,
33 * then the result is always a multiple of stride.
34 * shift_map contains the mapping
36 * i -> (i + shift)/stride
38 struct cuda_array_bound {
39 isl_int size;
40 isl_aff *lb;
42 isl_int stride;
43 isl_qpolynomial *shift;
44 isl_basic_map *shift_map;
47 struct cuda_array_info;
49 /* A group of array references in a kernel that should be handled together.
50 * If private_bound is not NULL, then it is mapped to registers.
51 * Otherwise, if shared_bound is not NULL, it is mapped to shared memory.
52 * Otherwise, it is accessed from global memory.
54 struct cuda_array_ref_group {
55 /* The references in this group access this array. */
56 struct cuda_array_info *array;
57 /* Position of this group in the list of reference groups of array. */
58 int nr;
60 /* The following fields are used during the construction of the groups.
61 * access is the combined access relation relative to the shared
62 * memory tiling.
63 * write is set if any access in the group is a write.
65 isl_map *access;
66 int write;
68 /* For each index, size and offset of piece in shared memory. */
69 struct cuda_array_bound *shared_bound;
71 /* For each index, size and offset of piece in private memory. */
72 struct cuda_array_bound *private_bound;
74 /* References in this group; point to elements of a linked list. */
75 int n_ref;
76 struct cuda_stmt_access **refs;
79 struct cuda_array_info {
80 isl_dim *dim;
81 /* Element type. */
82 char *type;
83 /* Name of the array. */
84 char *name;
85 /* Number of indices. */
86 unsigned n_index;
87 /* For each index, a bound on the array in that direction. */
88 isl_pw_aff **bound;
89 /* For each index, bound[i] specialized to the current kernel. */
90 isl_pw_aff **local_bound;
92 /* All references to this array; point to elements of a linked list. */
93 int n_ref;
94 struct cuda_stmt_access **refs;
96 /* The reference groups associated with this array. */
97 int n_group;
98 struct cuda_array_ref_group **groups;
100 /* Last shared memory tile dimension that affects tile of this array. */
101 int last_shared;
102 /* Dimension at which copying to/from shared memory is printed.
103 * if >= 0, then the value is >= last_shared
104 * if -1, then the copying is done at the leaf level.
106 int print_shared_level;
109 /* Print the name of the local copy of a given group of array references.
111 static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
113 int global = 0;
115 if (group->private_bound)
116 fprintf(out, "private_");
117 else if (group->shared_bound)
118 fprintf(out, "shared_");
119 else
120 global = 1;
121 fprintf(out, "%s", group->array->name);
122 if (!global && group->array->n_group > 1)
123 fprintf(out, "_%d", group->nr);
126 /* Collect all references to the given array and store pointers to them
127 * in array->refs.
129 static void collect_references(struct cuda_gen *gen,
130 struct cuda_array_info *array)
132 int i;
133 int n;
135 n = 0;
136 for (i = 0; i < gen->n_stmts; ++i) {
137 struct cuda_stmt *stmt = &gen->stmts[i];
138 struct cuda_stmt_access *access;
140 for (access = stmt->accesses; access; access = access->next) {
141 const char *name;
142 name = isl_map_get_tuple_name(access->access,
143 isl_dim_out);
144 if (name && !strcmp(array->name, name))
145 n++;
149 array->n_ref = n;
150 array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
151 assert(array->refs);
153 n = 0;
154 for (i = 0; i < gen->n_stmts; ++i) {
155 struct cuda_stmt *stmt = &gen->stmts[i];
156 struct cuda_stmt_access *access;
158 for (access = stmt->accesses; access; access = access->next) {
159 const char *name;
160 name = isl_map_get_tuple_name(access->access,
161 isl_dim_out);
162 if (!name || strcmp(array->name, name))
163 continue;
165 array->refs[n++] = access;
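/* Allocate and initialize an array of "n_index" cuda_array_bound elements. */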
170 static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
172 int i;
173 struct cuda_array_bound *bound;
175 bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
176 assert(bound);
178 for (i = 0; i < n_index; ++i) {
179 isl_int_init(bound[i].size);
180 bound[i].lb = NULL;
181 isl_int_init(bound[i].stride);
182 bound[i].shift = NULL;
183 bound[i].shift_map = NULL;
186 return bound;
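/* Free the contents of an array of "n_index" cuda_array_bound elements,
 * as well as the array itself. */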
189 static void free_bound_list(struct cuda_array_bound *bound, int n_index)
191 int j;
193 if (!bound)
194 return;
196 for (j = 0; j < n_index; ++j) {
197 isl_int_clear(bound[j].size);
198 isl_int_clear(bound[j].stride);
199 isl_aff_free(bound[j].lb);
200 isl_qpolynomial_free(bound[j].shift);
201 isl_basic_map_free(bound[j].shift_map);
203 free(bound);
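/* Return the element of scop->arrays whose extent has the same tuple
 * identifier as "accessed", or NULL if there is no such array. */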
206 static struct pet_array *find_array(struct pet_scop *scop,
207 __isl_keep isl_set *accessed)
209 int i;
210 isl_id *id;
212 id = isl_set_get_tuple_id(accessed);
214 for (i = 0; i < scop->n_array; ++i) {
215 isl_id *id_i;
217 id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
218 isl_id_free(id_i);
219 if (id == id_i)
220 break;
222 isl_id_free(id);
224 return i < scop->n_array ? scop->arrays[i] : NULL;
227 /* Compute bounds on the host arrays based on the accessed elements
228 * and collect all references to the array.
230 static int extract_array_info(__isl_take isl_set *array, void *user)
232 int i;
233 struct cuda_gen *gen = (struct cuda_gen *)user;
234 const char *name;
235 int n_index;
236 isl_pw_aff **bounds;
237 isl_pw_aff **local_bounds;
238 struct pet_array *pa;
240 n_index = isl_set_dim(array, isl_dim_set);
241 name = isl_set_get_tuple_name(array);
242 bounds = isl_alloc_array(isl_set_get_ctx(array),
243 isl_pw_aff *, n_index);
244 assert(bounds);
245 local_bounds = isl_calloc_array(isl_set_get_ctx(array),
246 isl_pw_aff *, n_index);
247 assert(local_bounds);
248 gen->array[gen->n_array].dim = isl_set_get_dim(array);
249 gen->array[gen->n_array].name = strdup(name);
250 gen->array[gen->n_array].n_index = n_index;
251 gen->array[gen->n_array].bound = bounds;
252 gen->array[gen->n_array].local_bound = local_bounds;
254 pa = find_array(gen->scop, array);
255 assert(pa);
257 gen->array[gen->n_array].type = strdup(pa->element_type);
259 for (i = 0; i < n_index; ++i) {
260 isl_set *dom;
261 isl_local_space *ls;
262 isl_aff *one;
263 isl_pw_aff *bound;
264 isl_set *size = i == 0 ? array : pa->extent;
266 bound = isl_set_dim_max(isl_set_copy(size), i);
267 assert(bound);
268 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
269 ls = isl_local_space_from_dim(isl_set_get_dim(dom));
270 one = isl_aff_zero(ls);
271 one = isl_aff_add_constant_si(one, 1);
272 bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
273 bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));
275 bounds[i] = bound;
278 collect_references(gen, &gen->array[gen->n_array]);
280 gen->n_array++;
282 isl_set_free(array);
283 return 0;
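/* Construct a cuda_array_info for each array accessed (read or written)
 * by the program and collect them in gen->array. */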
286 void collect_array_info(struct cuda_gen *gen)
288 isl_union_set *arrays;
290 arrays = isl_union_map_range(isl_union_map_copy(gen->read));
291 arrays = isl_union_set_union(arrays,
292 isl_union_map_range(isl_union_map_copy(gen->write)));
293 arrays = isl_union_set_coalesce(arrays);
295 gen->n_array = isl_union_set_n_set(arrays);
296 gen->array = isl_alloc_array(gen->ctx,
297 struct cuda_array_info, gen->n_array);
298 assert(gen->array);
299 gen->n_array = 0;
300 isl_union_set_foreach_set(arrays, &extract_array_info, gen);
301 isl_union_set_free(arrays);
304 static void free_array_info(struct cuda_gen *gen)
306 int i, j;
308 for (i = 0; i < gen->n_array; ++i) {
309 int n_index = gen->array[i].n_index;
310 free(gen->array[i].type);
311 free(gen->array[i].name);
312 for (j = 0; j < n_index; ++j) {
313 isl_pw_aff_free(gen->array[i].bound[j]);
314 isl_pw_aff_free(gen->array[i].local_bound[j]);
316 isl_dim_free(gen->array[i].dim);
317 free(gen->array[i].bound);
318 free(gen->array[i].local_bound);
319 free(gen->array[i].refs);
321 free(gen->array);
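/* Print a declaration of a device counterpart "dev_<name>" for each array. */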
324 static void declare_device_arrays(struct cuda_gen *gen)
326 int i;
328 for (i = 0; i < gen->n_array; ++i)
329 fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
330 gen->array[i].type, gen->array[i].name);
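/* Print an expression for the size of "array" in bytes: the product of
 * the bounds in each dimension multiplied by sizeof the element type. */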
333 static void print_array_size(struct cuda_gen *gen, FILE *out,
334 struct cuda_array_info *array)
336 int i;
337 isl_printer *prn;
339 prn = isl_printer_to_file(gen->ctx, out);
340 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
341 for (i = 0; i < array->n_index; ++i) {
342 prn = isl_printer_print_str(prn, "(");
343 prn = isl_printer_print_pw_aff(prn, array->bound[i]);
344 prn = isl_printer_print_str(prn, ") * ");
346 prn = isl_printer_print_str(prn, "sizeof(");
347 prn = isl_printer_print_str(prn, array->type);
348 prn = isl_printer_print_str(prn, ")");
349 isl_printer_free(prn);
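/* Print a cudaMalloc call for each device array, explicitly casting the
 * address of the device pointer to void ** as expected by cudaMalloc. */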
352 static void allocate_device_arrays(struct cuda_gen *gen)
354 int i;
356 for (i = 0; i < gen->n_array; ++i) {
357 fprintf(gen->cuda.host_c, "cudaMalloc((void **) &dev_%s, ",
358 gen->array[i].name);
359 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
360 fprintf(gen->cuda.host_c, ");\n");
364 static void free_device_arrays(struct cuda_gen *gen)
366 int i;
368 for (i = 0; i < gen->n_array; ++i)
369 fprintf(gen->cuda.host_c, "cudaFree(dev_%s);\n",
370 gen->array[i].name);
373 /* Check if a cuda array is a scalar. A scalar is a value that is not stored
374 * as an array or through a pointer reference, but as a single data element. At
375 * the moment, scalars are represented as zero dimensional arrays.
377 static int cuda_array_is_scalar(struct cuda_array_info *array)
379 return (array->n_index == 0);
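/* Print a host-to-device cudaMemcpy call for each array that appears in
 * the copy-in set. Scalars are passed by address. */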
382 static void copy_arrays_to_device(struct cuda_gen *gen)
384 int i;
386 for (i = 0; i < gen->n_array; ++i) {
387 isl_dim *dim;
388 isl_set *read_i;
389 int empty;
391 dim = isl_dim_copy(gen->array[i].dim);
392 read_i = isl_union_set_extract_set(gen->copy_in, dim);
393 empty = isl_set_fast_is_empty(read_i);
394 isl_set_free(read_i);
395 if (empty)
396 continue;
398 fprintf(gen->cuda.host_c, "cudaMemcpy(dev_%s,",
399 gen->array[i].name);
401 if (cuda_array_is_scalar(&(gen->array[i])))
402 fprintf(gen->cuda.host_c, " &%s, ",
403 gen->array[i].name);
404 else
405 fprintf(gen->cuda.host_c, " %s, ", gen->array[i].name);
407 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
408 fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice);\n");
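/* Print a device-to-host cudaMemcpy call for each array that may be
 * written by the generated kernels. */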
412 static void copy_arrays_from_device(struct cuda_gen *gen)
414 int i;
415 isl_union_set *write;
416 write = isl_union_map_range(isl_union_map_copy(gen->write));
418 for (i = 0; i < gen->n_array; ++i) {
419 isl_dim *dim;
420 isl_set *write_i;
421 int empty;
423 dim = isl_dim_copy(gen->array[i].dim);
424 write_i = isl_union_set_extract_set(write, dim);
425 empty = isl_set_fast_is_empty(write_i);
426 isl_set_free(write_i);
427 if (empty)
428 continue;
430 fprintf(gen->cuda.host_c, "cudaMemcpy(%s, dev_%s, ",
431 gen->array[i].name, gen->array[i].name);
432 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
433 fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost);\n");
436 isl_union_set_free(write);
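/* Overwrite up to "len" entries of "sizes" with integers read from
 * "filename", stopping early if the file is missing or runs out of numbers. */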
439 static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
440 int *sizes, int len)
442 int i;
443 FILE *file;
445 file = fopen(filename, "r");
446 if (!file)
447 return;
449 for (i = 0; i < len; ++i)
450 if (fscanf(file, "%d", &sizes[i]) < 1)
451 break;
453 fclose(file);
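/* Reverse the order of the "len" elements of "list" in place. */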
456 static void reverse_list(int *list, int len)
458 int i;
459 int t;
461 for (i = 0; 2 * i < len; ++i) {
462 t = list[i];
463 list[i] = list[len - 1 - i];
464 list[len - 1 - i] = t;
468 /* Read user-specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
469 * after filling in some potentially useful defaults.
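 * Each of these files simply contains whitespace-separated integers;
 * for example, a "tile.sizes" file containing "32 32 16" sets the first
 * three tile sizes to 32, 32 and 16 (assuming gen->tile_len is at least 3).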
471 static void read_sizes(struct cuda_gen *gen)
473 int n;
475 gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
476 assert(gen->tile_size);
477 for (n = 0; n < gen->tile_len; ++n)
478 gen->tile_size[n] = gen->options->tile_size;
479 read_sizes_from_file(gen, "tile.sizes", gen->tile_size, gen->tile_len);
481 n = gen->n_parallel;
482 gen->n_block = (n <= 3) ? n : 3;
483 switch (gen->n_block) {
484 case 1:
485 gen->block_dim[0] = 512;
486 break;
487 case 2:
488 gen->block_dim[0] = 32;
489 gen->block_dim[1] = 16;
490 break;
491 default:
492 gen->block_dim[0] = 32;
493 gen->block_dim[1] = 4;
494 gen->block_dim[2] = 4;
495 break;
497 read_sizes_from_file(gen, "block.sizes", gen->block_dim, gen->n_block);
498 reverse_list(gen->block_dim, gen->n_block);
500 gen->n_grid = (n <= 2) ? n : 2;
501 switch (gen->n_grid) {
502 case 1:
503 gen->grid_dim[0] = 65536;
504 break;
505 default:
506 gen->grid_dim[0] = 256;
507 gen->grid_dim[1] = 256;
508 break;
510 read_sizes_from_file(gen, "grid.sizes", gen->grid_dim, gen->n_grid);
511 reverse_list(gen->grid_dim, gen->n_grid);
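/* Free an array of "n" cuda_stmt structures, including their access lists
 * and iteration domains. */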
514 static void free_stmts(struct cuda_stmt *stmts, int n)
516 int i;
518 for (i = 0; i < n; ++i) {
519 struct cuda_stmt_access *access, *next;
521 for (access = stmts[i].accesses; access; access = next) {
522 next = access->next;
523 isl_map_free(access->access);
524 free(access);
527 isl_set_free(stmts[i].domain);
529 free(stmts);
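/* Free all memory collected in the cuda_gen structure. */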
532 void clear_cuda_gen(struct cuda_gen *gen)
534 free_stmts(gen->stmts, gen->n_stmts);
535 free_array_info(gen);
536 isl_set_free(gen->context);
537 isl_union_set_free(gen->copy_in);
538 isl_union_map_free(gen->sched);
539 isl_union_map_free(gen->read);
540 isl_union_map_free(gen->write);
543 static void print_reverse_list(FILE *out, int len, int *list)
545 int i;
547 for (i = 0; i < len; ++i) {
548 if (i)
549 fprintf(out, ", ");
550 fprintf(out, "%d", list[len - 1 - i]);
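/* Print the host-side kernel launch as well as the corresponding kernel
 * prototypes in cuda.kernel_c and cuda.kernel_h. The arguments are the
 * accessed device arrays, the parameters and the host loop iterators h%d. */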
554 static void print_kernel_launch(struct cuda_gen *gen,
555 __isl_keep isl_union_set *arrays)
557 int i;
558 int first = 1;
559 unsigned nparam;
560 isl_dim *dim;
562 print_indent(gen->code.dst, gen->code.indent);
563 fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
564 gen->kernel_id, gen->kernel_id, gen->kernel_id);
565 fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
566 gen->kernel_id);
567 fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
568 gen->kernel_id);
570 for (i = 0; i < gen->n_array; ++i) {
571 isl_dim *dim;
572 isl_set *arr;
573 int empty;
575 dim = isl_dim_copy(gen->array[i].dim);
576 arr = isl_union_set_extract_set(arrays, dim);
577 empty = isl_set_fast_is_empty(arr);
578 isl_set_free(arr);
579 if (empty)
580 continue;
582 if (!first) {
583 fprintf(gen->code.dst, ", ");
584 fprintf(gen->cuda.kernel_c, ", ");
585 fprintf(gen->cuda.kernel_h, ", ");
588 fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
589 fprintf(gen->cuda.kernel_c, "%s *%s",
590 gen->array[i].type, gen->array[i].name);
591 fprintf(gen->cuda.kernel_h, "%s *%s",
592 gen->array[i].type, gen->array[i].name);
594 first = 0;
597 dim = isl_union_set_get_dim(arrays);
598 nparam = isl_dim_size(dim, isl_dim_param);
599 for (i = 0; i < nparam; ++i) {
600 const char *name = isl_dim_get_name(dim, isl_dim_param, i);
601 if (!first) {
602 fprintf(gen->code.dst, ", ");
603 fprintf(gen->cuda.kernel_c, ", ");
604 fprintf(gen->cuda.kernel_h, ", ");
606 fprintf(gen->code.dst, "%s", name);
607 fprintf(gen->cuda.kernel_c, "int %s", name);
608 fprintf(gen->cuda.kernel_h, "int %s", name);
609 first = 0;
611 isl_dim_free(dim);
613 for (i = 0; i < gen->tile_first; ++i) {
614 if (!first) {
615 fprintf(gen->code.dst, ", ");
616 fprintf(gen->cuda.kernel_c, ", ");
617 fprintf(gen->cuda.kernel_h, ", ");
619 fprintf(gen->code.dst, "h%d", i);
620 fprintf(gen->cuda.kernel_c, "int h%d", i);
621 fprintf(gen->cuda.kernel_h, "int h%d", i);
622 first = 0;
625 fprintf(gen->code.dst, ");\n");
626 fprintf(gen->cuda.kernel_c, ")\n");
627 fprintf(gen->cuda.kernel_h, ");\n");
630 /* Construct a map from a domain of dimensionality "len"
631 * to a domain of dimensionality "len" + "tile_len" that tiles
632 * the "tile_len" coordinates starting at "first".
633 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
634 * "dim" prescribes the parameters.
636 static __isl_give isl_map *tile(__isl_take isl_dim *dim, int len,
637 int first, int tile_len, int *tile_size)
639 int i;
640 isl_int v;
641 isl_basic_map *bmap;
642 isl_constraint *c;
644 isl_int_init(v);
646 dim = isl_dim_add(dim, isl_dim_in, len);
647 dim = isl_dim_add(dim, isl_dim_out, len + tile_len);
648 bmap = isl_basic_map_universe(isl_dim_copy(dim));
650 for (i = 0; i < len - tile_len; ++i) {
651 int j = i < first ? i : i + tile_len;
652 int k = i < first ? i : i + 2 * tile_len;
654 c = isl_equality_alloc(isl_dim_copy(dim));
655 isl_int_set_si(v, -1);
656 isl_constraint_set_coefficient(c, isl_dim_in, j, v);
657 isl_int_set_si(v, 1);
658 isl_constraint_set_coefficient(c, isl_dim_out, k, v);
659 bmap = isl_basic_map_add_constraint(bmap, c);
662 for (i = 0; i < tile_len; ++i) {
663 c = isl_equality_alloc(isl_dim_copy(dim));
664 isl_int_set_si(v, -1);
665 isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
666 isl_int_set_si(v, tile_size[i]);
667 isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
668 isl_int_set_si(v, 1);
669 isl_constraint_set_coefficient(c, isl_dim_out,
670 first + i + tile_len, v);
671 bmap = isl_basic_map_add_constraint(bmap, c);
673 c = isl_inequality_alloc(isl_dim_copy(dim));
674 isl_int_set_si(v, 1);
675 isl_constraint_set_coefficient(c, isl_dim_out,
676 first + i + tile_len, v);
677 bmap = isl_basic_map_add_constraint(bmap, c);
679 c = isl_inequality_alloc(isl_dim_copy(dim));
680 isl_int_set_si(v, -1);
681 isl_constraint_set_coefficient(c, isl_dim_out,
682 first + i + tile_len, v);
683 isl_int_set_si(v, tile_size[i] - 1);
684 isl_constraint_set_constant(c, v);
685 bmap = isl_basic_map_add_constraint(bmap, c);
688 isl_dim_free(dim);
689 isl_int_clear(v);
691 return isl_map_from_basic_map(bmap);
694 /* Construct a map from a domain of dimensionality "len"
695 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
696 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
697 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
698 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
699 * that are projected out at the end.
700 * "dim" prescribes the parameters.
702 static __isl_give isl_map *wrap(__isl_take isl_dim *dim, int len,
703 int first, int wrap_len, int *wrap_size)
705 int i;
706 isl_basic_map *bmap;
707 isl_constraint *c;
709 dim = isl_dim_add(dim, isl_dim_in, len);
710 dim = isl_dim_add(dim, isl_dim_out, len + 2 * wrap_len);
711 bmap = isl_basic_map_universe(isl_dim_copy(dim));
713 for (i = 0; i < len; ++i) {
714 int k = i < first + wrap_len ? i : i + 2 * wrap_len;
716 c = isl_equality_alloc(isl_dim_copy(dim));
717 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
718 isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
719 bmap = isl_basic_map_add_constraint(bmap, c);
722 for (i = 0; i < wrap_len; ++i) {
723 c = isl_equality_alloc(isl_dim_copy(dim));
724 isl_constraint_set_coefficient_si(c, isl_dim_out,
725 first + i, -1);
726 isl_constraint_set_coefficient_si(c, isl_dim_out,
727 first + wrap_len + i, 1);
728 isl_constraint_set_coefficient_si(c, isl_dim_out,
729 first + 2 * wrap_len + i, wrap_size[i]);
730 bmap = isl_basic_map_add_constraint(bmap, c);
732 c = isl_inequality_alloc(isl_dim_copy(dim));
733 isl_constraint_set_coefficient_si(c, isl_dim_out,
734 first + wrap_len + i, 1);
735 bmap = isl_basic_map_add_constraint(bmap, c);
737 c = isl_inequality_alloc(isl_dim_copy(dim));
738 isl_constraint_set_coefficient_si(c, isl_dim_out,
739 first + wrap_len + i, -1);
740 isl_constraint_set_constant_si(c, wrap_size[i] - 1);
741 bmap = isl_basic_map_add_constraint(bmap, c);
744 isl_dim_free(dim);
746 bmap = isl_basic_map_project_out(bmap, isl_dim_out,
747 first + 2 * wrap_len, wrap_len);
749 return isl_map_from_basic_map(bmap);
752 /* Add "n" parameters named prefix%d.
754 static __isl_give isl_set *add_params( __isl_take isl_set *set,
755 int n, const char *prefix)
757 int i;
758 unsigned nparam;
759 char name[20];
761 nparam = isl_set_dim(set, isl_dim_param);
762 set = isl_set_add_dims(set, isl_dim_param, n);
764 for (i = 0; i < n; ++i) {
765 snprintf(name, sizeof(name), "%s%d", prefix, i);
766 set = isl_set_set_dim_name(set, isl_dim_param,
767 nparam + i, name);
770 return set;
773 /* Equate the "n" dimensions of "set" starting at "first" to
774 * freshly created parameters named prefix%d.
776 static __isl_give isl_set *parametrize(__isl_take isl_set *set,
777 int first, int n, const char *prefix)
779 int i;
780 unsigned nparam;
781 isl_int v;
782 isl_dim *dim;
783 isl_basic_set *bset;
784 isl_constraint *c;
786 nparam = isl_set_dim(set, isl_dim_param);
788 set = add_params(set, n, prefix);
790 dim = isl_set_get_dim(set);
791 bset = isl_basic_set_universe(isl_dim_copy(dim));
793 isl_int_init(v);
795 for (i = 0; i < n; ++i) {
796 c = isl_equality_alloc(isl_dim_copy(dim));
797 isl_int_set_si(v, -1);
798 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
799 isl_int_set_si(v, 1);
800 isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
801 bset = isl_basic_set_add_constraint(bset, c);
804 isl_int_clear(v);
805 isl_dim_free(dim);
807 return isl_set_intersect(set, isl_set_from_basic_set(bset));
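/* Construct a "len"-dimensional universe set on the given "dim" and equate
 * its "n" dimensions starting at "first" to parameters named prefix%d. */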
810 static __isl_give isl_set *parametrization(__isl_take isl_dim *dim,
811 int len, int first, int n, const char *prefix)
813 isl_set *set;
815 dim = isl_dim_add(dim, isl_dim_set, len);
816 set = isl_set_universe(dim);
818 return parametrize(set, first, n, prefix);
821 /* Tile the B loops over the tile sizes and then tile/wrap
822 * the T1 loops over the blocks.
824 static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
825 __isl_take isl_union_map *sched)
827 isl_dim *dim;
828 isl_map *tiling, *block_tiling;
830 dim = isl_union_map_get_dim(sched);
831 tiling = tile(isl_dim_copy(dim), gen->untiled_len,
832 gen->tile_first, gen->tile_len, gen->tile_size);
834 if (gen->options->wrap)
835 block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
836 gen->tile_first, gen->n_grid, gen->grid_dim);
837 else
838 block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
839 gen->tile_first, gen->n_grid, gen->grid_dim);
841 gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;
843 tiling = isl_map_apply_range(tiling, block_tiling);
845 sched = isl_union_map_apply_range(sched,
846 isl_union_map_from_map(tiling));
848 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
850 return sched;
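/* Equate the host loop dimensions of the tiled schedule to "h" parameters
 * and the dimensions corresponding to the blocks to "b" parameters. */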
853 static __isl_give isl_union_map *parametrize_tiled_schedule(
854 struct cuda_gen *gen, __isl_take isl_union_map *sched)
856 isl_dim *dim;
857 isl_set *par;
859 dim = isl_union_map_get_dim(sched);
860 par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
861 sched = isl_union_map_intersect_range(sched,
862 isl_union_set_from_set(par));
864 dim = isl_union_map_get_dim(sched);
865 par = parametrization(dim, gen->tiled_len,
866 gen->tile_first + gen->n_grid, gen->n_grid, "b");
867 sched = isl_union_map_intersect_range(sched,
868 isl_union_set_from_set(par));
870 return sched;
873 /* Tile/wrap the P1 loops over the threads.
875 static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
876 __isl_take isl_union_map *sched)
878 isl_dim *dim;
879 isl_map *tiling;
880 isl_set *par;
882 dim = isl_union_map_get_dim(sched);
884 if (gen->options->wrap)
885 tiling = wrap(isl_dim_copy(dim), gen->tiled_len,
886 gen->shared_len, gen->n_block, gen->block_dim);
887 else
888 tiling = tile(isl_dim_copy(dim), gen->tiled_len,
889 gen->shared_len, gen->n_block, gen->block_dim);
890 gen->thread_tiled_len = gen->tiled_len + gen->n_block;
892 sched = isl_union_map_apply_range(sched,
893 isl_union_map_from_map(tiling));
895 par = parametrization(dim, gen->thread_tiled_len,
896 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
897 gen->n_block, "t");
898 sched = isl_union_map_intersect_range(sched,
899 isl_union_set_from_set(par));
901 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
903 return sched;
906 /* If the user asked for it, scale the shared memory tile loops
907 * (T1P and T2) of "sched" by gen->tile_size[i].
908 * If we are not performing "wrapping", then additionally scale the T1P
909 * loops by gen->grid_dim[i].
911 static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
912 __isl_take isl_union_map *sched)
914 int i;
915 isl_dim *dim;
916 isl_basic_map *scale;
917 isl_constraint *c;
919 if (!gen->options->scale_tile_loops)
920 return sched;
922 dim = isl_union_map_get_dim(sched);
923 dim = isl_dim_add(dim, isl_dim_in, gen->tiled_len);
924 dim = isl_dim_add(dim, isl_dim_out, gen->tiled_len);
925 scale = isl_basic_map_universe(isl_dim_copy(dim));
927 for (i = 0; i < gen->tiled_len; ++i) {
928 int f = 1;
930 if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
931 f = gen->tile_size[i - gen->tile_first];
932 if (!gen->options->wrap)
933 f *= gen->grid_dim[i - gen->tile_first];
934 } else if (i >= gen->tile_first + gen->n_grid &&
935 i < gen->tile_first + gen->n_grid + gen->tile_len) {
936 f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
939 c = isl_equality_alloc(isl_dim_copy(dim));
940 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
941 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
942 scale = isl_basic_map_add_constraint(scale, c);
945 isl_dim_free(dim);
947 sched = isl_union_map_apply_range(sched,
948 isl_union_map_from_map(isl_map_from_basic_map(scale)));
950 return sched;
953 /* If we are not performing "wrapping" and if the user asked for it,
954 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
956 static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
957 __isl_take isl_union_map *sched)
959 int i;
960 isl_dim *dim;
961 isl_basic_map *scale;
962 isl_constraint *c;
964 if (gen->options->wrap)
965 return sched;
966 if (!gen->options->scale_tile_loops)
967 return sched;
969 dim = isl_union_map_get_dim(sched);
970 dim = isl_dim_add(dim, isl_dim_in, gen->thread_tiled_len);
971 dim = isl_dim_add(dim, isl_dim_out, gen->thread_tiled_len);
972 scale = isl_basic_map_universe(isl_dim_copy(dim));
974 for (i = 0; i < gen->thread_tiled_len; ++i) {
975 int f = 1;
977 if (i >= gen->shared_len &&
978 i < gen->shared_len + gen->n_block)
979 f = gen->block_dim[i - gen->shared_len];
981 c = isl_equality_alloc(isl_dim_copy(dim));
982 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
983 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
984 scale = isl_basic_map_add_constraint(scale, c);
987 isl_dim_free(dim);
989 sched = isl_union_map_apply_range(sched,
990 isl_union_map_from_map(isl_map_from_basic_map(scale)));
992 return sched;
995 /* If we are not performing "wrapping" and if the user asked for it,
996 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
998 static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
999 __isl_take isl_union_map *sched, int len, int first, int n_tile)
1001 int i;
1002 isl_dim *dim;
1003 isl_basic_map *scale;
1004 isl_constraint *c;
1006 if (gen->options->wrap)
1007 return sched;
1008 if (!gen->options->scale_tile_loops)
1009 return sched;
1011 dim = isl_union_map_get_dim(sched);
1012 dim = isl_dim_add(dim, isl_dim_in, len);
1013 dim = isl_dim_add(dim, isl_dim_out, len);
1014 scale = isl_basic_map_universe(isl_dim_copy(dim));
1016 for (i = 0; i < len; ++i) {
1017 int f = 1;
1019 if (i >= first && i < first + n_tile)
1020 f = gen->block_dim[i - first];
1022 c = isl_equality_alloc(isl_dim_copy(dim));
1023 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1024 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1025 scale = isl_basic_map_add_constraint(scale, c);
1028 isl_dim_free(dim);
1030 sched = isl_union_map_apply_range(sched,
1031 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1033 return sched;
1036 /* If print_user_stmt is set, we want to print the statements ourselves,
1037 * instead of relying on the C preprocessor. If so, we need to use
1038 * the stop option so that the domains will be saved on the statement
1039 * nodes.
1041 static void print_cloog_shared_body(struct cuda_gen *gen,
1042 __isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
1043 void (*print_user_stmt)(struct gpucode_info *info,
1044 struct clast_user_stmt *s),
1045 int first_unroll)
1047 int i;
1048 CloogOptions *options;
1049 CloogDomain *cloog_context;
1050 CloogUnionDomain *ud;
1051 CloogInput *input;
1052 struct clast_stmt *stmt;
1053 char name[20];
1055 sched = isl_union_map_copy(sched);
1056 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
1058 options = cloog_options_malloc(gen->state);
1059 options->language = LANGUAGE_C;
1060 options->strides = 1;
1061 options->sh = 1;
1062 options->f = len;
1063 options->l = -1;
1064 options->override = 1;
1065 options->save_domains = 1;
1066 options->noscalars = 1;
1067 options->first_unroll = first_unroll;
1069 ud = cloog_union_domain_from_isl_union_map(sched);
1070 for (i = 0; i < len; ++i) {
1071 snprintf(name, sizeof(name), "c%d", i);
1072 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
1074 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
1075 input = cloog_input_alloc(cloog_context, ud);
1077 stmt = cloog_clast_create_from_input(input, options);
1079 gen->stmt_code.indent = gen->kernel_code.indent;
1080 gen->stmt_code.dst = gen->cuda.kernel_c;
1081 gen->stmt_code.print_user_stmt = print_user_stmt;
1082 gen->stmt_code.print_user_stmt_list = NULL;
1083 gen->stmt_code.print_for_head = NULL;
1084 gen->stmt_code.print_for_foot = NULL;
1085 gen->stmt_code.user = gen;
1086 gpu_print_host_stmt(&gen->stmt_code, stmt);
1088 cloog_clast_free(stmt);
1089 cloog_options_free(options);
1092 /* Add "len" parameters p[i] called prefix%d,
1093 * with bounds 0 <= p[i] < size[i].
1095 __isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
1096 int len, int *size, const char *prefix)
1098 int i;
1099 unsigned nparam;
1100 isl_int v;
1101 isl_dim *dim;
1102 isl_basic_set *bset;
1103 isl_constraint *c;
1104 char name[20];
1106 nparam = isl_set_dim(set, isl_dim_param);
1107 set = isl_set_add_dims(set, isl_dim_param, len);
1109 for (i = 0; i < len; ++i) {
1110 snprintf(name, sizeof(name), "%s%d", prefix, i);
1111 set = isl_set_set_dim_name(set, isl_dim_param,
1112 nparam + i, name);
1115 dim = isl_set_get_dim(set);
1116 bset = isl_basic_set_universe(isl_dim_copy(dim));
1118 isl_int_init(v);
1120 for (i = 0; i < len; ++i) {
1121 c = isl_inequality_alloc(isl_dim_copy(dim));
1122 isl_int_set_si(v, 1);
1123 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1124 bset = isl_basic_set_add_constraint(bset, c);
1126 c = isl_inequality_alloc(isl_dim_copy(dim));
1127 isl_int_set_si(v, -1);
1128 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1129 isl_int_set_si(v, size[i] - 1);
1130 isl_constraint_set_constant(c, v);
1131 bset = isl_basic_set_add_constraint(bset, c);
1134 isl_int_clear(v);
1135 isl_dim_free(dim);
1137 return isl_set_intersect(set, isl_set_from_basic_set(bset));
1140 static void print_shared_body(struct cuda_gen *gen,
1141 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
1142 int len, void (*print_user_stmt)(struct gpucode_info *info,
1143 struct clast_user_stmt *s),
1144 int first_unroll)
1146 isl_set *context;
1148 context = isl_set_copy(shared_domain);
1149 context = parametrize(context, 0, gen->shared_len, "g");
1150 context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
1151 context = add_bounded_parameters(context,
1152 gen->n_block, gen->block_dim, "t");
1154 print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
1155 first_unroll);
1157 isl_set_free(context);
1160 /* Given a tile of an array, construct a map that maps each element
1161 * of the tile to a copy of the tile shifted to the origin
1162 * (based on the lower bounds in group->private_bound or group->shared_bound).
1163 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1164 * is applied to the index first.
1165 * The domain of the resulting map is "access",
1166 * while the range space is anonymous.
1168 static __isl_give isl_map *shift_access(__isl_take isl_set *access,
1169 struct cuda_array_ref_group *group)
1171 int i;
1172 isl_dim *dim;
1173 isl_basic_set *bset;
1174 isl_basic_map *bmap;
1175 isl_aff *lb;
1176 isl_basic_set *offset;
1177 isl_basic_map *shift;
1178 isl_basic_map *pre_shift;
1179 isl_map *sched;
1180 const char *name;
1181 struct cuda_array_bound *bounds;
1182 int n_index = group->array->n_index;
1184 bounds = group->private_bound;
1185 if (!bounds)
1186 bounds = group->shared_bound;
1188 dim = isl_set_get_dim(access);
1189 dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
1190 offset = isl_basic_set_universe(dim);
1191 for (i = 0; i < n_index; ++i) {
1192 lb = isl_aff_copy(bounds[i].lb);
1193 bmap = isl_basic_map_from_aff(lb);
1194 bset = isl_basic_map_range(bmap);
1195 offset = isl_basic_set_flat_product(offset, bset);
1197 offset = isl_basic_set_neg(offset);
1199 dim = isl_dim_map_from_set(isl_set_get_dim(access));
1200 shift = isl_basic_map_identity(dim);
1201 shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);
1203 bset = isl_basic_set_universe(isl_set_get_dim(access));
1204 bmap = isl_basic_map_from_domain_and_range(bset, offset);
1206 shift = isl_basic_map_sum(shift, bmap);
1208 dim = isl_set_get_dim(access);
1209 dim = isl_dim_drop(dim, isl_dim_set, 0, n_index);
1210 dim = isl_dim_map_from_set(dim);
1211 pre_shift = isl_basic_map_universe(isl_dim_copy(dim));
1212 dim = isl_dim_add(dim, isl_dim_in, 1);
1213 dim = isl_dim_add(dim, isl_dim_out, 1);
1214 for (i = 0; i < n_index; ++i) {
1215 if (!bounds[i].shift_map)
1216 bmap = isl_basic_map_identity(isl_dim_copy(dim));
1217 else
1218 bmap = isl_basic_map_copy(bounds[i].shift_map);
1219 pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
1221 isl_dim_free(dim);
1222 name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
1223 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
1224 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
1225 shift = isl_basic_map_apply_range(pre_shift, shift);
1227 sched = isl_map_from_basic_map(shift);
1228 sched = isl_map_intersect_domain(sched, access);
1230 return sched;
1233 /* Construct a schedule for iterating over all elements in the given
1234 * piece of an array. The schedule iterates over a copy of the piece
1235 * that is shifted to the origin.
1236 * We subsequently also perform the tiling/wrapping over the threads.
1238 * In particular, we tile the final iterators so that the final thread
1239 * dimension runs over the final array dimension.
1240 * However, if those final iterators have only a single iteration,
1241 * we try to tile earlier iterators instead.
1243 static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
1244 __isl_take isl_set *access, struct cuda_array_ref_group *group)
1246 isl_dim *dim;
1247 isl_map *sched;
1248 isl_union_map *usched;
1249 isl_map *tiling;
1250 isl_set *par;
1251 unsigned nvar = isl_set_dim(access, isl_dim_set);
1252 int n_tile;
1253 int first;
1255 sched = shift_access(access, group);
1257 n_tile = gen->n_block;
1258 if (n_tile > nvar) {
1259 int i;
1260 sched = isl_map_insert(sched, isl_dim_out, 0, n_tile - nvar);
1261 for (i = 0; i < n_tile - nvar; ++i)
1262 sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
1263 nvar = n_tile;
1266 first = nvar - n_tile;
1268 for (; first > 0; first --)
1269 if (!isl_map_plain_is_fixed(sched, isl_dim_out,
1270 first + n_tile - 1, NULL))
1271 break;
1273 dim = isl_map_get_dim(sched);
1274 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
1275 dim = isl_dim_drop(dim, isl_dim_out, 0, nvar);
1276 if (gen->options->wrap)
1277 tiling = wrap(isl_dim_copy(dim), nvar, first,
1278 n_tile, gen->block_dim);
1279 else
1280 tiling = tile(isl_dim_copy(dim), nvar, first,
1281 n_tile, gen->block_dim);
1282 sched = isl_map_apply_range(sched, tiling);
1284 par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
1285 usched = isl_union_map_from_map(sched);
1286 usched = isl_union_map_intersect_range(usched,
1287 isl_union_set_from_set(par));
1289 usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
1290 first, n_tile);
1292 return usched;
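/* Print code, distributed over the threads of the block, for copying the
 * elements of "access" between global memory and the shared memory copy
 * of the given array reference group. */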
1295 static void print_shared_access(struct cuda_gen *gen,
1296 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1297 const char *type, struct cuda_array_ref_group *group)
1299 const char *array_name;
1300 char *name;
1301 isl_ctx *ctx;
1302 isl_union_map *sched;
1303 unsigned nvar = isl_set_dim(access, isl_dim_set);
1304 int n_tile;
1306 ctx = isl_set_get_ctx(access);
1307 array_name = isl_set_get_tuple_name(access);
1308 name = isl_alloc_array(ctx, char,
1309 strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
1310 if (group->array->n_group > 1)
1311 sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
1312 else
1313 sprintf(name, "%s_shared_%s", type, array_name);
1314 access = isl_set_set_tuple_name(access, name);
1315 free(name);
1317 sched = access_schedule(gen, access, group);
1319 n_tile = gen->n_block;
1320 if (n_tile > nvar)
1321 n_tile = nvar;
1323 print_shared_body(gen, shared_domain, sched, nvar + n_tile, NULL, -1);
1325 isl_union_map_free(sched);
1328 /* Return the union of all read (read = 1) and/or write (write = 1)
1329 * access relations in the group.
1331 static __isl_give isl_union_map *group_access_relation(
1332 struct cuda_array_ref_group *group, int read, int write)
1334 int i;
1335 isl_union_map *access;
1337 access = isl_union_map_empty(isl_map_get_dim(group->access));
1338 for (i = 0; i < group->n_ref; ++i) {
1339 isl_map *map_i;
1341 if (!((read && group->refs[i]->read) ||
1342 (write && group->refs[i]->write)))
1343 continue;
1344 map_i = isl_map_copy(group->refs[i]->access);
1345 access = isl_union_map_union(access,
1346 isl_union_map_from_map(map_i));
1349 return access;
1352 /* Check that none of the shared memory tiles involve any strides.
1354 static int no_strides(struct cuda_array_ref_group *group)
1356 int i;
1357 int n_index = group->array->n_index;
1359 for (i = 0; i < n_index; ++i)
1360 if (group->shared_bound[i].shift)
1361 return 0;
1363 return 1;
1366 /* Return a set containing the values of the given index i
1367 * of the elements in the array tile in global memory that corresponds
1368 * to the shared memory copy.
1369 * In particular, if a is the index, we return a set with constraints
1371 * tile_offset <= a <= tile_offset + tile_size - 1
1373 * and
1375 * 0 <= a <= array_size - 1
1378 static __isl_give isl_set *group_tile_dim(struct cuda_array_ref_group *group,
1379 int i)
1381 isl_basic_set *tile;
1382 isl_aff *aff;
1383 isl_constraint *c;
1384 isl_local_space *ls;
1385 isl_pw_aff *bound;
1386 isl_set *dom;
1387 isl_set *tile_set;
1389 aff = isl_aff_copy(group->shared_bound[i].lb);
1390 aff = isl_aff_add_dims(aff, isl_dim_set, 1);
1391 ls = isl_aff_get_local_space(aff);
1392 aff = isl_aff_neg(aff);
1393 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1394 c = isl_inequality_from_aff(isl_aff_copy(aff));
1395 tile = isl_basic_set_from_constraint(c);
1397 aff = isl_aff_neg(aff);
1398 aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
1399 aff = isl_aff_add_constant_si(aff, -1);
1400 c = isl_inequality_from_aff(aff);
1401 tile = isl_basic_set_add_constraint(tile, c);
1403 aff = isl_aff_zero(ls);
1404 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1405 c = isl_inequality_from_aff(aff);
1406 tile = isl_basic_set_add_constraint(tile, c);
1408 bound = isl_pw_aff_copy(group->array->bound[i]);
1409 bound = isl_pw_aff_add_dims(bound, isl_dim_set, 1);
1410 ls = isl_local_space_from_dim(isl_pw_aff_get_dim(bound));
1411 aff = isl_aff_zero(ls);
1412 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, 0, 1);
1413 aff = isl_aff_add_constant_si(aff, 1);
1414 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
1416 tile_set = isl_pw_aff_ge_set(bound, isl_pw_aff_alloc(dom, aff));
1417 tile_set = isl_set_align_params(tile_set, isl_basic_set_get_dim(tile));
1418 tile_set = isl_set_intersect(tile_set, isl_set_from_basic_set(tile));
1420 return tile_set;
1423 /* Return a set containing the elements in the array tile in
1424 * global memory that corresponds to the shared memory copy.
1426 static __isl_give isl_set *group_tile(struct cuda_array_ref_group *group)
1428 int i;
1429 int n_index = group->array->n_index;
1430 isl_set *tile;
1432 tile = group_tile_dim(group, 0);
1433 for (i = 1; i < n_index; ++i) {
1434 isl_set *tile_i;
1436 tile_i = group_tile_dim(group, i);
1437 tile = isl_set_flat_product(tile, tile_i);
1440 tile = isl_set_set_tuple_name(tile, group->array->name);
1442 return tile;
1445 /* Print code for reading into or writing from shared memory
1446 * the given array reference group.
1448 * sched maps the original iteration domains to the shared memory tile loops.
1450 * If we are performing a read from global memory to shared memory,
1451 * if the array involved is not a scalar and if the definition of the
1452 * shared memory tiles does not involve any strides, then we copy
1453 * the entire tile to shared memory. This may result in some extra
1454 * elements getting copied, but it should lead to simpler code
1455 * (which means that fewer registers may be needed) and less divergence.
1457 * Otherwise, we only copy the elements that will be read or have been written
1458 * in the kernel.
1460 * Note that the requirement that there be no strides can easily be lifted.
1461 * We would just need to add constraints of the form
1463 * shift + a = stride * alpha
1465 static int print_group_shared_accesses(struct cuda_gen *gen,
1466 struct cuda_array_ref_group *group, const char *type,
1467 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
1469 int read;
1470 isl_union_map *access;
1471 isl_union_set *uset;
1472 isl_set *access_set;
1474 if (group->private_bound)
1475 return 0;
1476 if (!group->shared_bound)
1477 return 0;
1479 read = !strcmp(type, "read");
1481 access = group_access_relation(group, read, !read);
1482 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
1483 uset = isl_union_map_range(access);
1485 if (isl_union_set_is_empty(uset)) {
1486 isl_union_set_free(uset);
1487 return 0;
1490 if (read && group->array->n_index > 0 && no_strides(group)) {
1491 isl_union_set_free(uset);
1492 access_set = group_tile(group);
1493 print_shared_access(gen, shared_domain, access_set,
1494 type, group);
1495 return 1;
1498 access_set = isl_set_from_union_set(uset);
1499 access_set = isl_set_coalesce(access_set);
1501 print_shared_access(gen, shared_domain, access_set, type, group);
1503 return 1;
1506 /* Print code for reading into or writing from shared memory at
1507 * the given level (-1 for innermost).
1509 * If we are not printing at the innermost level, then the dimensionality
1510 * of shared_domain may be smaller than gen->shared_len.
1511 * As the rest of the code assumes that the domain of access has
1512 * gen->shared_len dimensions, we therefore may need to embed this domain
1513 * in a higher dimensional space after intersection with shared_domain.
1515 static void print_shared_accesses(struct cuda_gen *gen,
1516 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
1517 const char *type, int level)
1519 int i, j;
1520 isl_dim *dim;
1521 isl_map *proj;
1522 isl_set *par;
1523 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
1524 int sync = 0;
1525 isl_union_map *sched;
1527 shared_domain = isl_set_copy(shared_domain);
1528 sched = isl_union_map_copy(gen->tiled_sched);
1529 dim = isl_union_map_get_dim(sched);
1530 proj = projection(dim, gen->tiled_len, shared_len);
1531 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
1532 sched = isl_union_map_intersect_range(sched,
1533 isl_union_set_from_set(isl_set_copy(shared_domain)));
1534 if (shared_len != gen->shared_len) {
1535 dim = isl_union_map_get_dim(sched);
1536 proj = projection(dim, gen->shared_len, shared_len);
1537 proj = isl_map_reverse(proj);
1538 shared_domain = isl_set_apply(shared_domain,
1539 isl_map_copy(proj));
1540 sched = isl_union_map_apply_range(sched,
1541 isl_union_map_from_map(proj));
1544 dim = isl_union_map_get_dim(sched);
1545 par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
1546 sched = isl_union_map_intersect_range(sched,
1547 isl_union_set_from_set(par));
1549 for (i = 0; i < gen->n_array; ++i) {
1550 struct cuda_array_info *array = &gen->array[i];
1552 if (gen->array[i].print_shared_level != level)
1553 continue;
1555 for (j = 0; j < array->n_group; ++j) {
1556 if (print_group_shared_accesses(gen, array->groups[j],
1557 type, shared_domain, sched))
1558 sync = 1;
1562 isl_union_map_free(sched);
1563 isl_set_free(shared_domain);
1565 if (sync) {
1566 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
1567 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
1571 /* Given an index expression into a tile of an array, adjust the expression
1572 * to a shift of the tile to the origin
1573 * (based on the lower bounds in array->shared_bound).
1574 * If the index is strided, then we first add
1575 * bound->shift and divide by bound->stride.
1577 static __isl_give isl_qpolynomial *shift_index(__isl_take isl_qpolynomial *qp,
1578 struct cuda_array_info *array,
1579 struct cuda_array_bound *bound, __isl_take isl_set *domain)
1581 isl_qpolynomial *lb;
1583 if (bound->shift) {
1584 isl_qpolynomial *shift, *t;
1585 isl_int one;
1586 isl_dim *dim;
1587 shift = bound->shift;
1588 shift = isl_qpolynomial_copy(shift);
1589 shift = isl_qpolynomial_drop_dims(shift, isl_dim_set, 0,
1590 isl_qpolynomial_dim(shift, isl_dim_set));
1591 shift = isl_qpolynomial_align_params(shift,
1592 isl_qpolynomial_get_dim(qp));
1593 qp = isl_qpolynomial_add(qp, shift);
1594 dim = isl_qpolynomial_get_dim(qp);
1595 isl_int_init(one);
1596 isl_int_set_si(one, 1);
1597 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
1598 isl_int_clear(one);
1599 qp = isl_qpolynomial_mul(qp, t);
1602 lb = isl_qpolynomial_from_aff(isl_aff_copy(bound->lb));
1603 lb = isl_qpolynomial_drop_dims(lb, isl_dim_set, 0,
1604 isl_qpolynomial_dim(lb, isl_dim_set));
1606 lb = isl_qpolynomial_align_params(lb, isl_qpolynomial_get_dim(qp));
1608 qp = isl_qpolynomial_sub(qp, lb);
1609 qp = isl_qpolynomial_gist(qp, domain);
1611 return qp;
1614 /* This function is called for each access to an array in some statement
1615 * in the original code.
1616 * Replace that access by an access to shared or (linearized) global memory.
1617 * Since the array in shared memory is just
1618 * a shifted copy of part of the original array, we simply need
1619 * to subtract the lower bound, which was computed
1620 * in can_tile_for_shared_memory.
1621 * If any of the indices is strided, then we first add
1622 * shared_bound[i].shift and divide by shared_bound[i].stride.
1624 * If the given array is accessed directly from global memory,
1625 * we don't need to perform any shifting and simply simplify
1626 * the expression in the context of the domain instead.
1628 * If the array space (range of access) has no name, then we are
1629 * accessing an iterator in the original program.
1631 static void print_access(struct cuda_gen *gen, __isl_take isl_map *access,
1632 int group_nr)
1634 int i;
1635 const char *name;
1636 unsigned n_index;
1637 struct cuda_array_info *array = NULL;
1638 isl_printer *prn;
1639 isl_basic_set *aff;
1640 isl_set *data_set;
1641 isl_set *domain;
1642 struct cuda_array_bound *bounds = NULL;
1644 access = isl_map_align_params(access,
1645 isl_set_get_dim(gen->stmt_domain));
1647 data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);
1649 name = isl_set_get_tuple_name(data_set);
1651 if (!name)
1652 fprintf(gen->cuda.kernel_c, "(");
1653 else {
1654 struct cuda_array_ref_group *group;
1656 for (i = 0; i < gen->n_array; ++i) {
1657 if (strcmp(name, gen->array[i].name))
1658 continue;
1659 array = &gen->array[i];
1661 assert(array);
1662 group = array->groups[group_nr];
1663 bounds = group->private_bound;
1664 if (!bounds)
1665 bounds = group->shared_bound;
1667 print_array_name(gen->cuda.kernel_c, group);
1669 if (cuda_array_is_scalar(array)) {
1670 isl_set_free(data_set);
1671 return;
1674 fprintf(gen->cuda.kernel_c, "[");
1678 n_index = isl_set_dim(data_set, isl_dim_set);
1679 aff = isl_set_affine_hull(data_set);
1681 prn = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
1682 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1684 if (!bounds)
1685 for (i = 0; i + 1 < n_index; ++i)
1686 prn = isl_printer_print_str(prn, "(");
1688 for (i = 0; i < n_index; ++i) {
1689 isl_constraint *c;
1690 isl_qpolynomial *qp;
1691 int ok;
1693 ok = isl_basic_set_has_defining_equality(aff,
1694 isl_dim_out, i, &c);
1695 assert(ok);
1696 qp = isl_qpolynomial_from_constraint(c, isl_dim_out, i);
1697 qp = isl_qpolynomial_drop_dims(qp, isl_dim_set, 0,
1698 isl_qpolynomial_dim(qp, isl_dim_set));
1700 if (!array) {
1701 prn = isl_printer_print_qpolynomial(prn, qp);
1702 isl_qpolynomial_free(qp);
1703 continue;
1706 domain = isl_set_copy(gen->stmt_domain);
1707 domain = isl_set_project_out(domain, isl_dim_set, 0,
1708 isl_set_dim(domain, isl_dim_set));
1709 if (!bounds)
1710 qp = isl_qpolynomial_gist(qp, domain);
1711 else
1712 qp = shift_index(qp, array, &bounds[i], domain);
1714 if (i) {
1715 if (!bounds) {
1716 prn = isl_printer_print_str(prn, ") * (");
1717 prn = isl_printer_print_pw_aff(prn,
1718 array->local_bound[i]);
1719 prn = isl_printer_print_str(prn, ") + ");
1720 } else
1721 prn = isl_printer_print_str(prn, "][");
1723 prn = isl_printer_print_qpolynomial(prn, qp);
1724 isl_qpolynomial_free(qp);
1726 if (!name)
1727 prn = isl_printer_print_str(prn, ")");
1728 else
1729 prn = isl_printer_print_str(prn, "]");
1730 isl_printer_free(prn);
1732 isl_basic_set_free(aff);
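/* Print the given pet expression, replacing each array access by an access
 * to private, shared or global memory as appropriate. Accesses are consumed
 * in order from the "access" list and a pointer to the first unprocessed
 * access is returned. */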
1735 static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
1736 struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
1738 int i;
1740 switch (expr->type) {
1741 case pet_expr_double:
1742 fprintf(out, "%g", expr->d);
1743 break;
1744 case pet_expr_access:
1745 print_access(gen, isl_map_copy(access->access), access->group);
1746 access = access->next;
1747 break;
1748 case pet_expr_unary:
1749 if (!outer)
1750 fprintf(out, "(");
1751 fprintf(out, " %s ", pet_op_str(expr->op));
1752 access = print_expr(gen, out, expr->args[pet_un_arg],
1753 access, 0);
1754 if (!outer)
1755 fprintf(out, ")");
1756 break;
1757 case pet_expr_binary:
1758 if (!outer)
1759 fprintf(out, "(");
1760 access = print_expr(gen, out, expr->args[pet_bin_lhs],
1761 access, 0);
1762 fprintf(out, " %s ", pet_op_str(expr->op));
1763 access = print_expr(gen, out, expr->args[pet_bin_rhs],
1764 access, 0);
1765 if (!outer)
1766 fprintf(out, ")");
1767 break;
1768 case pet_expr_ternary:
1769 if (!outer)
1770 fprintf(out, "(");
1771 access = print_expr(gen, out, expr->args[pet_ter_cond],
1772 access, 0);
1773 fprintf(out, " ? ");
1774 access = print_expr(gen, out, expr->args[pet_ter_true],
1775 access, 0);
1776 fprintf(out, " : ");
1777 access = print_expr(gen, out, expr->args[pet_ter_false],
1778 access, 0);
1779 if (!outer)
1780 fprintf(out, ")");
1781 break;
1782 case pet_expr_call:
1783 fprintf(out, "%s(", expr->name);
1784 for (i = 0; i < expr->n_arg; ++i) {
1785 if (i)
1786 fprintf(out, ", ");
1787 access = print_expr(gen, out, expr->args[i],
1788 access, 1);
1790 fprintf(out, ")");
1792 return access;
1795 static void print_stmt_body(struct cuda_gen *gen,
1796 FILE *out, struct cuda_stmt *stmt)
1798 print_expr(gen, out, stmt->body, stmt->accesses, 1);
1799 fprintf(out, ";\n");
1802 /* This function is called for each leaf in the innermost clast,
1803 * i.e., for each statement.
1804 * We print the statement body, simplifying the accesses based
1805 * on the schedule.
1807 static void print_statement(struct gpucode_info *code,
1808 struct clast_user_stmt *u)
1810 struct cuda_gen *gen = code->user;
1811 isl_dim *dim;
1812 isl_set *par;
1813 isl_set *stmt_domain;
1814 isl_union_map *stmt_sched;
1815 isl_union_set *uset;
1816 int nr;
1817 struct cuda_stmt *stmt;
1819 nr = atoi(u->statement->name + 2);
1820 stmt = &gen->stmts[nr];
1822 stmt_domain = extract_host_domain(u);
1824 stmt_sched = isl_union_map_intersect_range(
1825 isl_union_map_copy(gen->local_sched),
1826 isl_union_set_from_set(extend(stmt_domain,
1827 gen->thread_tiled_len)));
1828 dim = isl_union_map_get_dim(stmt_sched);
1829 par = parametrization(dim, gen->thread_tiled_len, 0,
1830 gen->thread_tiled_len, "c");
1831 stmt_sched = isl_union_map_intersect_range(stmt_sched,
1832 isl_union_set_from_set(par));
1834 uset = isl_union_map_domain(stmt_sched);
1835 dim = isl_union_set_get_dim(uset);
1836 dim = isl_dim_add(dim, isl_dim_set,
1837 isl_set_dim(stmt->domain, isl_dim_set));
1838 dim = isl_dim_set_tuple_name(dim, isl_dim_set, u->statement->name);
1839 gen->stmt_domain = isl_union_set_extract_set(uset, dim);
1840 isl_union_set_free(uset);
1842 print_indent(code->dst, code->indent);
1843 print_stmt_body(gen, code->dst, stmt);
1845 isl_set_free(gen->stmt_domain);
1848 /* Print an access to the element in the global memory copy of the
1849 * given array that corresponds to element [qp[0]][qp[1]]...
1850 * of the original array.
1851 * The copy in global memory has been linearized, so we need to take
1852 * the array size into account.
1854 static void print_private_global_index(isl_ctx *ctx, FILE *out,
1855 struct cuda_array_info *array, __isl_keep isl_qpolynomial **qp)
1857 int i;
1858 isl_printer *prn;
1860 fprintf(out, "%s[", array->name);
1861 prn = isl_printer_to_file(ctx, out);
1862 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1863 for (i = 0; i + 1 < array->n_index; ++i)
1864 prn = isl_printer_print_str(prn, "(");
1865 for (i = 0; i < array->n_index; ++i) {
1866 if (i) {
1867 prn = isl_printer_print_str(prn, ") * (");
1868 prn = isl_printer_print_pw_aff(prn,
1869 array->local_bound[i]);
1870 prn = isl_printer_print_str(prn, ") + ");
1872 prn = isl_printer_print_qpolynomial(prn, qp[i]);
1874 isl_printer_free(prn);
1875 fprintf(out, "]");
1878 /* Print an access to the element in the shared memory copy of the
1879 * given array reference group that corresponds to element [qps[0]][qps[1]]...
1880 * of the original array.
1881 * Since the array in shared memory is just a shifted copy of part
1882 * of the original array, we simply need to subtract the lower bound,
1883 * which was computed in can_tile_for_shared_memory.
1884 * If any of the indices is strided, then we first add
1885 * shared_bound[i].shift and divide by shared_bound[i].stride.
1887 static void print_private_local_index(isl_ctx *ctx, FILE *out,
1888 struct cuda_array_ref_group *group,
1889 __isl_keep isl_qpolynomial **qps, __isl_keep isl_set *domain)
1891 int i;
1892 isl_printer *prn;
1893 struct cuda_array_info *array = group->array;
1894 struct cuda_array_bound *bounds = group->private_bound;
1896 print_array_name(out, group);
1897 for (i = 0; i < array->n_index; ++i) {
1898 isl_qpolynomial *qp = isl_qpolynomial_copy(qps[i]);
1900 qp = shift_index(qp, array, &bounds[i], isl_set_copy(domain));
1902 fprintf(out, "[");
1903 prn = isl_printer_to_file(ctx, out);
1904 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1905 prn = isl_printer_print_qpolynomial(prn, qp);
1906 isl_printer_free(prn);
1907 fprintf(out, "]");
1908 isl_qpolynomial_free(qp);
1912 /* This function is called for each leaf in the clast of the code
1913 * for copying to or from private memory.
1914 * The statement name is read_private_<array> or write_private_<array>.
1916 * The schedule iterates over the array elements, so we can use
1917 * the domain of private_sched at the current scheduling position
1918 * as the index of the array.
1920 static void print_private_copy_statement(struct gpucode_info *code,
1921 struct clast_user_stmt *u)
1923 struct cuda_gen *gen = code->user;
1924 isl_set *domain;
1925 isl_map *sched;
1926 struct cuda_array_ref_group *group = gen->private_group;
1927 int i;
1928 unsigned n_in;
1929 unsigned n_out;
1930 isl_dim *dim;
1931 isl_set *param;
1932 isl_set *index;
1933 isl_basic_set *aff;
1934 isl_ctx *ctx;
1935 isl_qpolynomial **qp;
1936 int read;
1938 read = !strncmp(u->statement->name, "read", 4);
1940 domain = extract_host_domain(u);
1941 assert(domain);
1943 sched = isl_map_copy(gen->private_sched);
1944 sched = isl_map_reverse(sched);
1945 sched = isl_map_intersect_domain(sched, domain);
1946 n_in = isl_map_dim(sched, isl_dim_in);
1947 n_out = isl_map_dim(sched, isl_dim_out);
1948 dim = isl_map_get_dim(sched);
1949 dim = isl_dim_drop(dim, isl_dim_in, 0, n_in);
1950 dim = isl_dim_drop(dim, isl_dim_out, 0, n_out);
1951 param = parametrization(dim, n_in, 0, n_in, "c");
1952 sched = isl_map_align_params(sched, isl_set_get_dim(param));
1953 sched = isl_map_intersect_domain(sched, param);
1954 index = isl_map_range(sched);
1955 domain = isl_set_copy(index);
1956 aff = isl_set_affine_hull(index);
1957 domain = isl_set_project_out(domain, isl_dim_set, 0, n_out);
1959 ctx = isl_basic_set_get_ctx(aff);
1960 qp = isl_alloc_array(ctx, isl_qpolynomial *, n_out);
1961 assert(qp);
1963 for (i = 0; i < n_out; ++i) {
1964 isl_constraint *c;
1965 int ok;
1967 ok = isl_basic_set_has_defining_equality(aff,
1968 isl_dim_set, i, &c);
1969 assert(ok);
1970 qp[i] = isl_qpolynomial_from_constraint(c, isl_dim_set, i);
1971 qp[i] = isl_qpolynomial_drop_dims(qp[i], isl_dim_set, 0, n_out);
1974 print_indent(code->dst, code->indent);
1975 if (read) {
1976 print_private_local_index(ctx, code->dst, group, qp, domain);
1977 fprintf(code->dst, " = ");
1978 print_private_global_index(ctx, code->dst, group->array, qp);
1979 } else {
1980 print_private_global_index(ctx, code->dst, group->array, qp);
1981 fprintf(code->dst, " = ");
1982 print_private_local_index(ctx, code->dst, group, qp, domain);
1984 fprintf(code->dst, ";\n");
1986 for (i = 0; i < n_out; ++i)
1987 isl_qpolynomial_free(qp[i]);
1988 free(qp);
1990 isl_basic_set_free(aff);
1991 isl_set_free(domain);
1994 static void print_private_access(struct cuda_gen *gen,
1995 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1996 const char *type, struct cuda_array_ref_group *group)
1998 const char *array_name;
1999 char *name;
2000 isl_ctx *ctx;
2001 unsigned nvar = isl_set_dim(access, isl_dim_set);
2002 isl_union_map *usched;
2004 if (isl_set_fast_is_empty(access)) {
2005 isl_set_free(access);
2006 return;
2009 ctx = isl_set_get_ctx(access);
2010 array_name = isl_set_get_tuple_name(access);
2011 name = isl_alloc_array(ctx, char,
2012 strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
2013 if (group->array->n_group > 1)
2014 sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
2015 else
2016 sprintf(name, "%s_private_%s", type, array_name);
2017 access = isl_set_set_tuple_name(access, name);
2018 free(name);
2020 gen->private_sched = shift_access(access, group);
2021 gen->private_group = group;
2023 usched = isl_union_map_from_map(isl_map_copy(gen->private_sched));
2024 print_shared_body(gen, shared_domain, usched, nvar,
2025 &print_private_copy_statement, 1);
2026 isl_union_map_free(usched);
2028 isl_map_free(gen->private_sched);
2031 /* Print code for reading into or writing from private memory
2032 * the given array reference group.
2034 * sched maps the original iteration domains to the shared memory tile loops.
2036 static void print_group_private_accesses(struct cuda_gen *gen,
2037 struct cuda_array_ref_group *group,
2038 const char *type, __isl_keep isl_set *shared_domain,
2039 unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
2041 int read;
2042 isl_union_map *access;
2043 isl_union_set *uset;
2044 isl_set *access_set;
2046 if (!group->private_bound)
2047 return;
2049 read = !strcmp(type, "read");
2051 access = group_access_relation(group, read, !read);
2052 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
2053 access = isl_union_map_intersect(access,
2054 isl_union_map_copy(gen->private_access));
2055 uset = isl_union_map_range(access);
2057 if (isl_union_set_is_empty(uset)) {
2058 isl_union_set_free(uset);
2059 return;
2062 access_set = isl_set_from_union_set(uset);
2063 access_set = isl_set_coalesce(access_set);
2064 access_set = isl_set_eliminate(access_set, isl_dim_param,
2065 first_shared + shared_len,
2066 gen->shared_len - shared_len);
2068 print_private_access(gen, shared_domain, access_set, type, group);
2071 /* Print code for reading into or writing from private memory at
2072 * the given level (-1 for innermost).
2074 * If we are not printing at the innermost level, then the dimensionality
2075 * of shared_domain may be smaller than gen->shared_len.
2076 * As the rest of the code assumes that the domain of access has
2077 * gen->shared_len dimensions, we therefore may need to embed this domain
2078 * in a higher dimensional space after intersection with shared_domain.
2080 * This code is very similar to print_shared_accesses.
2081 * The main difference is that we need to take into account gen->private_access.
2083 static void print_private_accesses(struct cuda_gen *gen,
2084 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
2085 const char *type, int level)
2087 int i, j;
2088 isl_dim *dim;
2089 isl_map *proj;
2090 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
2091 unsigned first_shared;
2092 isl_union_map *sched;
2094 shared_domain = isl_set_copy(shared_domain);
2095 sched = isl_union_map_copy(gen->tiled_sched);
2096 dim = isl_union_map_get_dim(sched);
2097 first_shared = isl_dim_size(dim, isl_dim_param);
2098 proj = projection(dim, gen->tiled_len, shared_len);
2099 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
2100 sched = isl_union_map_intersect_range(sched,
2101 isl_union_set_from_set(isl_set_copy(shared_domain)));
2102 if (shared_len != gen->shared_len) {
2103 dim = isl_union_map_get_dim(sched);
2104 proj = projection(dim, gen->shared_len, shared_len);
2105 proj = isl_map_reverse(proj);
2106 shared_domain = isl_set_apply(shared_domain,
2107 isl_map_copy(proj));
2108 sched = isl_union_map_apply_range(sched,
2109 isl_union_map_from_map(proj));
2112 for (i = 0; i < gen->n_array; ++i) {
2113 struct cuda_array_info *array = &gen->array[i];
2115 if (gen->array[i].print_shared_level != level)
2116 continue;
2118 for (j = 0; j < array->n_group; ++j)
2119 print_group_private_accesses(gen, array->groups[j],
2120 type, shared_domain,
2121 first_shared, shared_len, sched);
2124 isl_union_map_free(sched);
2125 isl_set_free(shared_domain);
2128 /* Set unroll[j] if the input dimension j is involved in
2129 * the index expression represented by bmap.
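 *
 * For example (illustrative), for an index expression
 *
 *     [i0, i1, i2] -> [i0 + 2 i2]
 *
 * unroll[0] and unroll[2] are set, while unroll[1] is left untouched.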
2131 static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
2133 int i, j;
2134 int n_in = isl_basic_map_dim(bmap, isl_dim_in);
2135 int n_out = isl_basic_map_dim(bmap, isl_dim_out);
2136 int *unroll = user;
2138 for (i = 0; i < n_out; ++i) {
2139 isl_constraint *c;
2140 int ok;
2142 ok = isl_basic_map_has_defining_equality(bmap,
2143 isl_dim_out, i, &c);
2144 assert(ok);
2145 for (j = 0; j < n_in; ++j)
2146 if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
2147 unroll[j] = 1;
2148 isl_constraint_free(c);
2151 isl_basic_map_free(bmap);
2152 return 0;
2155 /* Given an array pos mapping input dimensions to the corresponding
2156 * output dimension, construct the corresponding map.
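 *
 * For example (illustrative), pos = { 1, 0 } with len = 2 produces the map
 *
 *     { [i0, i1] -> [o0, o1] : o1 = i0 and o0 = i1 }
 *
 * i.e., input dimension 0 is mapped to output dimension 1 and vice versa.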
2158 static __isl_give isl_map *permutation(__isl_take isl_dim *dim,
2159 int *pos, int len)
2161 int i;
2162 isl_constraint *c;
2163 isl_basic_map *bmap;
2165 dim = isl_dim_add(dim, isl_dim_in, len);
2166 dim = isl_dim_add(dim, isl_dim_out, len);
2167 bmap = isl_basic_map_universe(isl_dim_copy(dim));
2169 for (i = 0; i < len; ++i) {
2170 c = isl_equality_alloc(isl_dim_copy(dim));
2171 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
2172 isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
2173 bmap = isl_basic_map_add_constraint(bmap, c);
2175 isl_dim_free(dim);
2177 return isl_map_from_basic_map(bmap);
2180 /* Find all loops involved in any of the index expressions for any of
2181 * the private accesses, move them innermost and then mark them as
2182 * requiring unrolling by setting gen->first_unroll.
2183 * The loops involved should all be parallel because of the checks
2184 * we performed in check_private_group_access. Moving them innermost
2185 * is therefore a valid transformation.
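 *
 * A small sketch (illustrative): with gen->thread_tiled_len == 4 and only
 * unroll[1] set, perm becomes { 0, 3, 1, 2 }, i.e., the loop at position 1
 * is moved to the innermost position, and gen->first_unroll is set to 4.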
2187 static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
2188 __isl_take isl_union_map *sched)
2190 int i, j;
2191 int unroll[gen->thread_tiled_len];
2192 int perm[gen->thread_tiled_len];
2193 isl_dim *dim;
2194 isl_map *permute;
2195 int len = gen->shared_len + gen->n_parallel + gen->n_block;
2197 gen->first_unroll = -1;
2199 for (i = 0; i < gen->thread_tiled_len; ++i)
2200 unroll[i] = 0;
2201 for (i = 0; i < gen->n_array; ++i) {
2202 struct cuda_array_info *array = &gen->array[i];
2204 for (j = 0; j < array->n_group; ++j) {
2205 isl_union_map *access;
2206 isl_map *acc;
2208 if (!array->groups[j]->private_bound)
2209 continue;
2211 access = group_access_relation(array->groups[j], 1, 1);
2212 access = isl_union_map_apply_domain(access,
2213 isl_union_map_copy(sched));
2215 acc = isl_map_from_union_map(access);
2216 isl_map_foreach_basic_map(acc, &check_unroll, unroll);
2218 isl_map_free(acc);
2222 for (i = 0; i < gen->shared_len; ++i)
2223 if (unroll[i])
2224 return sched;
2226 for (i = gen->shared_len; i < len; ++i)
2227 if (unroll[i])
2228 break;
2230 if (i >= len)
2231 return sched;
2233 for (i = len; i < gen->thread_tiled_len; ++i)
2234 if (unroll[i])
2235 return sched;
2237 j = 0;
2238 for (i = 0; i < gen->thread_tiled_len; ++i)
2239 if (!unroll[i])
2240 perm[i] = j++;
2241 gen->first_unroll = 1 + j;
2242 for (i = 0; i < len; ++i)
2243 if (unroll[i])
2244 perm[i] = j++;
2246 dim = isl_union_map_get_dim(sched);
2247 permute = permutation(dim, perm, gen->thread_tiled_len);
2248 sched = isl_union_map_apply_range(sched,
2249 isl_union_map_from_map(permute));
2251 return sched;
2254 /* This function is called for each leaf in the clast of the kernel code.
2255 * We first specialize the schedule to the site of the leaf and
2256 * print code for reading into shared memory, performing the actual
2257 * computations and writing from shared memory, with the required
2258 * synchronizations.
2260 static void print_kernel_user(struct gpucode_info *code,
2261 struct clast_user_stmt *u)
2263 struct cuda_gen *gen = code->user;
2264 isl_set *shared_domain;
2266 shared_domain = extract_entire_host_domain(u);
2268 print_shared_accesses(gen, shared_domain, gen->read, "read", -1);
2270 print_private_accesses(gen, shared_domain, gen->read, "read", -1);
2272 print_shared_body(gen, shared_domain, gen->local_sched,
2273 gen->thread_tiled_len, &print_statement,
2274 gen->first_unroll);
2276 print_private_accesses(gen, shared_domain, gen->write, "write", -1);
2278 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
2279 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
2281 print_shared_accesses(gen, shared_domain, gen->write, "write", -1);
2283 isl_set_free(shared_domain);
2286 /* Check if we need to perform any copying to shared memory at this level
2287 * and if so, print the copying instructions.
2288 * Any array for which we are allowed to print copying instructions at
2289 * this level, but haven't done so already, is printed.
2291 static void print_kernel_for_head(struct gpucode_info *code,
2292 struct clast_for *f)
2294 int i;
2295 struct cuda_gen *gen = code->user;
2296 isl_set *domain;
2297 int level;
2298 int print = 0;
2300 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2301 level = isl_set_dim(domain, isl_dim_set) - 1;
2303 for (i = 0; i < gen->n_array; ++i) {
2304 if (gen->array[i].print_shared_level >= 0)
2305 continue;
2306 if (gen->array[i].last_shared > level)
2307 continue;
2308 gen->array[i].print_shared_level = level;
2309 print = 1;
2312 if (print) {
2313 print_shared_accesses(gen, domain, gen->read, "read", level);
2314 print_private_accesses(gen, domain, gen->read, "read", level);
2317 isl_set_free(domain);
2320 /* Print instructions for copying from shared memory for each array
2321 * for which print_kernel_for_head has added copying instructions
2322 * to shared memory.
2324 static void print_kernel_for_foot(struct gpucode_info *code,
2325 struct clast_for *f)
2327 int i;
2328 struct cuda_gen *gen = code->user;
2329 isl_set *domain;
2330 int level;
2331 int print = 0;
2333 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2334 level = isl_set_dim(domain, isl_dim_set) - 1;
2336 for (i = 0; i < gen->n_array; ++i) {
2337 if (gen->array[i].print_shared_level != level)
2338 continue;
2339 print = 1;
2340 break;
2343 if (print) {
2344 print_private_accesses(gen, domain, gen->write, "write", level);
2345 print_shared_accesses(gen, domain, gen->write, "write", level);
2348 isl_set_free(domain);
2351 /* Use CLooG to generate code for the outer gen->shared_first loops
2352 * of the local schedule "sched".
2353 * The pretty printing of this code is handled by gpu_print_host_stmt,
2354 * which calls print_kernel_user for each iteration of the shared tile loops.
2356 static void print_cloog_kernel_body(struct cuda_gen *gen,
2357 __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
2359 int i;
2360 CloogOptions *options;
2361 CloogDomain *cloog_context;
2362 CloogUnionDomain *ud;
2363 CloogInput *input;
2364 struct clast_stmt *stmt;
2365 char name[20];
2367 sched = isl_union_map_copy(sched);
2368 sched = isl_union_map_align_params(sched, isl_set_get_dim(context));
2370 options = cloog_options_malloc(gen->state);
2371 options->language = LANGUAGE_C;
2372 options->strides = 1;
2373 options->sh = 1;
2374 options->stop = gen->shared_len;
2375 options->f = gen->tiled_len;
2376 options->l = gen->tiled_len;
2377 options->save_domains = 1;
2378 options->noscalars = 1;
2380 ud = cloog_union_domain_from_isl_union_map(sched);
2381 for (i = 0; i < gen->shared_len; ++i) {
2382 snprintf(name, sizeof(name), "g%d", i);
2383 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
2385 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
2386 input = cloog_input_alloc(cloog_context, ud);
2388 stmt = cloog_clast_create_from_input(input, options);
2390 gen->kernel_code.indent = 4;
2391 gen->kernel_code.dst = gen->cuda.kernel_c;
2392 gen->kernel_code.print_user_stmt = NULL;
2393 gen->kernel_code.print_user_stmt_list = &print_kernel_user;
2394 gen->kernel_code.print_for_head = &print_kernel_for_head;
2395 gen->kernel_code.print_for_foot = &print_kernel_for_foot;
2396 gen->kernel_code.user = gen;
2397 gpu_print_host_stmt(&gen->kernel_code, stmt);
2399 cloog_clast_free(stmt);
2400 cloog_options_free(options);
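/* Print the declarations of the block and thread identifiers used in
 * the kernel body.  The CUDA dimensions are assigned in reverse order,
 * so for gen->n_grid == 2 this prints (illustrative)
 *
 *     int b0 = blockIdx.y, b1 = blockIdx.x;
 */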
2403 static void print_kernel_iterators(struct cuda_gen *gen)
2405 int i;
2406 const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
2407 const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
2408 "threadIdx.z" };
2410 if (gen->n_grid > 0) {
2411 print_indent(gen->cuda.kernel_c, 4);
2412 fprintf(gen->cuda.kernel_c, "int ");
2413 for (i = 0; i < gen->n_grid; ++i) {
2414 if (i)
2415 fprintf(gen->cuda.kernel_c, ", ");
2416 fprintf(gen->cuda.kernel_c, "b%d = %s",
2417 i, block_dims[gen->n_grid - 1 - i]);
2419 fprintf(gen->cuda.kernel_c, ";\n");
2422 if (gen->n_block > 0) {
2423 print_indent(gen->cuda.kernel_c, 4);
2424 fprintf(gen->cuda.kernel_c, "int ");
2425 for (i = 0; i < gen->n_block; ++i) {
2426 if (i)
2427 fprintf(gen->cuda.kernel_c, ", ");
2428 fprintf(gen->cuda.kernel_c, "t%d = %s",
2429 i, thread_dims[gen->n_block - 1 - i]);
2431 fprintf(gen->cuda.kernel_c, ";\n");
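/* Print the declaration of the private or shared memory copy of the array
 * accessed by the given reference group, e.g. (illustrative, assuming an
 * element type of "float")
 *
 *     __shared__ float shared_A[32][32];
 */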
2435 static void print_group_shared_array(struct cuda_gen *gen,
2436 struct cuda_array_ref_group *group)
2438 int j;
2439 struct cuda_array_bound *bounds;
2441 bounds = group->private_bound;
2442 if (!bounds)
2443 bounds = group->shared_bound;
2444 if (!bounds)
2445 return;
2447 print_indent(gen->cuda.kernel_c, 4);
2448 fprintf(gen->cuda.kernel_c, "%s%s ",
2449 group->private_bound ? "" : "__shared__ ", group->array->type);
2450 print_array_name(gen->cuda.kernel_c, group);
2451 for (j = 0; j < group->array->n_index; ++j) {
2452 fprintf(gen->cuda.kernel_c, "[");
2453 isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
2454 fprintf(gen->cuda.kernel_c, "]");
2456 fprintf(gen->cuda.kernel_c, ";\n");
2459 static void print_shared_arrays(struct cuda_gen *gen)
2461 int i, j;
2463 for (i = 0; i < gen->n_array; ++i) {
2464 struct cuda_array_info *array = &gen->array[i];
2466 for (j = 0; j < array->n_group; ++j)
2467 print_group_shared_array(gen, array->groups[j]);
2471 static void print_kernel_body(struct cuda_gen *gen,
2472 __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
2474 isl_set *context;
2476 context = isl_set_copy(host_domain);
2477 context = parametrize(context, 0, gen->tile_first, "h");
2478 context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
2479 context = add_bounded_parameters(context,
2480 gen->n_grid, gen->grid_dim, "b");
2482 print_kernel_iterators(gen);
2483 print_shared_arrays(gen);
2485 fprintf(gen->cuda.kernel_c, "\n");
2487 print_cloog_kernel_body(gen, context, sched);
2489 isl_set_free(context);
2492 /* Given a constraint
2494 * a(p,i) + j = g f(e)
2496 * or -a(p,i) - j = g f(e) if sign < 0,
2497 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2498 * a(p,i) is assumed to be an expression in only the parameters.
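 *
 * For example (illustrative), the constraint
 *
 *     n + j = 4 e0
 *
 * with parameter n and existentially quantified variable e0 results in
 * bound->shift = n and bound->stride = 4.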
2500 static void extract_stride(__isl_keep isl_constraint *c,
2501 struct cuda_array_bound *bound, isl_int stride, int sign)
2503 int i;
2504 isl_int v;
2505 isl_int one;
2506 isl_dim *dim;
2507 unsigned nparam;
2508 isl_qpolynomial *qp;
2510 isl_int_set(bound->stride, stride);
2512 dim = isl_constraint_get_dim(c);
2513 dim = isl_dim_drop(dim, isl_dim_out, 0, 1);
2514 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2515 dim = isl_dim_domain(dim);
2517 nparam = isl_dim_size(dim, isl_dim_param);
2519 isl_int_init(v);
2520 isl_int_init(one);
2521 isl_int_set_si(one, 1);
2523 isl_constraint_get_constant(c, &v);
2524 if (sign < 0)
2525 isl_int_neg(v, v);
2526 qp = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2528 for (i = 0; i < nparam; ++i) {
2529 isl_qpolynomial *t, *p;
2531 isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
2532 if (isl_int_is_zero(v))
2533 continue;
2534 if (sign < 0)
2535 isl_int_neg(v, v);
2536 t = isl_qpolynomial_rat_cst(isl_dim_copy(dim), v, one);
2537 p = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_param, i);
2538 t = isl_qpolynomial_mul(t, p);
2539 qp = isl_qpolynomial_add(qp, t);
2542 isl_dim_free(dim);
2543 isl_int_clear(one);
2544 isl_int_clear(v);
2546 bound->shift = qp;
2549 /* Given an equality constraint of a map with a single output dimension j,
2550 * check if the constraint is of the form
2552 * a(p,i) + j = g f(e)
2554 * with a(p,i) an expression in the parameters and input dimensions
2555 * and f(e) an expression in the existentially quantified variables.
2556 * If so, and if g is larger than any such g from a previously considered
2557 * constraint, then call extract_stride to record the stride information
2558 * in bound.
2560 static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
2562 int i;
2563 isl_int v, stride;
2564 unsigned n_div;
2565 struct cuda_array_bound *bound = user;
2567 isl_int_init(v);
2568 isl_int_init(stride);
2570 n_div = isl_constraint_dim(c, isl_dim_div);
2571 isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);
2573 if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
2574 int s = isl_int_sgn(v);
2575 isl_int_set_si(stride, 0);
2576 for (i = 0; i < n_div; ++i) {
2577 isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
2578 isl_int_gcd(stride, stride, v);
2580 if (!isl_int_is_zero(stride) &&
2581 isl_int_gt(stride, bound->stride))
2582 extract_stride(c, bound, stride, s);
2585 isl_int_clear(stride);
2586 isl_int_clear(v);
2588 isl_constraint_free(c);
2589 return 0;
2592 /* Given constraints on an array index i, check if we can find
2593 * a shift a(p) and a stride g such that
2595 * a(p) + i = 0 mod g
2597 * If so, record the information in bound and apply the mapping
2598 * i -> (i + a(p))/g to the array index in bounds and return
2599 * the new constraints.
2600 * If not, simply return the original constraints.
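 *
 * For example (illustrative), if the index satisfies i = 2 n mod 4,
 * then a(p) = -2 n and g = 4 are recorded and the index is replaced
 * by (i - 2 n)/4.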
2602 static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
2603 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2605 isl_dim *dim;
2606 isl_basic_map *aff;
2607 isl_basic_map *shift;
2608 isl_qpolynomial *qp, *t;
2609 isl_int one;
2611 isl_int_set_si(bound->stride, -1);
2613 aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));
2615 isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);
2617 isl_basic_map_free(aff);
2619 if (isl_int_is_neg(bound->stride))
2620 return bounds;
2622 qp = isl_qpolynomial_copy(bound->shift);
2623 qp = isl_qpolynomial_add_dims(qp, isl_dim_set, 1);
2624 dim = isl_qpolynomial_get_dim(qp);
2625 t = isl_qpolynomial_var(isl_dim_copy(dim), isl_dim_set, 0);
2626 qp = isl_qpolynomial_add(qp, t);
2627 isl_int_init(one);
2628 isl_int_set_si(one, 1);
2629 t = isl_qpolynomial_rat_cst(dim, one, bound->stride);
2630 isl_int_clear(one);
2631 qp = isl_qpolynomial_mul(qp, t);
2632 shift = isl_basic_map_from_qpolynomial(qp);
2634 bound->shift_map = isl_basic_map_copy(shift);
2635 bounds = isl_basic_map_apply_range(bounds, shift);
2637 return bounds;
2640 struct cuda_size_info {
2641 isl_basic_set *bset;
2642 struct cuda_array_bound *bound;
2643 int pos;
2646 /* Given a constraint from the basic set describing the bounds on
2647 * an array index, check if it is a lower bound, say m i >= b(x), and,
2648 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2649 * upper bound. If so, and if this bound is smaller than any bound
2650 * derived from earlier constraints, set the size to this bound on
2651 * the expression and the lower bound to ceil(b(x)/m).
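 *
 * For example (illustrative), if the constraints include i >= n and
 * i <= n + 31, then "i - n + 1" has constant upper bound 32, so the
 * size is set to 32 and the lower bound to n.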
2653 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
2655 struct cuda_size_info *size = user;
2656 unsigned nparam;
2657 unsigned n_div;
2658 isl_int v;
2660 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
2661 n_div = isl_constraint_dim(c, isl_dim_div);
2663 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
2664 isl_constraint_free(c);
2665 return 0;
2668 isl_int_init(v);
2670 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
2672 if (isl_int_is_pos(v)) {
2673 isl_aff *aff;
2674 isl_aff *lb;
2675 enum isl_lp_result res;
2677 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
2678 aff = isl_aff_ceil(aff);
2680 lb = isl_aff_copy(aff);
2682 aff = isl_aff_neg(aff);
2683 aff = isl_aff_add_coefficient_si(aff, isl_dim_set, size->pos, 1);
2685 res = isl_basic_set_max(size->bset, aff, &v);
2686 isl_aff_free(aff);
2688 if (res == isl_lp_ok) {
2689 isl_int_add_ui(v, v, 1);
2690 if (isl_int_is_neg(size->bound->size) ||
2691 isl_int_lt(v, size->bound->size)) {
2692 isl_int_set(size->bound->size, v);
2693 lb = isl_aff_drop_dims(lb, isl_dim_set,
2694 0, size->pos + 1);
2695 isl_aff_free(size->bound->lb);
2696 size->bound->lb = isl_aff_copy(lb);
2699 isl_aff_free(lb);
2702 isl_int_clear(v);
2703 isl_constraint_free(c);
2705 return 0;
2708 /* Given a basic map "bounds" that maps parameters and input dimensions
2709 * to a single output dimension, look for an expression in the parameters
2710 * and input dimensions such that the range of the output dimension shifted
2711 * by this expression is a constant.
2713 * In particular, we currently only consider lower bounds on the output
2714 * dimension as candidate expressions.
2716 static int compute_array_dim_size(struct cuda_gen *gen,
2717 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2719 struct cuda_size_info size;
2721 bounds = check_stride(gen, bound, bounds);
2723 isl_int_set_si(bound->size, -1);
2724 bound->lb = NULL;
2726 size.bound = bound;
2727 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
2728 size.bset = isl_basic_map_wrap(bounds);
2729 size.bset = isl_basic_set_flatten(size.bset);
2730 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
2731 &size);
2732 isl_basic_set_free(size.bset);
2734 return isl_int_is_nonneg(bound->size) ? 0 : -1;
2737 /* Check if we can find a shared memory tile for the given array
2738 * based on the given accesses, and if so, put the results
2739 * in array->shared_bound.
2741 * We project the accesses on each index in turn and look for a parametric
2742 * offset such that the size is constant.
2744 static int can_tile_for_shared_memory(struct cuda_gen *gen,
2745 struct cuda_array_info *array, __isl_keep isl_map *access,
2746 struct cuda_array_bound *bounds)
2748 int i;
2750 for (i = 0; i < array->n_index; ++i) {
2751 isl_map *access_i;
2752 isl_basic_map *hull;
2754 access_i = isl_map_copy(access);
2755 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2756 access_i = isl_map_project_out(access_i, isl_dim_out,
2757 1, array->n_index - (i + 1));
2758 access_i = isl_map_compute_divs(access_i);
2759 hull = isl_map_simple_hull(access_i);
2760 if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
2761 return 0;
2764 return 1;
2767 /* Construct a map that takes as input the shared tile loops and the loops
2768 * that will be wrapped around the threads, relates these latter loops
2769 * to the thread indices and then projects them out.
2771 static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
2773 isl_map *priv;
2774 isl_map *tiling;
2775 isl_map *proj;
2776 isl_set *par;
2777 isl_dim *dim;
2779 dim = isl_union_map_get_dim(gen->shared_sched);
2781 if (gen->options->wrap)
2782 tiling = wrap(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2783 gen->shared_len, gen->n_block, gen->block_dim);
2784 else
2785 tiling = tile(isl_dim_copy(dim), gen->shared_len + gen->n_block,
2786 gen->shared_len, gen->n_block, gen->block_dim);
2788 priv = tiling;
2790 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2791 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2792 gen->n_block, "t");
2794 priv = isl_map_align_params(priv, isl_set_get_dim(par));
2795 priv = isl_map_intersect_range(priv, par);
2797 dim = isl_map_get_dim(priv);
2798 dim = isl_dim_drop(dim, isl_dim_in, 0, isl_dim_size(dim, isl_dim_in));
2799 dim = isl_dim_drop(dim, isl_dim_out, 0, isl_dim_size(dim, isl_dim_out));
2800 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2801 gen->shared_len);
2803 priv = isl_map_apply_range(priv, proj);
2805 return priv;
2808 /* Construct a map from domain_dim to domain_dim that increments
2809 * the dimension at position "pos" and leaves all other dimensions
2810 * constant.
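 *
 * For example (illustrative), for a 3-dimensional domain and pos = 1,
 * the result is the map
 *
 *     { [i0, i1, i2] -> [i0, i1 + 1, i2] }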
2812 static __isl_give isl_map *next(__isl_take isl_dim *domain_dim, int pos)
2814 int i;
2815 int len = isl_dim_size(domain_dim, isl_dim_set);
2816 isl_dim *dim;
2817 isl_basic_map *next;
2819 dim = isl_dim_map_from_set(domain_dim);
2820 next = isl_basic_map_universe(isl_dim_copy(dim));
2822 for (i = 0; i < len; ++i) {
2823 isl_constraint *c;
2825 c = isl_equality_alloc(isl_dim_copy(dim));
2826 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2827 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2828 if (i == pos)
2829 isl_constraint_set_constant_si(c, 1);
2830 next = isl_basic_map_add_constraint(next, c);
2833 isl_dim_free(dim);
2835 return isl_map_from_basic_map(next);
2838 /* Check if the given access is coalesced.
2839 * That is, check whether incrementing the dimension that will get
2840 * wrapped over the last thread index results in incrementing
2841 * the last array index.
2843 * This function is only called for access relations without reuse.
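 *
 * For example (illustrative), an access of the form
 *
 *     [..., t_x] -> A[..., c + t_x]
 *
 * is coalesced, since incrementing the dimension wrapped over the last
 * thread index increments the last array index by one.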
2845 static int access_is_coalesced(struct cuda_gen *gen,
2846 __isl_keep isl_union_map *access)
2848 isl_dim *dim;
2849 isl_map *access_map;
2850 isl_map *next_thread_x;
2851 isl_map *next_element;
2852 isl_map *map;
2853 int coalesced;
2855 access = isl_union_map_copy(access);
2856 access = isl_union_map_apply_domain(access,
2857 isl_union_map_copy(gen->tiled_sched));
2858 access_map = isl_map_from_union_map(access);
2860 dim = isl_map_get_dim(access_map);
2861 dim = isl_dim_domain(dim);
2862 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
2864 dim = isl_map_get_dim(access_map);
2865 dim = isl_dim_range(dim);
2866 next_element = next(dim, isl_dim_size(dim, isl_dim_set) - 1);
2868 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
2869 map = isl_map_apply_range(map, access_map);
2871 coalesced = isl_map_is_subset(map, next_element);
2873 isl_map_free(next_element);
2874 isl_map_free(map);
2876 return coalesced;
2879 /* For the given array reference group, check whether the access is private
2880 * to the thread. That is, check that any given array element
2881 * is only accessed by a single thread.
2882 * We compute an access relation that maps the shared tile loop iterators
2883 * and the shared point loop iterators that will be wrapped over the
2884 * threads to the array elements.
2885 * We actually check that those iterators that will be wrapped
2886 * partition the array space. This check is stricter than necessary
2887 * since several iterations may be mapped onto the same thread
2888 * and then they could be allowed to access the same memory elements,
2889 * but our check does not allow this situation.
2891 * We also check that the index expression only depends on parallel
2892 * loops. That way, we can move those loops innermost and unroll them.
2893 * Again, we use a test that is stricter than necessary.
2894 * We actually check whether the index expression only depends
2895 * on the iterators that are wrapped over the threads.
2896 * These are necessarily parallel, but there may be more parallel loops.
2898 * Combining the injectivity of the first test with the single-valuedness
2899 * of the second test, we simply test for bijectivity.
2901 * If it turns out we can use registers, we compute the private memory
2902 * tile size using can_tile_for_shared_memory, after introducing a dependence
2903 * on the thread indices.
2905 * Before performing any of the above computations, we first check
2906 * if there is any reuse on the reference group. If not, we simply
2907 * return. If, moreover, the access is coalesced then we also remove
2908 * the shared memory tiling since we should just use global memory instead.
2910 static void check_private_group_access(struct cuda_gen *gen,
2911 struct cuda_array_ref_group *group)
2913 isl_map *acc;
2914 isl_union_map *access;
2915 int n_index = group->array->n_index;
2917 access = group_access_relation(group, 1, 1);
2918 if (isl_union_map_is_injective(access)) {
2919 if (group->shared_bound && access_is_coalesced(gen, access)) {
2920 free_bound_list(group->shared_bound, n_index);
2921 group->shared_bound = NULL;
2923 isl_union_map_free(access);
2924 return;
2926 access = isl_union_map_apply_domain(access,
2927 isl_union_map_copy(gen->shared_sched));
2929 acc = isl_map_from_union_map(access);
2931 if (!isl_map_is_bijective(acc)) {
2932 isl_map_free(acc);
2933 return;
2936 group->private_bound = create_bound_list(gen->ctx, n_index);
2937 acc = isl_map_align_params(acc, isl_map_get_dim(gen->privatization));
2938 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
2939 if (!can_tile_for_shared_memory(gen, group->array, acc,
2940 group->private_bound)) {
2941 free_bound_list(group->private_bound, n_index);
2942 group->private_bound = NULL;
2945 isl_map_free(acc);
2948 /* Look for the last shared tile loop that affects the offset of the
2949 * shared or private tile and store the result in array->last_shared.
2951 static void set_last_shared(struct cuda_gen *gen,
2952 struct cuda_array_ref_group *group)
2954 int i, j;
2955 struct cuda_array_bound *bounds;
2956 unsigned first_shared = gen->first_shared;
2957 int n_index = group->array->n_index;
2959 bounds = group->private_bound;
2960 if (!bounds)
2961 bounds = group->shared_bound;
2962 if (!bounds)
2963 return;
2965 for (j = gen->shared_len - 1; j >= 0; --j) {
2966 for (i = 0; i < n_index; ++i) {
2967 isl_aff *lb;
2968 isl_qpolynomial *shift;
2970 lb = bounds[i].lb;
2971 if (isl_aff_involves_dims(lb, isl_dim_param,
2972 first_shared + j, 1))
2973 break;
2975 shift = bounds[i].shift;
2976 if (!shift)
2977 continue;
2978 if (isl_qpolynomial_involves_dims(shift, isl_dim_param,
2979 first_shared + j, 1))
2980 break;
2982 if (i < n_index)
2983 break;
2985 group->array->last_shared = j;
2988 /* Compute the sizes of all private arrays for the current kernel,
2989 * as well as the offsets of the private pieces in the original arrays.
2990 * If we cannot or don't want to privatize a given array group,
2991 * we use the shared memory tile sizes computed in
2992 * compute_group_shared_bound instead.
2994 * If a given array only has a single reference group and if we have
2995 * been able to find a private or shared tile,
2996 * we also look for the last shared tile loop that affects the offset
2997 * (and therefore the array tile) and store the result in array->last_shared.
2999 * A privatized copy of all access relations from reference groups that
3000 * are mapped to private memory is stored in gen->privatization.
3002 static void compute_private_size(struct cuda_gen *gen)
3004 int i, j;
3005 isl_union_map *private;
3007 private = isl_union_map_empty(isl_union_map_get_dim(gen->shared_sched));
3009 for (i = 0; i < gen->n_array; ++i) {
3010 struct cuda_array_info *array = &gen->array[i];
3012 for (j = 0; j < array->n_group; ++j) {
3013 check_private_group_access(gen, array->groups[j]);
3015 if (!array->groups[j]->private_bound)
3016 continue;
3018 private = isl_union_map_union(private,
3019 group_access_relation(array->groups[j], 1, 1));
3022 array->last_shared = gen->shared_len - 1;
3023 array->print_shared_level = -1;
3025 if (array->n_group != 1)
3026 continue;
3027 set_last_shared(gen, array->groups[0]);
3030 if (isl_union_map_is_empty(private))
3031 isl_union_map_free(private);
3032 else {
3033 isl_union_map *priv;
3035 private = isl_union_map_apply_domain(private,
3036 isl_union_map_copy(gen->shared_sched));
3037 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
3038 private = isl_union_map_apply_domain(private, priv);
3039 gen->private_access = private;
3043 /* Fill up the groups array with singleton groups, i.e., one group
3044 * per reference, initializing the array, access, write and refs fields.
3045 * In particular the access field is initialized to the scheduled
3046 * access relation of the array reference.
3048 * Return the number of elements initialized, i.e., the number of
3049 * active references in the current kernel.
3051 static int populate_array_references(struct cuda_gen *gen,
3052 struct cuda_array_info *array, __isl_keep isl_union_map *sched,
3053 struct cuda_array_ref_group **groups)
3055 int i;
3056 int n;
3057 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3059 n = 0;
3060 for (i = 0; i < array->n_ref; ++i) {
3061 isl_union_map *umap;
3062 isl_map *map;
3063 struct cuda_array_ref_group *group;
3064 struct cuda_stmt_access *access = array->refs[i];
3066 map = isl_map_copy(access->access);
3067 umap = isl_union_map_from_map(map);
3068 umap = isl_union_map_apply_domain(umap,
3069 isl_union_map_copy(sched));
3071 if (isl_union_map_is_empty(umap)) {
3072 isl_union_map_free(umap);
3073 continue;
3076 map = isl_map_from_union_map(umap);
3078 group = isl_calloc_type(ctx, struct cuda_array_ref_group);
3079 assert(group);
3080 group->array = array;
3081 group->access = map;
3082 group->write = access->write;
3083 group->refs = &array->refs[i];
3085 groups[n++] = group;
3088 return n;
3091 static void free_array_ref_group(struct cuda_array_ref_group *group,
3092 int n_index)
3094 if (!group)
3095 return;
3096 free_bound_list(group->shared_bound, n_index);
3097 free_bound_list(group->private_bound, n_index);
3098 isl_map_free(group->access);
3099 free(group->refs);
3100 free(group);
3103 /* If two groups have overlapping access relations and if one of them
3104 * involves a write, then merge the two groups into one.
3106 * We keep track of the grouping in "leader". leader[j] points to
3107 * an earlier group array element that belongs to the same group,
3108 * or the array element j itself if this element is the first in the group.
3110 * Return the number of group leaders.
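 *
 * For example (illustrative), if group 1 is merged into group 0 and
 * group 2 stays on its own, then leader = { 0, 0, 2 } and the number
 * of group leaders is 2.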
3112 static int group_overlapping_writes(int n,
3113 struct cuda_array_ref_group **groups, int *leader)
3115 int i, j;
3116 int n_group = n;
3118 for (i = 0; i < n; ++i) {
3119 int l = i;
3120 groups[l]->n_ref = 1;
3121 for (j = i - 1; j >= 0; --j) {
3122 isl_map *map;
3123 int empty;
3125 if (leader[j] != j)
3126 continue;
3127 if (!groups[l]->write && !groups[j]->write)
3128 continue;
3130 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3131 isl_map_copy(groups[j]->access));
3132 empty = isl_map_is_empty(map);
3133 isl_map_free(map);
3135 if (empty)
3136 continue;
3138 groups[j]->access = isl_map_union(groups[j]->access,
3139 groups[l]->access);
3140 groups[j]->write = 1;
3141 groups[l]->access = NULL;
3142 groups[j]->n_ref += groups[l]->n_ref;
3143 l = leader[l] = j;
3144 n_group--;
3146 leader[i] = l;
3149 return n_group;
3152 /* Compute the size of the shared array corresponding to the given
3153 * array reference group, based on the accesses from the current kernel,
3154 * as well as the offset of the shared piece in the original array.
3156 static void compute_group_shared_bound(struct cuda_gen *gen,
3157 struct cuda_array_info *array, struct cuda_array_ref_group *group)
3159 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
3161 group->shared_bound = create_bound_list(ctx, array->n_index);
3162 if (!can_tile_for_shared_memory(gen, array, group->access,
3163 group->shared_bound)) {
3164 free_bound_list(group->shared_bound, array->n_index);
3165 group->shared_bound = NULL;
3169 /* Given an initial grouping of array references and shared memory tiles
3170 * for each group that allows for a shared memory tile, merge two groups
3171 * if both have a shared memory tile and if the merged group also has
3172 * a shared memory tile.
3174 * Return the number of group leaders after merging.
3176 static int group_common_shared_memory_tile(struct cuda_gen *gen,
3177 struct cuda_array_info *array, int n,
3178 struct cuda_array_ref_group **groups, int *leader, int n_group)
3180 int i, j;
3181 isl_ctx *ctx = isl_dim_get_ctx(array->dim);
3183 for (i = 0; n_group > 1 && i < n; ++i) {
3184 int l = i;
3185 if (leader[i] != i)
3186 continue;
3187 if (!groups[i]->shared_bound)
3188 continue;
3189 for (j = i - 1; j >= 0; --j) {
3190 isl_map *map;
3191 int empty;
3192 struct cuda_array_bound *shared_bound;
3194 if (leader[j] != j)
3195 continue;
3196 if (!groups[j]->shared_bound)
3197 continue;
3199 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3200 isl_map_copy(groups[j]->access));
3201 empty = isl_map_is_empty(map);
3202 isl_map_free(map);
3204 if (empty)
3205 continue;
3207 map = isl_map_union(isl_map_copy(groups[l]->access),
3208 isl_map_copy(groups[j]->access));
3209 shared_bound = create_bound_list(ctx, array->n_index);
3210 if (!can_tile_for_shared_memory(gen, array, map,
3211 shared_bound)) {
3212 isl_map_free(map);
3213 free_bound_list(shared_bound, array->n_index);
3214 continue;
3217 free_bound_list(groups[j]->shared_bound,
3218 array->n_index);
3219 groups[j]->shared_bound = shared_bound;
3220 isl_map_free(groups[j]->access);
3221 groups[j]->access = map;
3222 groups[j]->n_ref += groups[l]->n_ref;
3223 l = leader[l] = j;
3224 n_group--;
3228 return n_group;
3231 /* Extract an array of array reference groups from the array of references
3232 * and the grouping information in "leader".
3234 * Store the results in array->n_group and array->groups.
3236 static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
3237 int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
3239 int i, j;
3241 for (i = 2; i < n; ++i)
3242 leader[i] = leader[leader[i]];
3244 array->n_group = n_group;
3245 array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
3246 n_group);
3247 assert(array->groups);
3249 j = 0;
3250 for (i = 0; i < n; ++i) {
3251 int k, l;
3252 struct cuda_stmt_access **refs;
3254 if (leader[i] != i) {
3255 groups[i]->refs = NULL;
3256 free_array_ref_group(groups[i], array->n_index);
3257 continue;
3260 refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
3261 groups[i]->n_ref);
3262 assert(refs);
3263 l = 0;
3264 for (k = i; k < n; ++k)
3265 if (leader[k] == i) {
3266 refs[l++] = *groups[k]->refs;
3267 (*groups[k]->refs)->group = j;
3270 groups[i]->refs = refs;
3271 groups[i]->nr = j;
3272 array->groups[j++] = groups[i];
3276 /* Group array references that should be considered together when
3277 * deciding whether to access them from private, shared or global memory.
3279 * In particular, if two array references overlap and if one of them
3280 * is a write, then the two references are grouped together.
3281 * Furthermore, if two groups admit a shared memory tile and if the
3282 * combination of the two also admits a shared memory tile, we merge
3283 * the two groups.
3285 * During the construction the group->refs field points to a single
3286 * array reference inside the array of array references, while
3287 * group->n_ref contains the number of elements in leader that
3288 * (directly or indirectly) point to this group, provided the group
3289 * is a leader.
3291 static void group_array_references(struct cuda_gen *gen,
3292 struct cuda_array_info *array, __isl_keep isl_union_map *sched)
3294 int i;
3295 int n, n_group;
3296 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3297 struct cuda_array_ref_group **groups;
3298 int *leader;
3300 groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
3301 array->n_ref);
3302 assert(groups);
3304 n = populate_array_references(gen, array, sched, groups);
3306 leader = isl_alloc_array(ctx, int, n);
3307 assert(leader);
3309 n_group = group_overlapping_writes(n, groups, leader);
3311 for (i = 0; i < n; ++i)
3312 if (leader[i] == i)
3313 compute_group_shared_bound(gen, array, groups[i]);
3315 n_group = group_common_shared_memory_tile(gen, array, n, groups,
3316 leader, n_group);
3318 extract_array_groups(ctx, array, n, groups, leader, n_group);
3320 free(leader);
3321 free(groups);
3324 /* Take tiled_sched, project it onto the shared tile loops and
3325 * the loops that will be wrapped over the threads,
3326 * parametrize the shared tile loops and store the result in gen->shared_sched.
3327 * The position of the first of these parameters is stored in gen->first_shared.
3328 * Also compute a projection that projects out the loops that will be
3329 * wrapped over the threads and store this projection in gen->shared_proj.
3331 static void compute_shared_sched(struct cuda_gen *gen)
3333 isl_dim *dim;
3334 isl_map *proj;
3335 isl_set *par;
3336 isl_union_map *sched;
3338 sched = isl_union_map_copy(gen->tiled_sched);
3340 dim = isl_union_map_get_dim(sched);
3341 gen->first_shared = isl_dim_size(dim, isl_dim_param);
3342 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
3343 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
3345 dim = isl_union_map_get_dim(sched);
3346 par = parametrization(dim, gen->shared_len + gen->n_block,
3347 0, gen->shared_len, "g");
3348 sched = isl_union_map_intersect_range(sched,
3349 isl_union_set_from_set(par));
3351 dim = isl_union_map_get_dim(sched);
3352 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
3354 gen->shared_sched = sched;
3355 gen->shared_proj = isl_union_map_from_map(proj);
3358 /* Group references of all arrays in the program.
3360 static void group_references(struct cuda_gen *gen)
3362 int i;
3363 isl_union_map *sched;
3365 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3366 isl_union_map_copy(gen->shared_proj));
3368 for (i = 0; i < gen->n_array; ++i)
3369 group_array_references(gen, &gen->array[i], sched);
3371 isl_union_map_free(sched);
3374 /* Free all array information that is local to the current kernel.
3376 static void free_local_array_info(struct cuda_gen *gen)
3378 int i, j;
3380 for (i = 0; i < gen->n_array; ++i) {
3381 struct cuda_array_info *array = &gen->array[i];
3383 for (j = 0; j < array->n_group; ++j)
3384 free_array_ref_group(array->groups[j], array->n_index);
3385 free(array->groups);
3387 if (array->n_group == 0)
3388 continue;
3389 for (j = 0; j < gen->array[i].n_index; ++j) {
3390 isl_pw_aff_free(gen->array[i].local_bound[j]);
3391 gen->array[i].local_bound[j] = NULL;
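/* Print a parenthesized, comma-separated list of iterators
 * "prefix0", "prefix1", ..., optionally wrapping each iterator in its own
 * pair of parentheses, e.g. (illustrative) "(a0, a1)" or "((a0), (a1))".
 */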
3396 static void print_iterator_list(FILE *out, int len, const char *prefix,
3397 int parens)
3399 int i;
3401 fprintf(out, "(");
3402 for (i = 0; i < len; ++i) {
3403 if (i)
3404 fprintf(out, ", ");
3405 if (parens)
3406 fprintf(out, "(%s%d)", prefix, i);
3407 else
3408 fprintf(out, "%s%d", prefix, i);
3410 fprintf(out, ")");
3413 /* Print an access to the element in the global memory copy of the
3414 * given array that corresponds to element [a0][a1]... of the original array.
3415 * The copy in global memory has been linearized, so we need to take
3416 * the array size into account.
3418 static void print_global_index(isl_ctx *ctx, FILE *out,
3419 struct cuda_array_info *array)
3421 int i;
3422 isl_printer *prn;
3424 if (cuda_array_is_scalar(array)) {
3425 fprintf(out, "*%s", array->name);
3426 return;
3429 fprintf(out, "%s[", array->name);
3430 for (i = 0; i + 1 < array->n_index; ++i)
3431 fprintf(out, "(");
3432 for (i = 0; i < array->n_index; ++i) {
3433 if (i) {
3434 prn = isl_printer_to_file(ctx, out);
3435 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3436 prn = isl_printer_print_str(prn, ") * (");
3437 prn = isl_printer_print_pw_aff(prn,
3438 array->local_bound[i]);
3439 prn = isl_printer_print_str(prn, ") + ");
3440 isl_printer_free(prn);
3442 fprintf(out, "a%d", i);
3444 fprintf(out, "]");
3447 /* Print an access to the element in the shared memory copy of the
3448 * given array that corresponds to element [a0][a1]... of the original array.
3449 * Since the array in shared memory is just a shifted copy of part
3450 * of the original array, we simply need to subtract the lower bound,
3451 * which was computed in can_tile_for_shared_memory.
3452 * If any of the indices is strided, then we first add
3453 * shared_bound[i].shift and divide by shared_bound[i].stride.
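 *
 * For example (illustrative), in the absence of strides this prints
 * accesses of the form
 *
 *     shared_A[(a0) - (lb0)][(a1) - (lb1)]
 *
 * where lb0 and lb1 are the computed lower bounds.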
3455 static void print_local_index(FILE *out, struct cuda_array_ref_group *group)
3457 int i;
3458 isl_ctx *ctx;
3459 isl_printer *prn;
3460 struct cuda_array_bound *bounds = group->shared_bound;
3462 ctx = isl_dim_get_ctx(group->array->dim);
3463 print_array_name(out, group);
3464 for (i = 0; i < group->array->n_index; ++i) {
3465 fprintf(out, "[(a%d", i);
3466 if (bounds[i].shift) {
3467 fprintf(out, " + (");
3468 prn = isl_printer_to_file(ctx, out);
3469 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3470 prn = isl_printer_print_qpolynomial(prn,
3471 bounds[i].shift);
3472 prn = isl_printer_print_str(prn, "))/");
3473 prn = isl_printer_print_isl_int(prn,
3474 bounds[i].stride);
3475 isl_printer_free(prn);
3476 } else
3477 fprintf(out, ")");
3478 fprintf(out, " - (");
3479 prn = isl_printer_to_file(ctx, out);
3480 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
3481 prn = isl_printer_print_aff(prn, bounds[i].lb);
3482 isl_printer_free(prn);
3483 fprintf(out, ")]");
3487 /* Print '#define's for copying data from global memory to shared
3488 * memory and back for the given array.
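 *
 * For example (illustrative), for a single reference group of a
 * two-dimensional array A mapped to shared memory, the "read" case
 * expands to lines of the form
 *
 *     #define read_shared_A(a0, a1) read_shared_A_((a0), (a1))
 *     #define read_shared_A_(a0, a1) shared_A[...] = A[...]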
3490 static void print_array_copy_defines(struct cuda_gen *gen,
3491 struct cuda_array_ref_group *group)
3493 int i;
3494 const char *type[] = { "read", "write" };
3495 struct cuda_array_info *array = group->array;
3496 int n_index = array->n_index;
3498 for (i = 0; i < 2; ++i) {
3499 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3500 print_array_name(gen->cuda.kernel_c, group);
3501 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3502 fprintf(gen->cuda.kernel_c, " %s_", type[i]);
3503 print_array_name(gen->cuda.kernel_c, group);
3504 fprintf(gen->cuda.kernel_c, "_");
3505 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 1);
3506 fprintf(gen->cuda.kernel_c, "\n");
3508 fprintf(gen->cuda.kernel_c, "#define %s_", type[i]);
3509 print_array_name(gen->cuda.kernel_c, group);
3510 fprintf(gen->cuda.kernel_c, "_");
3511 print_iterator_list(gen->cuda.kernel_c, n_index, "a", 0);
3512 if (i) {
3513 fprintf(gen->cuda.kernel_c, " ");
3514 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3515 fprintf(gen->cuda.kernel_c, " = ");
3516 print_local_index(gen->cuda.kernel_c, group);
3517 } else {
3518 fprintf(gen->cuda.kernel_c, " ");
3519 print_local_index(gen->cuda.kernel_c, group);
3520 fprintf(gen->cuda.kernel_c, " = ");
3521 print_global_index(gen->ctx, gen->cuda.kernel_c, array);
3523 fprintf(gen->cuda.kernel_c, "\n");
3527 static void print_copy_defines(struct cuda_gen *gen)
3529 int i, j;
3531 for (i = 0; i < gen->n_array; ++i) {
3532 struct cuda_array_info *array = &gen->array[i];
3534 for (j = 0; j < array->n_group; ++j) {
3535 if (array->groups[j]->private_bound)
3536 continue;
3537 if (!array->groups[j]->shared_bound)
3538 continue;
3539 print_array_copy_defines(gen, array->groups[j]);
3544 /* The sizes of the arrays on the host that have been computed by
3545 * extract_array_info may depend on the parameters. Use the extra
3546 * constraints on the parameters that are valid at "host_domain"
3547 * to simplify these expressions.
3549 static void localize_bounds(struct cuda_gen *gen,
3550 __isl_keep isl_set *host_domain)
3552 int i, j;
3553 isl_set *context;
3554 unsigned nvar;
3556 context = isl_set_copy(host_domain);
3557 nvar = isl_set_dim(host_domain, isl_dim_set);
3558 context = isl_set_project_out(context, isl_dim_set, 0, nvar);
3560 for (i = 0; i < gen->n_array; ++i) {
3561 struct cuda_array_info *array = &gen->array[i];
3563 if (array->n_group == 0)
3564 continue;
3566 for (j = 0; j < array->n_index; ++j) {
3567 isl_pw_aff *pwaff;
3569 pwaff = isl_pw_aff_copy(array->bound[j]);
3570 pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
3571 array->local_bound[j] = pwaff;
3574 isl_set_free(context);
3577 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3578 * in the statement list u.
3579 * Because of the way the schedule is constructed, the other statements
3580 * in the list, if any, should have the same values for these properties.
3582 static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
3584 int nr;
3585 struct cuda_stmt *stmt;
3587 nr = atoi(u->statement->name + 2);
3588 stmt = &gen->stmts[nr];
3590 gen->tile_len = stmt->tile_len;
3591 gen->n_parallel = stmt->n_parallel;
3594 /* This function is called for each leaf in the clast of the host code.
3595 * We first specialize the schedule to the site of the leaf, compute
3596 * the size of shared memory and then print the body of host code
3597 * and the associated kernel (through a call to print_kernel_body).
3599 static void print_host_user(struct gpucode_info *code,
3600 struct clast_user_stmt *u)
3602 struct cuda_gen *gen = code->user;
3603 isl_dim *dim;
3604 isl_set *par;
3605 isl_set *host_domain;
3606 isl_union_map *access;
3607 isl_union_map *local_sched;
3608 isl_union_set *arrays;
3610 set_tile_len(gen, u);
3611 read_sizes(gen);
3613 host_domain = extract_entire_host_domain(u);
3615 local_sched = isl_union_map_intersect_range(
3616 isl_union_map_copy(gen->sched),
3617 isl_union_set_from_set(extend(isl_set_copy(host_domain),
3618 gen->untiled_len)));
3619 access = isl_union_map_union(isl_union_map_copy(gen->read),
3620 isl_union_map_copy(gen->write));
3621 access = isl_union_map_apply_domain(access,
3622 isl_union_map_copy(local_sched));
3623 arrays = isl_union_map_range(access);
3625 print_indent(code->dst, code->indent);
3626 fprintf(code->dst, "dim3 k%d_dimBlock(", gen->kernel_id);
3627 print_reverse_list(code->dst, gen->n_block, gen->block_dim);
3628 fprintf(code->dst, ");\n");
3630 print_indent(code->dst, code->indent);
3631 fprintf(code->dst, "dim3 k%d_dimGrid(", gen->kernel_id);
3632 print_reverse_list(code->dst, gen->n_grid, gen->grid_dim);
3633 fprintf(code->dst, ");\n");
3635 gen->tiled_sched = tile_schedule(gen, local_sched);
3636 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
3637 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
3639 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
3641 dim = isl_union_map_get_dim(gen->local_sched);
3642 par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
3643 gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
3644 isl_union_set_from_set(par));
3646 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
3647 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
3649 gen->private_access = NULL;
3650 compute_shared_sched(gen);
3651 gen->privatization = compute_privatization(gen);
3652 group_references(gen);
3653 compute_private_size(gen);
3654 localize_bounds(gen, host_domain);
3656 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
3658 print_copy_defines(gen);
3659 print_kernel_launch(gen, arrays);
3661 fprintf(gen->cuda.kernel_c, "{\n");
3663 print_kernel_body(gen, host_domain, gen->tiled_sched);
3665 fprintf(gen->cuda.kernel_c, "}\n");
3667 free_local_array_info(gen);
3668 isl_map_free(gen->privatization);
3669 isl_union_map_free(gen->private_access);
3670 isl_union_map_free(gen->local_sched);
3671 isl_union_map_free(gen->tiled_sched);
3672 isl_union_map_free(gen->shared_sched);
3673 isl_union_map_free(gen->shared_proj);
3674 isl_union_set_free(arrays);
3675 isl_set_free(host_domain);
3677 free(gen->tile_size);
3678 gen->kernel_id++;
3681 /* Use CLooG to generate code for the outer gen->tile_first loops
3682 * of the global schedule in gen->sched.
3683 * The pretty printing of this code is handled by gpu_print_host_stmt,
3684 * which calls print_host_user for each kernel invocation location.
3686 static void print_cloog_host_code(struct cuda_gen *gen)
3688 int i;
3689 isl_set *context;
3690 isl_union_map *sched;
3691 CloogOptions *options;
3692 CloogDomain *cloog_context;
3693 CloogUnionDomain *ud;
3694 CloogInput *input;
3695 struct clast_stmt *stmt;
3696 char name[20];
3698 options = cloog_options_malloc(gen->state);
3699 options->language = LANGUAGE_C;
3700 options->otl = 0;
3701 options->strides = 1;
3702 options->stop = gen->tile_first;
3703 options->f = gen->untiled_len;
3704 options->l = gen->untiled_len;
3705 options->save_domains = 1;
3706 options->noscalars = 1;
3708 sched = isl_union_map_copy(gen->sched);
3709 ud = cloog_union_domain_from_isl_union_map(sched);
3710 for (i = 0; i < options->stop; ++i) {
3711 snprintf(name, sizeof(name), "h%d", i);
3712 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
3714 context = isl_set_copy(gen->context);
3715 cloog_context = cloog_domain_from_isl_set(context);
3716 input = cloog_input_alloc(cloog_context, ud);
3718 stmt = cloog_clast_create_from_input(input, options);
3720 gen->code.indent = 0;
3721 gen->code.dst = gen->cuda.host_c;
3722 gen->code.print_user_stmt = NULL;
3723 gen->code.print_user_stmt_list = &print_host_user;
3724 gen->code.print_for_head = NULL;
3725 gen->code.print_for_foot = NULL;
3726 gen->code.user = gen;
3727 gpu_print_host_stmt(&gen->code, stmt);
3729 cloog_clast_free(stmt);
3730 cloog_options_free(options);
3733 void print_host_code(struct cuda_gen *gen)
3735 fprintf(gen->cuda.host_c, "{\n");
3736 print_cloog_macros(gen->cuda.host_c);
3737 print_cloog_macros(gen->cuda.kernel_c);
3739 declare_device_arrays(gen);
3741 allocate_device_arrays(gen);
3742 copy_arrays_to_device(gen);
3744 gen->kernel_id = 0;
3745 print_cloog_host_code(gen);
3747 copy_arrays_from_device(gen);
3748 free_device_arrays(gen);
3750 fprintf(gen->cuda.host_c, "}\n");
3753 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
3754 const char *str)
3756 isl_ctx *ctx;
3757 isl_set *context;
3759 if (!str)
3760 return set;
3762 ctx = isl_set_get_ctx(set);
3763 context = isl_set_read_from_str(ctx, str, -1);
3764 context = isl_set_align_params(context, isl_set_get_dim(set));
3765 set = isl_set_intersect(set, context);
3767 return set;
/* Return the union of all iteration domains of the gen->stmts[i].
 */
static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
{
	int i;
	isl_union_set *domain;

	domain = isl_union_set_empty(isl_set_get_dim(gen->context));
	for (i = 0; i < gen->n_stmts; ++i) {
		isl_set *domain_i;

		domain_i = isl_set_copy(gen->stmts[i].domain);
		domain = isl_union_set_union(domain,
				isl_union_set_from_set(domain_i));
	}

	return domain;
}

/* Information about the outermost tilable bands in the forest of bands.
 *
 * tile_len and n_parallel are only set on band_info structures
 * that correspond to outermost bands. For other bands (in particular,
 * ancestors of the outermost bands), n_parallel is set to 0.
 *
 * prefix is the (padded) schedule leading up to the outermost tilable bands.
 *
 * tile_first is the number of schedule dimensions in prefix.
 *
 * suffix is the schedule of the outermost tilable bands and their descendants.
 */
struct band_info {
	struct cuda_gen *gen;
	int tile_first;
	int tile_len;
	int n_parallel;
	isl_union_map *prefix;
	isl_union_map *suffix;
};

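/* An illustrative example: for a schedule forest with one outer sequential
 * loop followed by a tilable band of two parallel loops, tile_first = 1,
 * tile_len = 2 and n_parallel = 2, prefix maps each statement instance to
 * the single outer schedule dimension and suffix maps it to the band
 * dimensions (and those of any descendant bands).
 */
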
/* Set tile_len and n_parallel of each statement to those of
 * its outermost band, recorded in the band_info.
 */
static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
{
	struct band_info *info = user;
	int nr;
	struct cuda_stmt *stmt;

	nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
	stmt = &info->gen->stmts[nr];

	stmt->tile_len = info->tile_len;
	stmt->n_parallel = info->n_parallel;

	isl_map_free(map);

	return 0;
}

static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info);

/* Check if this band has any parallel loops. If so, take it as
 * the outermost tilable band. If not, continue looking for the
 * outermost tilable band in the children of the current band.
 */
static void band_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band *band, int pos, struct band_info *info)
{
	int n = isl_band_n_member(band);
	int n_parallel;

	for (n_parallel = 0; n_parallel < n; ++n_parallel)
		if (!isl_band_member_is_zero_distance(band, n_parallel))
			break;

	info->n_parallel = n_parallel;
	if (n_parallel) {
		info->gen = gen;
		info->tile_first = pos;
		info->tile_len = n;
		info->prefix = isl_band_get_prefix_schedule(band);
		info->suffix = isl_union_map_flat_range_product(
				isl_band_get_partial_schedule(band),
				isl_band_get_suffix_schedule(band));
		isl_union_map_foreach_map(info->prefix,
						&set_stmt_tile_len, info);
	} else {
		isl_band_list *children;
		if (!isl_band_has_children(band))
			isl_die(isl_band_get_ctx(band), isl_error_unknown,
				"unable to detect any parallelism", abort());
		children = isl_band_get_children(band);
		list_select_outer_band(gen, children, pos + n, info);
	}

	isl_band_free(band);
}

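/* For example, if a band has three members of which only the first two
 * have zero dependence distance, then the loop above stops at the third
 * member, n_parallel is set to 2 and the band is accepted as the
 * outermost tilable band.
 */
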
/* Comparison function that returns a non-zero value for band_infos
 * with different tile_len fields or different n_parallel fields.
 */
static int cmp_band(const void *p1, const void *p2)
{
	const struct band_info *info1 = p1;
	const struct band_info *info2 = p2;

	if (info1->tile_len != info2->tile_len)
		return info1->tile_len - info2->tile_len;

	return info1->n_parallel - info2->n_parallel;
}

/* Extend "umap" with coordinates with fixed value "val"
 * to a total length of "dst_len", assuming the original dimension is "src_len".
 */
static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
	int src_len, int dst_len, int val)
{
	isl_dim *dim;
	isl_map *map;
	int i;

	dim = isl_union_map_get_dim(umap);
	map = isl_map_reverse(projection(dim, dst_len, src_len));
	for (i = src_len; i < dst_len; ++i)
		map = isl_map_fix_si(map, isl_dim_out, i, val);

	umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));

	return umap;
}

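/* For example, with src_len = 2, dst_len = 4 and val = 0, a map
 * { S[i,j] -> [i,j] } in "umap" is extended to { S[i,j] -> [i,j,0,0] }.
 */
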
/* Group bands with the same values for tile_len and n_parallel.
 * The prefix schedule is then extended with a fixed coordinate that
 * is different for each such group.
 * Note that the actual values for this coordinate are not important.
 * The bands have already been effectively separated at a higher level
 * or they are independent and may be executed in parallel.
 * The list of band_info has been sorted before this function is called.
 */
static void separate_bands(struct band_info *info, int n)
{
	int i;
	int j = 0;

	for (i = 0; i < n; ++i) {
		int l = info[i].tile_first;

		if (i &&
		    (info[i].tile_len != info[i - 1].tile_len ||
		     info[i].n_parallel != info[i - 1].n_parallel))
			j++;

		info[i].prefix = extend_range(info[i].prefix,
						l, l + 1, j);
		info[i].tile_first = l + 1;
	}
}

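/* For example, for three sorted band_infos with (tile_len, n_parallel)
 * equal to (2,1), (2,1) and (3,2), the prefix schedules are extended with
 * the constants 0, 0 and 1 respectively, separating the third band from
 * the first two.
 */
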
/* Select the outermost bands in the elements of the list, align
 * their prefix schedules, separate bands with different values
 * for tile_len and/or n_parallel and then combine the resulting
 * prefix and suffix schedules into a single pair of prefix and
 * suffix schedules for the entire list.
 */
static void list_select_outer_band(struct cuda_gen *gen,
	__isl_take isl_band_list *list, int pos, struct band_info *list_info)
{
	isl_band *band;
	int i;
	int n = isl_band_list_n_band(list);
	isl_ctx *ctx = isl_band_list_get_ctx(list);
	struct band_info *info;
	int max_tile_first;
	isl_union_map *prefix;
	isl_union_map *suffix;

	assert(n >= 1);
	info = isl_calloc_array(ctx, struct band_info, n);
	assert(info);

	max_tile_first = 0;
	for (i = 0; i < n; ++i) {
		band = isl_band_list_get_band(list, i);
		band_select_outer_band(gen, band, pos, &info[i]);
		if (info[i].tile_first > max_tile_first)
			max_tile_first = info[i].tile_first;
	}

	for (i = 0; i < n; ++i) {
		if (info[i].tile_first == max_tile_first)
			continue;
		info[i].prefix = extend_range(info[i].prefix,
					info[i].tile_first, max_tile_first, 0);
	}

	qsort(info, n, sizeof(struct band_info), &cmp_band);

	for (i = 0; i < n - 1; ++i)
		if (info[i].tile_len != info[i + 1].tile_len ||
		    info[i].n_parallel != info[i + 1].n_parallel)
			break;

	if (i < n - 1)
		separate_bands(info, n);

	prefix = info[0].prefix;
	suffix = info[0].suffix;

	for (i = 1; i < n; ++i) {
		prefix = isl_union_map_union(prefix, info[i].prefix);
		suffix = isl_union_map_union(suffix, info[i].suffix);
	}

	list_info->tile_first = info[0].tile_first;
	list_info->tile_len = -1;
	list_info->prefix = prefix;
	list_info->suffix = suffix;

	isl_band_list_free(list);
	free(info);
}

/* Set max_out to the maximal number of output dimensions over
 * all maps.
 */
static int update_max_out(__isl_take isl_map *map, void *user)
{
	int *max_out = user;
	int n_out = isl_map_dim(map, isl_dim_out);

	if (n_out > *max_out)
		*max_out = n_out;

	isl_map_free(map);
	return 0;
}

struct align_range_data {
	int max_out;
	isl_union_map *res;
};

/* Extend the dimension of the range of the given map to data->max_out and
 * then add the result to data->res.
 */
static int map_align_range(__isl_take isl_map *map, void *user)
{
	struct align_range_data *data = user;
	int i;
	isl_dim *dim;
	isl_map *proj;
	int n_out = isl_map_dim(map, isl_dim_out);

	dim = isl_union_map_get_dim(data->res);
	proj = isl_map_reverse(projection(dim, data->max_out, n_out));
	for (i = n_out; i < data->max_out; ++i)
		proj = isl_map_fix_si(proj, isl_dim_out, i, 0);

	map = isl_map_apply_range(map, proj);

	data->res = isl_union_map_add_map(data->res, map);

	return 0;
}

/* Extend the ranges of the maps in the union map such that they all have
 * the same dimension.
 */
static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
{
	struct align_range_data data;

	data.max_out = 0;
	isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);

	data.res = isl_union_map_empty(isl_union_map_get_dim(umap));
	isl_union_map_foreach_map(umap, &map_align_range, &data);

	isl_union_map_free(umap);
	return data.res;
}

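/* For example, if "umap" contains { S[i] -> [i] } and { T[i,j] -> [i,j] },
 * then max_out is 2 and the first map is padded to { S[i] -> [i,0] }.
 */
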
/* Select the outermost tilable band that (by construction)
 * has at least one parallel loop.
 * The starting position of the aligned band is stored in
 * gen->tile_first.
 * The sizes and number of parallel loops may be different in different
 * parts of the band forest and are therefore stored in the cuda_stmts.
 *
 * Return the complete schedule, with the tilable bands aligned
 * at gen->tile_first and padded with zero, if needed.
 */
static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
	__isl_keep isl_schedule *schedule)
{
	isl_band_list *list;
	struct band_info info;

	gen->n_parallel = 0;
	gen->tile_len = -1;

	list = isl_schedule_get_band_forest(schedule);

	list_select_outer_band(gen, list, 0, &info);

	gen->tile_first = info.tile_first;
	info.suffix = align_range(info.suffix);

	return isl_union_map_flat_range_product(info.prefix, info.suffix);
}

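/* An illustrative example: a prefix { S[i,j] -> [0] } combined with an
 * aligned suffix { S[i,j] -> [i,j] } yields the complete schedule
 * { S[i,j] -> [0,i,j] }, with gen->tile_first set to 1.
 */
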
/* Set gen->untiled_len to the number of scheduling dimensions
 * for the schedule of the first domain.
 * We assume here that this number is the same for all domains.
 */
static int set_untiled_len(__isl_take isl_map *map, void *user)
{
	unsigned *untiled_len = user;

	*untiled_len = isl_map_dim(map, isl_dim_out);

	isl_map_free(map);
	return -1;
}

/* Compute an appropriate schedule based on the accesses in
 * gen->read and gen->write.
 *
 * We first compute dependences and then use those to compute
 * a schedule that has a parallel loop in each tilable band.
 * Finally, we select the outermost tilable band.
 */
static void compute_schedule(struct cuda_gen *gen,
	__isl_take isl_union_map *sched)
{
	isl_ctx *ctx = isl_union_map_get_ctx(sched);
	isl_union_set *domain;
	isl_union_map *empty;
	isl_union_map *dep_raw, *dep2, *dep3, *dep;
	isl_union_map *uninitialized;
	isl_schedule *schedule;
	struct isl_options *options;

	empty = isl_union_map_empty(isl_union_map_get_dim(sched));
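
	/* The first call below computes the flow (read-after-write)
	 * dependences as well as the reads that have no corresponding
	 * write (used below to construct gen->copy_in).  The second call
	 * computes the write-after-write and write-after-read dependences.
	 */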
	isl_union_map_compute_flow(isl_union_map_copy(gen->read),
				isl_union_map_copy(gen->write), empty,
				isl_union_map_copy(sched),
				&dep_raw, NULL, &uninitialized, NULL);
	isl_union_map_compute_flow(isl_union_map_copy(gen->write),
				isl_union_map_copy(gen->write),
				isl_union_map_copy(gen->read),
				isl_union_map_copy(sched),
				&dep2, &dep3, NULL, NULL);
	isl_union_map_free(sched);

	gen->copy_in = isl_union_map_range(uninitialized);

	dep = isl_union_map_union(dep2, dep3);
	dep = isl_union_map_union(dep, dep_raw);
	dep = isl_union_map_coalesce(dep);

	domain = extract_domain(gen);
	options = isl_ctx_peek_options(ctx, isl_options_arg);
	options->schedule_outer_zero_distance = 1;
	schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
			isl_union_map_copy(dep), dep);

	sched = select_outer_tilable_band(gen, schedule);

	isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
	sched = isl_union_map_intersect_domain(sched, domain);
	gen->sched = sched;

	isl_schedule_free(schedule);
}

static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	struct cuda_stmt_access *access;
	isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);

	access = isl_alloc_type(ctx, struct cuda_stmt_access);
	assert(access);
	access->next = NULL;
	access->read = expr->acc.read;
	access->write = expr->acc.write;
	access->access = isl_map_copy(expr->acc.access);

	*next_access = access;
	next_access = &(*next_access)->next;
	return next_access;
}

static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
	struct cuda_stmt_access **next_access)
{
	int i;

	for (i = 0; i < expr->n_arg; ++i)
		next_access = expr_extract_accesses(expr->args[i],
							next_access);

	if (expr->type == pet_expr_access)
		next_access = expr_extract_access(expr, next_access);

	return next_access;
}

static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
{
	struct cuda_stmt_access **next_access = &stmt->accesses;

	stmt->accesses = NULL;
	expr_extract_accesses(stmt->body, next_access);
}

/* Return an array of cuda_stmt representing the statements in "scop".
 */
static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
	__isl_keep isl_set *context)
{
	int i;
	struct cuda_stmt *stmts;

	stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
	assert(stmts);

	for (i = 0; i < scop->n_stmt; ++i) {
		struct cuda_stmt *s = &stmts[i];

		s->domain = isl_set_copy(scop->stmts[i]->domain);
		s->domain = isl_set_intersect(s->domain, isl_set_copy(context));
		s->body = scop->stmts[i]->body;
		pet_stmt_extract_accesses(s);
	}

	return stmts;
}

/* Replace the scop in the "input" file by equivalent code
 * that uses the GPU. "scop" is assumed to correspond to this scop.
 *
 * We first compute a schedule that respects the dependences
 * of the original program and select the outermost band
 * of tilable dimensions that has at least one parallel loop.
 * We then have three blocks of dimensions
 *
 *	H		B			G
 *
 * The tilable band "B" is first tiled according to "tile.sizes", resulting
 * in
 *
 *	H	T		P		G
 *
 * For each iteration of the T loop and for each array, we compute
 * the array elements accessed by that iteration, construct a rectangular
 * box around it and shift it to the origin. The result is used
 * as shared memory for the array.
 *
 * We then split off at most 2 parallel loops from the T loops and
 * at most 3 parallel loops from the P loops
 *
 *	H	T1	T2	P1	P2	G
 *
 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
 * according to "grid.sizes"/"block.sizes".
 *
 *	H	T1T T1P	T2	P1T P1P	P2	G
 *
 * Finally, the T1P and P1P iterators are equated to the block and
 * thread dimensions respectively and so are effectively removed.
 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
 * are run on the GPU.
 *
 * Code is generated in three stages. We first generate code for the
 * host (the H loops), with iterators h%d. Then, for each leaf node
 * of the resulting AST, we generate code for the shared loops (up to
 * and including T2), with iterators g%d and after equating the H loops
 * to h%d parameters and the T1P loops to the block dimensions.
 * Finally, we generate code for the remaining loops in a similar fashion.
 */
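/* A hypothetical illustration of the first step: for a parallel band
 * B = (i, j) tiled with size 16, the T loops enumerate the tiles
 * (i / 16, j / 16) and the P loops the points (i % 16, j % 16) within
 * each tile; the T1 loops are subsequently wrapped over the blocks of
 * the CUDA grid and the P1 loops over the threads of each block.
 */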
int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
	const char *input)
{
	isl_union_map *sched;
	struct cuda_gen gen;

	if (!scop)
		return -1;

	scop = pet_scop_align_params(scop);

	gen.ctx = ctx;
	gen.context = isl_set_copy(scop->context);
	gen.context = add_context_from_str(gen.context, options->ctx);
	gen.n_stmts = scop->n_stmt;
	gen.stmts = extract_stmts(ctx, scop, gen.context);
	gen.read = pet_scop_collect_reads(scop);
	gen.write = pet_scop_collect_writes(scop);
	gen.options = options;
	gen.state = cloog_isl_state_malloc(gen.ctx);
	gen.scop = scop;

	cuda_open_files(&gen.cuda, input);

	collect_array_info(&gen);

	sched = pet_scop_collect_schedule(scop);

	compute_schedule(&gen, sched);

	print_host_code(&gen);

	cloog_state_free(gen.state);
	clear_cuda_gen(&gen);

	cuda_close_files(&gen.cuda);

	return 0;
}