[ppcg.git] / cuda.c: properly copy scalars from device
1 /*
2 * Copyright 2010-2011 INRIA Saclay
4 * Use of this software is governed by the GNU LGPLv2.1 license
6 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
7 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
8 * 91893 Orsay, France
9 */
11 #include <assert.h>
12 #include <stdlib.h>
14 #include <isl/polynomial.h>
15 #include <isl/union_set.h>
16 #include <isl/aff.h>
17 #include <isl/ilp.h>
18 #include <isl/flow.h>
19 #include <isl/band.h>
20 #include <isl/schedule.h>
21 #include <isl/options.h>
22 #include <cloog/isl/cloog.h>
24 #include "cuda.h"
25 #include "cuda_common.h"
26 #include "gpucode.h"
27 #include "schedule.h"
28 #include "ppcg_options.h"
30 /* The fields stride, shift and shift_map only contain valid information
31 * if shift != NULL.
32 * If so, they express that the current index is such that if you add shift,
33 * then the result is always a multiple of stride.
34 * shift_map contains the mapping
36 * i -> (i + shift)/stride
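/* An illustrative example (hypothetical values): if the accessed index i
 * always satisfies i = 4 * alpha - 1 for some integer alpha, then stride = 4
 * and shift = 1 (adding 1 always yields a multiple of 4), and shift_map is
 *
 *	i -> (i + 1)/4
 */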
38 struct cuda_array_bound {
39 isl_int size;
40 isl_aff *lb;
42 isl_int stride;
43 isl_aff *shift;
44 isl_basic_map *shift_map;
47 struct cuda_array_info;
49 /* A group of array references in a kernel that should be handled together.
50 * If private_bound is not NULL, then the group is mapped to registers.
51 * Otherwise, if shared_bound is not NULL, the group is mapped to shared memory.
52 * Otherwise, the group is accessed from global memory.
54 struct cuda_array_ref_group {
55 /* The references in this group access this array. */
56 struct cuda_array_info *array;
57 /* Position of this group in the list of reference groups of array. */
58 int nr;
60 /* The following fields are used during the construction of the groups.
61 * access is the combined access relation relative to the shared
62 * memory tiling.
63 * write is set if any access in the group is a write.
65 isl_map *access;
66 int write;
68 /* For each index, the size and offset of the piece in shared memory. */
69 struct cuda_array_bound *shared_bound;
71 /* For each index, the size and offset of the piece in private memory. */
72 struct cuda_array_bound *private_bound;
74 /* References in this group; point to elements of a linked list. */
75 int n_ref;
76 struct cuda_stmt_access **refs;
79 struct cuda_array_info {
80 isl_space *dim;
81 /* Element type. */
82 char *type;
83 /* Name of the array. */
84 char *name;
85 /* Number of indices. */
86 unsigned n_index;
87 /* For each index, a bound on the array in that direction. */
88 isl_pw_aff **bound;
89 /* For each index, bound[i] specialized to the current kernel. */
90 isl_pw_aff **local_bound;
92 /* All references to this array; point to elements of a linked list. */
93 int n_ref;
94 struct cuda_stmt_access **refs;
96 /* The reference groups associated with this array. */
97 int n_group;
98 struct cuda_array_ref_group **groups;
100 /* Last shared memory tile dimension that affects the tile of this array. */
101 int last_shared;
102 /* Dimension at which copying to/from shared memory is printed.
103 * If >= 0, then the value is >= last_shared.
104 * If -1, then the copying is done at the leaf level.
106 int print_shared_level;
109 /* Print the name of the local copy of a given group of array references.
111 static void print_array_name(FILE *out, struct cuda_array_ref_group *group)
113 int global = 0;
115 if (group->private_bound)
116 fprintf(out, "private_");
117 else if (group->shared_bound)
118 fprintf(out, "shared_");
119 else
120 global = 1;
121 fprintf(out, "%s", group->array->name);
122 if (!global && group->array->n_group > 1)
123 fprintf(out, "_%d", group->nr);
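/* For illustration (hypothetical array "A"): a group mapped to registers is
 * printed as "private_A", a group mapped to shared memory as "shared_A"
 * (with a suffix such as "shared_A_1" when the array has more than one
 * group), and a group accessed from global memory simply as "A".
 */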
126 /* Collect all references to the given array and store pointers to them
127 * in array->refs.
129 static void collect_references(struct cuda_gen *gen,
130 struct cuda_array_info *array)
132 int i;
133 int n;
135 n = 0;
136 for (i = 0; i < gen->n_stmts; ++i) {
137 struct cuda_stmt *stmt = &gen->stmts[i];
138 struct cuda_stmt_access *access;
140 for (access = stmt->accesses; access; access = access->next) {
141 const char *name;
142 name = isl_map_get_tuple_name(access->access,
143 isl_dim_out);
144 if (name && !strcmp(array->name, name))
145 n++;
149 array->n_ref = n;
150 array->refs = isl_alloc_array(gen->ctx, struct cuda_stmt_access *, n);
151 assert(array->refs);
153 n = 0;
154 for (i = 0; i < gen->n_stmts; ++i) {
155 struct cuda_stmt *stmt = &gen->stmts[i];
156 struct cuda_stmt_access *access;
158 for (access = stmt->accesses; access; access = access->next) {
159 const char *name;
160 name = isl_map_get_tuple_name(access->access,
161 isl_dim_out);
162 if (!name || strcmp(array->name, name))
163 continue;
165 array->refs[n++] = access;
170 static struct cuda_array_bound *create_bound_list(isl_ctx *ctx, int n_index)
172 int i;
173 struct cuda_array_bound *bound;
175 bound = isl_alloc_array(ctx, struct cuda_array_bound, n_index);
176 assert(bound);
178 for (i = 0; i < n_index; ++i) {
179 isl_int_init(bound[i].size);
180 bound[i].lb = NULL;
181 isl_int_init(bound[i].stride);
182 bound[i].shift = NULL;
183 bound[i].shift_map = NULL;
186 return bound;
189 static void free_bound_list(struct cuda_array_bound *bound, int n_index)
191 int j;
193 if (!bound)
194 return;
196 for (j = 0; j < n_index; ++j) {
197 isl_int_clear(bound[j].size);
198 isl_int_clear(bound[j].stride);
199 isl_aff_free(bound[j].lb);
200 isl_aff_free(bound[j].shift);
201 isl_basic_map_free(bound[j].shift_map);
203 free(bound);
206 static struct pet_array *find_array(struct pet_scop *scop,
207 __isl_keep isl_set *accessed)
209 int i;
210 isl_id *id;
212 id = isl_set_get_tuple_id(accessed);
214 for (i = 0; i < scop->n_array; ++i) {
215 isl_id *id_i;
217 id_i = isl_set_get_tuple_id(scop->arrays[i]->extent);
218 isl_id_free(id_i);
219 if (id == id_i)
220 break;
222 isl_id_free(id);
224 return i < scop->n_array ? scop->arrays[i] : NULL;
227 /* Compute bounds on the host arrays based on the accessed elements
228 * and collect all references to the array.
230 static int extract_array_info(__isl_take isl_set *array, void *user)
232 int i;
233 struct cuda_gen *gen = (struct cuda_gen *)user;
234 const char *name;
235 int n_index;
236 isl_pw_aff **bounds;
237 isl_pw_aff **local_bounds;
238 struct pet_array *pa;
240 n_index = isl_set_dim(array, isl_dim_set);
241 name = isl_set_get_tuple_name(array);
242 bounds = isl_alloc_array(isl_set_get_ctx(array),
243 isl_pw_aff *, n_index);
244 assert(bounds);
245 local_bounds = isl_calloc_array(isl_set_get_ctx(array),
246 isl_pw_aff *, n_index);
247 assert(local_bounds);
248 gen->array[gen->n_array].dim = isl_set_get_space(array);
249 gen->array[gen->n_array].name = strdup(name);
250 gen->array[gen->n_array].n_index = n_index;
251 gen->array[gen->n_array].bound = bounds;
252 gen->array[gen->n_array].local_bound = local_bounds;
254 pa = find_array(gen->scop, array);
255 assert(pa);
257 gen->array[gen->n_array].type = strdup(pa->element_type);
259 for (i = 0; i < n_index; ++i) {
260 isl_set *dom;
261 isl_local_space *ls;
262 isl_aff *one;
263 isl_pw_aff *bound;
264 isl_set *size = i == 0 ? array : pa->extent;
266 bound = isl_set_dim_max(isl_set_copy(size), i);
267 assert(bound);
268 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
269 ls = isl_local_space_from_space(isl_set_get_space(dom));
270 one = isl_aff_zero_on_domain(ls);
271 one = isl_aff_add_constant_si(one, 1);
272 bound = isl_pw_aff_add(bound, isl_pw_aff_alloc(dom, one));
273 bound = isl_pw_aff_gist(bound, isl_set_copy(gen->context));
275 bounds[i] = bound;
278 collect_references(gen, &gen->array[gen->n_array]);
280 gen->n_array++;
282 isl_set_free(array);
283 return 0;
286 void collect_array_info(struct cuda_gen *gen)
288 isl_union_set *arrays;
290 arrays = isl_union_map_range(isl_union_map_copy(gen->read));
291 arrays = isl_union_set_union(arrays,
292 isl_union_map_range(isl_union_map_copy(gen->write)));
293 arrays = isl_union_set_coalesce(arrays);
295 gen->n_array = isl_union_set_n_set(arrays);
296 gen->array = isl_alloc_array(gen->ctx,
297 struct cuda_array_info, gen->n_array);
298 assert(gen->array);
299 gen->n_array = 0;
300 isl_union_set_foreach_set(arrays, &extract_array_info, gen);
301 isl_union_set_free(arrays);
304 static void free_array_info(struct cuda_gen *gen)
306 int i, j;
308 for (i = 0; i < gen->n_array; ++i) {
309 int n_index = gen->array[i].n_index;
310 free(gen->array[i].type);
311 free(gen->array[i].name);
312 for (j = 0; j < n_index; ++j) {
313 isl_pw_aff_free(gen->array[i].bound[j]);
314 isl_pw_aff_free(gen->array[i].local_bound[j]);
316 isl_space_free(gen->array[i].dim);
317 free(gen->array[i].bound);
318 free(gen->array[i].local_bound);
319 free(gen->array[i].refs);
321 free(gen->array);
324 static void declare_device_arrays(struct cuda_gen *gen)
326 int i;
328 for (i = 0; i < gen->n_array; ++i)
329 fprintf(gen->cuda.host_c, "%s *dev_%s;\n",
330 gen->array[i].type, gen->array[i].name);
331 fprintf(gen->cuda.host_c, "\n");
334 static void print_array_size(struct cuda_gen *gen, FILE *out,
335 struct cuda_array_info *array)
337 int i;
338 isl_printer *prn;
340 prn = isl_printer_to_file(gen->ctx, out);
341 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
342 for (i = 0; i < array->n_index; ++i) {
343 prn = isl_printer_print_str(prn, "(");
344 prn = isl_printer_print_pw_aff(prn, array->bound[i]);
345 prn = isl_printer_print_str(prn, ") * ");
347 prn = isl_printer_print_str(prn, "sizeof(");
348 prn = isl_printer_print_str(prn, array->type);
349 prn = isl_printer_print_str(prn, ")");
350 isl_printer_free(prn);
353 static void allocate_device_arrays(struct cuda_gen *gen)
355 int i;
357 for (i = 0; i < gen->n_array; ++i) {
358 fprintf(gen->cuda.host_c,
359 "cudaCheckReturn(cudaMalloc((void **) &dev_%s, ",
360 gen->array[i].name);
361 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
362 fprintf(gen->cuda.host_c, "));\n");
364 fprintf(gen->cuda.host_c, "\n");
367 static void free_device_arrays(struct cuda_gen *gen)
369 int i;
371 for (i = 0; i < gen->n_array; ++i)
372 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaFree(dev_%s));\n",
373 gen->array[i].name);
376 /* Check if a cuda array is a scalar. A scalar is a value that is not stored
377 * as an array or through a pointer reference, but as a single data element. At
378 * the moment, scalars are represented as zero-dimensional arrays.
380 static int cuda_array_is_scalar(struct cuda_array_info *array)
382 return (array->n_index == 0);
385 static void copy_arrays_to_device(struct cuda_gen *gen)
387 int i;
389 for (i = 0; i < gen->n_array; ++i) {
390 isl_space *dim;
391 isl_set *read_i;
392 int empty;
394 dim = isl_space_copy(gen->array[i].dim);
395 read_i = isl_union_set_extract_set(gen->copy_in, dim);
396 empty = isl_set_fast_is_empty(read_i);
397 isl_set_free(read_i);
398 if (empty)
399 continue;
401 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(dev_%s,",
402 gen->array[i].name);
404 if (cuda_array_is_scalar(&(gen->array[i])))
405 fprintf(gen->cuda.host_c, " &%s, ",
406 gen->array[i].name);
407 else
408 fprintf(gen->cuda.host_c, " %s, ", gen->array[i].name);
410 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
411 fprintf(gen->cuda.host_c, ", cudaMemcpyHostToDevice));\n");
413 fprintf(gen->cuda.host_c, "\n");
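/* A sketch of the host code emitted by copy_arrays_to_device, assuming a
 * float array A whose size expression prints as (N) and a float scalar s
 * (names and bounds are hypothetical):
 *
 *	cudaCheckReturn(cudaMemcpy(dev_A, A, (N) * sizeof(float), cudaMemcpyHostToDevice));
 *	cudaCheckReturn(cudaMemcpy(dev_s, &s, sizeof(float), cudaMemcpyHostToDevice));
 *
 * The scalar is passed by address, matching the "&%s" case above;
 * copy_arrays_from_device below emits the mirroring cudaMemcpyDeviceToHost
 * calls.
 */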
416 static void copy_arrays_from_device(struct cuda_gen *gen)
418 int i;
419 isl_union_set *write;
420 write = isl_union_map_range(isl_union_map_copy(gen->write));
422 for (i = 0; i < gen->n_array; ++i) {
423 isl_space *dim;
424 isl_set *write_i;
425 int empty;
427 dim = isl_space_copy(gen->array[i].dim);
428 write_i = isl_union_set_extract_set(write, dim);
429 empty = isl_set_fast_is_empty(write_i);
430 isl_set_free(write_i);
431 if (empty)
432 continue;
434 fprintf(gen->cuda.host_c, "cudaCheckReturn(cudaMemcpy(");
435 if (cuda_array_is_scalar(&gen->array[i]))
436 fprintf(gen->cuda.host_c, "&%s, ", gen->array[i].name);
437 else
438 fprintf(gen->cuda.host_c, "%s, ", gen->array[i].name);
439 fprintf(gen->cuda.host_c, "dev_%s, ", gen->array[i].name);
440 print_array_size(gen, gen->cuda.host_c, &gen->array[i]);
441 fprintf(gen->cuda.host_c, ", cudaMemcpyDeviceToHost));\n");
444 isl_union_set_free(write);
445 fprintf(gen->cuda.host_c, "\n");
448 static void read_sizes_from_file(struct cuda_gen *gen, const char *filename,
449 int *sizes, int len)
451 int i;
452 FILE *file;
454 file = fopen(filename, "r");
455 if (!file)
456 return;
458 for (i = 0; i < len; ++i)
459 if (fscanf(file, "%d", &sizes[i]) < 1)
460 break;
462 fclose(file);
465 static void reverse_list(int *list, int len)
467 int i;
468 int t;
470 for (i = 0; 2 * i < len; ++i) {
471 t = list[i];
472 list[i] = list[len - 1 - i];
473 list[len - 1 - i] = t;
477 /* Read user-specified sizes from "tile.sizes", "block.sizes" and "grid.sizes"
478 * after filling in some potentially useful defaults.
480 static void read_sizes(struct cuda_gen *gen)
482 int n;
484 gen->tile_size = isl_alloc_array(gen->ctx, int, gen->tile_len);
485 assert(gen->tile_size);
486 for (n = 0; n < gen->tile_len; ++n)
487 gen->tile_size[n] = gen->options->tile_size;
488 read_sizes_from_file(gen, "tile.sizes", gen->tile_size, gen->tile_len);
490 n = gen->n_parallel;
491 gen->n_block = (n <= 3) ? n : 3;
492 switch (gen->n_block) {
493 case 1:
494 gen->block_dim[0] = 512;
495 break;
496 case 2:
497 gen->block_dim[0] = 32;
498 gen->block_dim[1] = 16;
499 break;
500 default:
501 gen->block_dim[0] = 32;
502 gen->block_dim[1] = 4;
503 gen->block_dim[2] = 4;
504 break;
506 read_sizes_from_file(gen, "block.sizes", gen->block_dim, gen->n_block);
507 reverse_list(gen->block_dim, gen->n_block);
509 gen->n_grid = (n <= 2) ? n : 2;
510 switch (gen->n_grid) {
511 case 1:
512 gen->grid_dim[0] = 32768;
513 break;
514 default:
515 gen->grid_dim[0] = 256;
516 gen->grid_dim[1] = 256;
517 break;
519 read_sizes_from_file(gen, "grid.sizes", gen->grid_dim, gen->n_grid);
520 reverse_list(gen->grid_dim, gen->n_grid);
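/* For illustration (hypothetical contents): a "tile.sizes" file containing
 * "32 32 16" overrides the first three tile sizes; if the file is missing or
 * contains fewer entries, the remaining entries keep the defaults set above.
 * The block and grid size lists are reversed after reading by reverse_list().
 */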
523 static void free_stmts(struct cuda_stmt *stmts, int n)
525 int i;
527 for (i = 0; i < n; ++i) {
528 struct cuda_stmt_access *access, *next;
530 for (access = stmts[i].accesses; access; access = next) {
531 next = access->next;
532 isl_map_free(access->access);
533 free(access);
536 isl_set_free(stmts[i].domain);
538 free(stmts);
541 void clear_cuda_gen(struct cuda_gen *gen)
543 free_stmts(gen->stmts, gen->n_stmts);
544 free_array_info(gen);
545 isl_set_free(gen->context);
546 isl_union_set_free(gen->copy_in);
547 isl_union_map_free(gen->sched);
548 isl_union_map_free(gen->read);
549 isl_union_map_free(gen->write);
552 static void print_reverse_list(FILE *out, int len, int *list)
554 int i;
556 if (len == 0)
557 return;
559 fprintf(out, "(");
560 for (i = 0; i < len; ++i) {
561 if (i)
562 fprintf(out, ", ");
563 fprintf(out, "%d", list[len - 1 - i]);
565 fprintf(out, ")");
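/* For example (hypothetical values), with len = 2 and list = {32, 16},
 * print_reverse_list() prints "(16, 32)".
 */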
568 static void print_kernel_launch(struct cuda_gen *gen,
569 __isl_keep isl_union_set *arrays)
571 int i;
572 int first = 1;
573 unsigned nparam;
574 isl_space *dim;
576 print_indent(gen->code.dst, gen->code.indent);
577 fprintf(gen->code.dst, "kernel%d <<<k%d_dimGrid, k%d_dimBlock>>> (",
578 gen->kernel_id, gen->kernel_id, gen->kernel_id);
579 fprintf(gen->cuda.kernel_c, "__global__ void kernel%d(",
580 gen->kernel_id);
581 fprintf(gen->cuda.kernel_h, "__global__ void kernel%d(",
582 gen->kernel_id);
584 for (i = 0; i < gen->n_array; ++i) {
585 isl_space *dim;
586 isl_set *arr;
587 int empty;
589 dim = isl_space_copy(gen->array[i].dim);
590 arr = isl_union_set_extract_set(arrays, dim);
591 empty = isl_set_fast_is_empty(arr);
592 isl_set_free(arr);
593 if (empty)
594 continue;
596 if (!first) {
597 fprintf(gen->code.dst, ", ");
598 fprintf(gen->cuda.kernel_c, ", ");
599 fprintf(gen->cuda.kernel_h, ", ");
602 fprintf(gen->code.dst, "dev_%s", gen->array[i].name);
603 fprintf(gen->cuda.kernel_c, "%s *%s",
604 gen->array[i].type, gen->array[i].name);
605 fprintf(gen->cuda.kernel_h, "%s *%s",
606 gen->array[i].type, gen->array[i].name);
608 first = 0;
611 dim = isl_union_set_get_space(arrays);
612 nparam = isl_space_dim(dim, isl_dim_param);
613 for (i = 0; i < nparam; ++i) {
614 const char *name = isl_space_get_dim_name(dim, isl_dim_param, i);
615 if (!first) {
616 fprintf(gen->code.dst, ", ");
617 fprintf(gen->cuda.kernel_c, ", ");
618 fprintf(gen->cuda.kernel_h, ", ");
620 fprintf(gen->code.dst, "%s", name);
621 fprintf(gen->cuda.kernel_c, "int %s", name);
622 fprintf(gen->cuda.kernel_h, "int %s", name);
623 first = 0;
625 isl_space_free(dim);
627 for (i = 0; i < gen->tile_first; ++i) {
628 if (!first) {
629 fprintf(gen->code.dst, ", ");
630 fprintf(gen->cuda.kernel_c, ", ");
631 fprintf(gen->cuda.kernel_h, ", ");
633 fprintf(gen->code.dst, "h%d", i);
634 fprintf(gen->cuda.kernel_c, "int h%d", i);
635 fprintf(gen->cuda.kernel_h, "int h%d", i);
636 first = 0;
639 fprintf(gen->code.dst, ");\n");
640 fprintf(gen->cuda.kernel_c, ")\n");
641 fprintf(gen->cuda.kernel_h, ");\n");
643 fprintf(gen->code.dst, "cudaCheckKernel();\n");
646 /* Construct a map from a domain of dimensionality "len"
647 * to a domain of dimensionality "len" + "tile_len" that tiles
648 * the "tile_len" coordinates starting at "first".
649 * In particular, [s_i] -> [s_i / tile_size[i], s_i % tile_size[i]].
650 * "dim" prescribes the parameters.
652 static __isl_give isl_map *tile(__isl_take isl_space *dim, int len,
653 int first, int tile_len, int *tile_size)
655 int i;
656 isl_int v;
657 isl_basic_map *bmap;
658 isl_constraint *c;
659 isl_local_space *ls;
661 isl_int_init(v);
663 dim = isl_space_add_dims(dim, isl_dim_in, len);
664 dim = isl_space_add_dims(dim, isl_dim_out, len + tile_len);
665 bmap = isl_basic_map_universe(isl_space_copy(dim));
666 ls = isl_local_space_from_space(dim);
668 for (i = 0; i < len - tile_len; ++i) {
669 int j = i < first ? i : i + tile_len;
670 int k = i < first ? i : i + 2 * tile_len;
672 c = isl_equality_alloc(isl_local_space_copy(ls));
673 isl_int_set_si(v, -1);
674 isl_constraint_set_coefficient(c, isl_dim_in, j, v);
675 isl_int_set_si(v, 1);
676 isl_constraint_set_coefficient(c, isl_dim_out, k, v);
677 bmap = isl_basic_map_add_constraint(bmap, c);
680 for (i = 0; i < tile_len; ++i) {
681 c = isl_equality_alloc(isl_local_space_copy(ls));
682 isl_int_set_si(v, -1);
683 isl_constraint_set_coefficient(c, isl_dim_in, first + i, v);
684 isl_int_set_si(v, tile_size[i]);
685 isl_constraint_set_coefficient(c, isl_dim_out, first + i, v);
686 isl_int_set_si(v, 1);
687 isl_constraint_set_coefficient(c, isl_dim_out,
688 first + i + tile_len, v);
689 bmap = isl_basic_map_add_constraint(bmap, c);
691 c = isl_inequality_alloc(isl_local_space_copy(ls));
692 isl_int_set_si(v, 1);
693 isl_constraint_set_coefficient(c, isl_dim_out,
694 first + i + tile_len, v);
695 bmap = isl_basic_map_add_constraint(bmap, c);
697 c = isl_inequality_alloc(isl_local_space_copy(ls));
698 isl_int_set_si(v, -1);
699 isl_constraint_set_coefficient(c, isl_dim_out,
700 first + i + tile_len, v);
701 isl_int_set_si(v, tile_size[i] - 1);
702 isl_constraint_set_constant(c, v);
703 bmap = isl_basic_map_add_constraint(bmap, c);
706 isl_local_space_free(ls);
707 isl_int_clear(v);
709 return isl_map_from_basic_map(bmap);
712 /* Construct a map from a domain of dimensionality "len"
713 * to a domain of dimensionality "len" + "wrap_len" that "wraps"
714 * the "wrap_len" coordinates starting at "first" according to "wrap_size".
715 * In particular, [s_i] -> [s_i, s_i % wrap_size[i]].
716 * To do so, we need extra variables corresponding to [s_i / wrap_size[i]],
717 * that are projected out at the end.
718 * "dim" prescribes the parameters.
720 static __isl_give isl_map *wrap(__isl_take isl_space *dim, int len,
721 int first, int wrap_len, int *wrap_size)
723 int i;
724 isl_basic_map *bmap;
725 isl_constraint *c;
726 isl_local_space *ls;
728 dim = isl_space_add_dims(dim, isl_dim_in, len);
729 dim = isl_space_add_dims(dim, isl_dim_out, len + 2 * wrap_len);
730 bmap = isl_basic_map_universe(isl_space_copy(dim));
731 ls = isl_local_space_from_space(dim);
733 for (i = 0; i < len; ++i) {
734 int k = i < first + wrap_len ? i : i + 2 * wrap_len;
736 c = isl_equality_alloc(isl_local_space_copy(ls));
737 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
738 isl_constraint_set_coefficient_si(c, isl_dim_out, k, 1);
739 bmap = isl_basic_map_add_constraint(bmap, c);
742 for (i = 0; i < wrap_len; ++i) {
743 c = isl_equality_alloc(isl_local_space_copy(ls));
744 isl_constraint_set_coefficient_si(c, isl_dim_out,
745 first + i, -1);
746 isl_constraint_set_coefficient_si(c, isl_dim_out,
747 first + wrap_len + i, 1);
748 isl_constraint_set_coefficient_si(c, isl_dim_out,
749 first + 2 * wrap_len + i, wrap_size[i]);
750 bmap = isl_basic_map_add_constraint(bmap, c);
752 c = isl_inequality_alloc(isl_local_space_copy(ls));
753 isl_constraint_set_coefficient_si(c, isl_dim_out,
754 first + wrap_len + i, 1);
755 bmap = isl_basic_map_add_constraint(bmap, c);
757 c = isl_inequality_alloc(isl_local_space_copy(ls));
758 isl_constraint_set_coefficient_si(c, isl_dim_out,
759 first + wrap_len + i, -1);
760 isl_constraint_set_constant_si(c, wrap_size[i] - 1);
761 bmap = isl_basic_map_add_constraint(bmap, c);
764 isl_local_space_free(ls);
766 bmap = isl_basic_map_project_out(bmap, isl_dim_out,
767 first + 2 * wrap_len, wrap_len);
769 return isl_map_from_basic_map(bmap);
772 /* Add "n" parameters named prefix%d.
774 static __isl_give isl_set *add_params( __isl_take isl_set *set,
775 int n, const char *prefix)
777 int i;
778 unsigned nparam;
779 char name[20];
781 nparam = isl_set_dim(set, isl_dim_param);
782 set = isl_set_add_dims(set, isl_dim_param, n);
784 for (i = 0; i < n; ++i) {
785 snprintf(name, sizeof(name), "%s%d", prefix, i);
786 set = isl_set_set_dim_name(set, isl_dim_param,
787 nparam + i, name);
790 return set;
793 /* Equate the "n" dimensions of "set" starting at "first" to
794 * freshly created parameters named prefix%d.
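/* For example (hypothetical prefix), parametrize(set, 0, 2, "h") adds the
 * parameters h0 and h1 and intersects "set" with the constraints equating
 * h0 and h1 to the first two set dimensions.
 */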
796 static __isl_give isl_set *parametrize(__isl_take isl_set *set,
797 int first, int n, const char *prefix)
799 int i;
800 unsigned nparam;
801 isl_int v;
802 isl_space *dim;
803 isl_basic_set *bset;
804 isl_constraint *c;
805 isl_local_space *ls;
807 nparam = isl_set_dim(set, isl_dim_param);
809 set = add_params(set, n, prefix);
811 dim = isl_set_get_space(set);
812 bset = isl_basic_set_universe(isl_space_copy(dim));
813 ls = isl_local_space_from_space(dim);
815 isl_int_init(v);
817 for (i = 0; i < n; ++i) {
818 c = isl_equality_alloc(isl_local_space_copy(ls));
819 isl_int_set_si(v, -1);
820 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
821 isl_int_set_si(v, 1);
822 isl_constraint_set_coefficient(c, isl_dim_set, first + i, v);
823 bset = isl_basic_set_add_constraint(bset, c);
826 isl_int_clear(v);
827 isl_local_space_free(ls);
829 return isl_set_intersect(set, isl_set_from_basic_set(bset));
832 static __isl_give isl_set *parametrization(__isl_take isl_space *dim,
833 int len, int first, int n, const char *prefix)
835 isl_set *set;
837 dim = isl_space_add_dims(dim, isl_dim_set, len);
838 set = isl_set_universe(dim);
840 return parametrize(set, first, n, prefix);
843 /* Tile the B loops over the tile sizes and then tile/wrap
844 * the T1 loops over the blocks.
846 static __isl_give isl_union_map *tile_schedule(struct cuda_gen *gen,
847 __isl_take isl_union_map *sched)
849 isl_space *dim;
850 isl_map *tiling, *block_tiling;
852 dim = isl_union_map_get_space(sched);
853 tiling = tile(isl_space_copy(dim), gen->untiled_len,
854 gen->tile_first, gen->tile_len, gen->tile_size);
856 if (gen->options->wrap)
857 block_tiling = wrap(dim, gen->untiled_len + gen->tile_len,
858 gen->tile_first, gen->n_grid, gen->grid_dim);
859 else
860 block_tiling = tile(dim, gen->untiled_len + gen->tile_len,
861 gen->tile_first, gen->n_grid, gen->grid_dim);
863 gen->tiled_len = gen->untiled_len + gen->tile_len + gen->n_grid;
865 tiling = isl_map_apply_range(tiling, block_tiling);
867 sched = isl_union_map_apply_range(sched,
868 isl_union_map_from_map(tiling));
870 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
872 return sched;
875 static __isl_give isl_union_map *parametrize_tiled_schedule(
876 struct cuda_gen *gen, __isl_take isl_union_map *sched)
878 isl_space *dim;
879 isl_set *par;
881 dim = isl_union_map_get_space(sched);
882 par = parametrization(dim, gen->tiled_len, 0, gen->tile_first, "h");
883 sched = isl_union_map_intersect_range(sched,
884 isl_union_set_from_set(par));
886 dim = isl_union_map_get_space(sched);
887 par = parametrization(dim, gen->tiled_len,
888 gen->tile_first + gen->n_grid, gen->n_grid, "b");
889 sched = isl_union_map_intersect_range(sched,
890 isl_union_set_from_set(par));
892 return sched;
895 /* Tile/wrap the P1 loops over the threads.
897 static __isl_give isl_union_map *thread_tile_schedule(struct cuda_gen *gen,
898 __isl_take isl_union_map *sched)
900 isl_space *dim;
901 isl_map *tiling;
902 isl_set *par;
904 dim = isl_union_map_get_space(sched);
906 if (gen->options->wrap)
907 tiling = wrap(isl_space_copy(dim), gen->tiled_len,
908 gen->shared_len, gen->n_block, gen->block_dim);
909 else
910 tiling = tile(isl_space_copy(dim), gen->tiled_len,
911 gen->shared_len, gen->n_block, gen->block_dim);
912 gen->thread_tiled_len = gen->tiled_len + gen->n_block;
914 sched = isl_union_map_apply_range(sched,
915 isl_union_map_from_map(tiling));
917 par = parametrization(dim, gen->thread_tiled_len,
918 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
919 gen->n_block, "t");
920 sched = isl_union_map_intersect_range(sched,
921 isl_union_set_from_set(par));
923 gen->shared_len = gen->tile_first + gen->tile_len + gen->n_grid;
925 return sched;
928 /* If the user asked for it, scale the shared memory tile loops
929 * (T1P and T2) of "sched" by gen->tile_size[i].
930 * If we are not performing "wrapping", then additionally scale the T1P
931 * loops by gen->grid_dim[i].
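/* For example (hypothetical sizes): with tile_size[i] = 32 and, when not
 * wrapping, grid_dim[i] = 256, the corresponding T1P schedule dimension is
 * scaled by 32 * 256 = 8192, while dimensions outside the tile loops are
 * copied unchanged (f = 1).
 */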
933 static __isl_give isl_union_map *scale_tile_loops(struct cuda_gen *gen,
934 __isl_take isl_union_map *sched)
936 int i;
937 isl_space *dim;
938 isl_basic_map *scale;
939 isl_constraint *c;
940 isl_local_space *ls;
942 if (!gen->options->scale_tile_loops)
943 return sched;
945 dim = isl_union_map_get_space(sched);
946 dim = isl_space_add_dims(dim, isl_dim_in, gen->tiled_len);
947 dim = isl_space_add_dims(dim, isl_dim_out, gen->tiled_len);
948 scale = isl_basic_map_universe(isl_space_copy(dim));
949 ls = isl_local_space_from_space(dim);
951 for (i = 0; i < gen->tiled_len; ++i) {
952 int f = 1;
954 if (i >= gen->tile_first && i < gen->tile_first + gen->n_grid) {
955 f = gen->tile_size[i - gen->tile_first];
956 if (!gen->options->wrap)
957 f *= gen->grid_dim[i - gen->tile_first];
958 } else if (i >= gen->tile_first + gen->n_grid &&
959 i < gen->tile_first + gen->n_grid + gen->tile_len) {
960 f = gen->tile_size[i - (gen->tile_first + gen->n_grid)];
963 c = isl_equality_alloc(isl_local_space_copy(ls));
964 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
965 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
966 scale = isl_basic_map_add_constraint(scale, c);
969 isl_local_space_free(ls);
971 sched = isl_union_map_apply_range(sched,
972 isl_union_map_from_map(isl_map_from_basic_map(scale)));
974 return sched;
977 /* If we are not performing "wrapping" and if the user asked for it,
978 * scale the thread tile loops (P1T) of "sched" by gen->block_dim[i].
980 static __isl_give isl_union_map *scale_thread_tile_loops(struct cuda_gen *gen,
981 __isl_take isl_union_map *sched)
983 int i;
984 isl_space *dim;
985 isl_basic_map *scale;
986 isl_constraint *c;
987 isl_local_space *ls;
989 if (gen->options->wrap)
990 return sched;
991 if (!gen->options->scale_tile_loops)
992 return sched;
994 dim = isl_union_map_get_space(sched);
995 dim = isl_space_add_dims(dim, isl_dim_in, gen->thread_tiled_len);
996 dim = isl_space_add_dims(dim, isl_dim_out, gen->thread_tiled_len);
997 scale = isl_basic_map_universe(isl_space_copy(dim));
998 ls = isl_local_space_from_space(dim);
1000 for (i = 0; i < gen->thread_tiled_len; ++i) {
1001 int f = 1;
1003 if (i >= gen->shared_len &&
1004 i < gen->shared_len + gen->n_block)
1005 f = gen->block_dim[i - gen->shared_len];
1007 c = isl_equality_alloc(isl_local_space_copy(ls));
1008 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1009 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1010 scale = isl_basic_map_add_constraint(scale, c);
1013 isl_local_space_free(ls);
1015 sched = isl_union_map_apply_range(sched,
1016 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1018 return sched;
1021 /* If we are not performing "wrapping" and if the user asked for it,
1022 * scale the "n_tile" loops starting at "first" of "sched" by gen->block_dim[i].
1024 static __isl_give isl_union_map *scale_access_tile_loops(struct cuda_gen *gen,
1025 __isl_take isl_union_map *sched, int len, int first, int n_tile)
1027 int i;
1028 isl_space *dim;
1029 isl_basic_map *scale;
1030 isl_constraint *c;
1031 isl_local_space *ls;
1033 if (gen->options->wrap)
1034 return sched;
1035 if (!gen->options->scale_tile_loops)
1036 return sched;
1038 dim = isl_union_map_get_space(sched);
1039 dim = isl_space_add_dims(dim, isl_dim_in, len);
1040 dim = isl_space_add_dims(dim, isl_dim_out, len);
1041 scale = isl_basic_map_universe(isl_space_copy(dim));
1042 ls = isl_local_space_from_space(dim);
1044 for (i = 0; i < len; ++i) {
1045 int f = 1;
1047 if (i >= first && i < first + n_tile)
1048 f = gen->block_dim[i - first];
1050 c = isl_equality_alloc(isl_local_space_copy(ls));
1051 isl_constraint_set_coefficient_si(c, isl_dim_in, i, f);
1052 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
1053 scale = isl_basic_map_add_constraint(scale, c);
1056 isl_local_space_free(ls);
1058 sched = isl_union_map_apply_range(sched,
1059 isl_union_map_from_map(isl_map_from_basic_map(scale)));
1061 return sched;
1064 /* If print_user_stmt is set, we want to print the statements ourselves,
1065 * instead of relying on the C preprocessor. If so, we need to use
1066 * the stop option so that the domains will be saved on the statement
1067 * nodes.
1069 static void print_cloog_shared_body(struct cuda_gen *gen,
1070 __isl_keep isl_set *context, __isl_keep isl_union_map *sched, int len,
1071 void (*print_user_stmt)(struct gpucode_info *info,
1072 struct clast_user_stmt *s),
1073 int first_unroll)
1075 int i;
1076 CloogOptions *options;
1077 CloogDomain *cloog_context;
1078 CloogUnionDomain *ud;
1079 CloogInput *input;
1080 struct clast_stmt *stmt;
1081 char name[20];
1083 sched = isl_union_map_copy(sched);
1084 sched = isl_union_map_align_params(sched, isl_set_get_space(context));
1086 options = cloog_options_malloc(gen->state);
1087 options->language = CLOOG_LANGUAGE_C;
1088 options->strides = 1;
1089 options->sh = 1;
1090 options->f = len;
1091 options->l = -1;
1092 options->override = 1;
1093 options->save_domains = 1;
1094 options->noscalars = 1;
1095 options->first_unroll = first_unroll;
1097 ud = cloog_union_domain_from_isl_union_map(sched);
1098 for (i = 0; i < len; ++i) {
1099 snprintf(name, sizeof(name), "c%d", i);
1100 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
1102 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
1103 input = cloog_input_alloc(cloog_context, ud);
1105 stmt = cloog_clast_create_from_input(input, options);
1107 gen->stmt_code.indent = gen->kernel_code.indent;
1108 gen->stmt_code.dst = gen->cuda.kernel_c;
1109 gen->stmt_code.print_user_stmt = print_user_stmt;
1110 gen->stmt_code.print_user_stmt_list = NULL;
1111 gen->stmt_code.print_for_head = NULL;
1112 gen->stmt_code.print_for_foot = NULL;
1113 gen->stmt_code.user = gen;
1114 gpu_print_host_stmt(&gen->stmt_code, stmt);
1116 cloog_clast_free(stmt);
1117 cloog_options_free(options);
1120 /* Add "len" parameters p[i] called prefix%d,
1121 * with bounds 0 <= p[i] < size[i].
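/* For example (hypothetical sizes), add_bounded_parameters(set, 2, size, "t")
 * with size = {32, 16} adds parameters t0 and t1 together with the
 * constraints 0 <= t0 <= 31 and 0 <= t1 <= 15.
 */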
1123 __isl_give isl_set *add_bounded_parameters(__isl_take isl_set *set,
1124 int len, int *size, const char *prefix)
1126 int i;
1127 unsigned nparam;
1128 isl_int v;
1129 isl_space *dim;
1130 isl_basic_set *bset;
1131 isl_constraint *c;
1132 isl_local_space *ls;
1133 char name[20];
1135 nparam = isl_set_dim(set, isl_dim_param);
1136 set = isl_set_add_dims(set, isl_dim_param, len);
1138 for (i = 0; i < len; ++i) {
1139 snprintf(name, sizeof(name), "%s%d", prefix, i);
1140 set = isl_set_set_dim_name(set, isl_dim_param,
1141 nparam + i, name);
1144 dim = isl_set_get_space(set);
1145 bset = isl_basic_set_universe(isl_space_copy(dim));
1146 ls = isl_local_space_from_space(dim);
1148 isl_int_init(v);
1150 for (i = 0; i < len; ++i) {
1151 c = isl_inequality_alloc(isl_local_space_copy(ls));
1152 isl_int_set_si(v, 1);
1153 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1154 bset = isl_basic_set_add_constraint(bset, c);
1156 c = isl_inequality_alloc(isl_local_space_copy(ls));
1157 isl_int_set_si(v, -1);
1158 isl_constraint_set_coefficient(c, isl_dim_param, nparam + i, v);
1159 isl_int_set_si(v, size[i] - 1);
1160 isl_constraint_set_constant(c, v);
1161 bset = isl_basic_set_add_constraint(bset, c);
1164 isl_int_clear(v);
1165 isl_local_space_free(ls);
1167 return isl_set_intersect(set, isl_set_from_basic_set(bset));
1170 static void print_shared_body(struct cuda_gen *gen,
1171 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched,
1172 int len, void (*print_user_stmt)(struct gpucode_info *info,
1173 struct clast_user_stmt *s),
1174 int first_unroll)
1176 isl_set *context;
1178 context = isl_set_copy(shared_domain);
1179 context = parametrize(context, 0, gen->shared_len, "g");
1180 context = isl_set_project_out(context, isl_dim_set, 0, gen->shared_len);
1181 context = add_bounded_parameters(context,
1182 gen->n_block, gen->block_dim, "t");
1184 print_cloog_shared_body(gen, context, sched, len, print_user_stmt,
1185 first_unroll);
1187 isl_set_free(context);
1190 /* Given a tile of an array, construct a map that maps each element
1191 * of the tile to a copy of the tile shifted to the origin
1192 * (based on the lower bounds in group->private_bound or group->shared_bound).
1193 * If any of the indices is strided, then {private,shared}_bound[i].shift_map
1194 * is applied to the index first.
1195 * The domain of the resulting map is "access",
1196 * while the range space is anonymous.
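/* Illustrative example (hypothetical bounds): if the tile of a 2D array has
 * lower bounds lb0 and lb1 (affine expressions in the outer loop iterators)
 * and no strides, the constructed map sends tile element [lb0 + a][lb1 + b]
 * to the anonymous target element [a][b], i.e., the tile shifted to the
 * origin.
 */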
1198 static __isl_give isl_map *shift_access(__isl_take isl_set *access,
1199 struct cuda_array_ref_group *group)
1201 int i;
1202 isl_space *dim;
1203 isl_basic_set *bset;
1204 isl_basic_map *bmap;
1205 isl_aff *lb;
1206 isl_basic_set *offset;
1207 isl_basic_map *shift;
1208 isl_basic_map *pre_shift;
1209 isl_map *sched;
1210 const char *name;
1211 struct cuda_array_bound *bounds;
1212 int n_index = group->array->n_index;
1214 bounds = group->private_bound;
1215 if (!bounds)
1216 bounds = group->shared_bound;
1218 dim = isl_set_get_space(access);
1219 dim = isl_space_drop_dims(dim, isl_dim_set, 0, n_index);
1220 offset = isl_basic_set_universe(dim);
1221 for (i = 0; i < n_index; ++i) {
1222 lb = isl_aff_copy(bounds[i].lb);
1223 bmap = isl_basic_map_from_aff(lb);
1224 bset = isl_basic_map_range(bmap);
1225 offset = isl_basic_set_flat_product(offset, bset);
1227 offset = isl_basic_set_neg(offset);
1229 dim = isl_space_map_from_set(isl_set_get_space(access));
1230 shift = isl_basic_map_identity(dim);
1231 shift = isl_basic_map_set_tuple_name(shift, isl_dim_out, NULL);
1233 bset = isl_basic_set_universe(isl_set_get_space(access));
1234 bmap = isl_basic_map_from_domain_and_range(bset, offset);
1236 shift = isl_basic_map_sum(shift, bmap);
1238 dim = isl_set_get_space(access);
1239 dim = isl_space_drop_dims(dim, isl_dim_set, 0, n_index);
1240 dim = isl_space_map_from_set(dim);
1241 pre_shift = isl_basic_map_universe(isl_space_copy(dim));
1242 dim = isl_space_add_dims(dim, isl_dim_in, 1);
1243 dim = isl_space_add_dims(dim, isl_dim_out, 1);
1244 for (i = 0; i < n_index; ++i) {
1245 if (!bounds[i].shift_map)
1246 bmap = isl_basic_map_identity(isl_space_copy(dim));
1247 else
1248 bmap = isl_basic_map_copy(bounds[i].shift_map);
1249 pre_shift = isl_basic_map_flat_product(pre_shift, bmap);
1251 isl_space_free(dim);
1252 name = isl_basic_map_get_tuple_name(shift, isl_dim_in);
1253 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_in, name);
1254 pre_shift = isl_basic_map_set_tuple_name(pre_shift, isl_dim_out, name);
1255 shift = isl_basic_map_apply_range(pre_shift, shift);
1257 sched = isl_map_from_basic_map(shift);
1258 sched = isl_map_intersect_domain(sched, access);
1260 return sched;
1263 /* Construct a schedule for iterating over all elements in the given
1264 * piece of an array. The schedule iterates over a copy of the piece
1265 * that is shifted to the origin.
1266 * We subsequently also perform the tiling/wrapping over the threads.
1268 * In particular, we tile the final iterators so that the final thread
1269 * dimension runs over the final array dimension.
1270 * However, if those final iterators have only a single iteration,
1271 * we try to tile earlier iterators instead.
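/* For illustration (hypothetical sizes): with n_block = 2 thread dimensions
 * and a three-dimensional tile whose last dimension is fixed to a single
 * value, the two thread dimensions are mapped to the first two tile
 * dimensions instead of the last two.
 */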
1273 static __isl_give isl_union_map *access_schedule(struct cuda_gen *gen,
1274 __isl_take isl_set *access, struct cuda_array_ref_group *group)
1276 isl_space *dim;
1277 isl_map *sched;
1278 isl_union_map *usched;
1279 isl_map *tiling;
1280 isl_set *par;
1281 unsigned nvar = isl_set_dim(access, isl_dim_set);
1282 int n_tile;
1283 int first;
1285 sched = shift_access(access, group);
1287 n_tile = gen->n_block;
1288 if (n_tile > nvar) {
1289 int i;
1290 sched = isl_map_insert_dims(sched,
1291 isl_dim_out, 0, n_tile - nvar);
1292 for (i = 0; i < n_tile - nvar; ++i)
1293 sched = isl_map_fix_si(sched, isl_dim_out, i, 0);
1294 nvar = n_tile;
1297 first = nvar - n_tile;
1299 for (; first > 0; first --)
1300 if (!isl_map_plain_is_fixed(sched, isl_dim_out,
1301 first + n_tile - 1, NULL))
1302 break;
1304 dim = isl_map_get_space(sched);
1305 dim = isl_space_params(dim);
1306 if (gen->options->wrap)
1307 tiling = wrap(isl_space_copy(dim), nvar, first,
1308 n_tile, gen->block_dim);
1309 else
1310 tiling = tile(isl_space_copy(dim), nvar, first,
1311 n_tile, gen->block_dim);
1312 sched = isl_map_apply_range(sched, tiling);
1314 par = parametrization(dim, nvar + n_tile, first + n_tile, n_tile, "t");
1315 usched = isl_union_map_from_map(sched);
1316 usched = isl_union_map_intersect_range(usched,
1317 isl_union_set_from_set(par));
1319 usched = scale_access_tile_loops(gen, usched, nvar + n_tile,
1320 first, n_tile);
1322 return usched;
1325 /* Print an access to the element in the global memory copy of the
1326 * given array that corresponds to element [aff[0]][aff[1]]...
1327 * of the original array.
1328 * The copy in global memory has been linearized, so we need to take
1329 * the array size into account.
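/* For illustration (hypothetical array and bounds): for a 2D array A whose
 * second bound prints as N1, an access with aff[0] = i and aff[1] = j is
 * emitted as
 *
 *	A[(i) * (N1) + j]
 *
 * while a scalar access is printed as "*A", matching the code below.
 */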
1331 static void print_global_index(isl_ctx *ctx, FILE *out,
1332 struct cuda_array_info *array, __isl_keep isl_aff **aff)
1334 int i;
1335 isl_printer *prn;
1337 if (cuda_array_is_scalar(array)) {
1338 fprintf(out, "*%s", array->name);
1339 return;
1342 fprintf(out, "%s[", array->name);
1343 prn = isl_printer_to_file(ctx, out);
1344 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1345 for (i = 0; i + 1 < array->n_index; ++i)
1346 prn = isl_printer_print_str(prn, "(");
1347 for (i = 0; i < array->n_index; ++i) {
1348 if (i) {
1349 prn = isl_printer_print_str(prn, ") * (");
1350 prn = isl_printer_print_pw_aff(prn,
1351 array->local_bound[i]);
1352 prn = isl_printer_print_str(prn, ") + ");
1354 prn = isl_printer_print_aff(prn, aff[i]);
1356 isl_printer_free(prn);
1357 fprintf(out, "]");
1360 /* Given an index expression into a tile of an array, adjust the expression
1361 * to account for the shift of the tile to the origin
1362 * (based on the lower bounds in array->shared_bound).
1363 * If the index is strided, then we first add
1364 * bound->shift and divide by bound->stride.
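/* For example (hypothetical values): with bound->lb = 32 * b0 and no stride,
 * an index expression i becomes i - 32 * b0; if in addition stride = 4 and
 * shift = 1, the index is first rewritten as (i + 1)/4 before the lower
 * bound is subtracted.
 */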
1366 static __isl_give isl_aff *shift_index(__isl_take isl_aff *aff,
1367 struct cuda_array_info *array,
1368 struct cuda_array_bound *bound, __isl_take isl_set *domain)
1370 isl_aff *lb;
1372 if (bound->shift) {
1373 isl_aff *shift;
1374 shift = bound->shift;
1375 shift = isl_aff_copy(shift);
1376 shift = isl_aff_project_domain_on_params(shift);
1377 shift = isl_aff_align_params(shift, isl_aff_get_space(aff));
1378 aff = isl_aff_add(aff, shift);
1379 aff = isl_aff_scale_down(aff, bound->stride);
1382 lb = isl_aff_copy(bound->lb);
1383 lb = isl_aff_project_domain_on_params(lb);
1385 lb = isl_aff_align_params(lb, isl_aff_get_space(aff));
1387 aff = isl_aff_sub(aff, lb);
1388 aff = isl_aff_gist(aff, domain);
1390 return aff;
1393 /* Print an access to the element in the private/shared memory copy of the
1394 * given array reference group that corresponds to element [affs[0]][affs[1]]...
1395 * of the original array.
1396 * Since the array in private/shared memory is just a shifted copy of part
1397 * of the original array, we simply need to subtract the lower bound,
1398 * which was computed in can_tile_for_shared_memory.
1399 * If any of the indices is strided, then we first add
1400 * bounds[i].shift and divide by bounds[i].stride.
1402 static void print_local_index(isl_ctx *ctx, FILE *out,
1403 struct cuda_array_ref_group *group, struct cuda_array_bound *bounds,
1404 __isl_keep isl_aff **affs, __isl_keep isl_set *domain)
1406 int i;
1407 isl_printer *prn;
1408 struct cuda_array_info *array = group->array;
1410 print_array_name(out, group);
1411 for (i = 0; i < array->n_index; ++i) {
1412 isl_aff *aff = isl_aff_copy(affs[i]);
1414 aff = shift_index(aff, array, &bounds[i], isl_set_copy(domain));
1416 fprintf(out, "[");
1417 prn = isl_printer_to_file(ctx, out);
1418 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1419 prn = isl_printer_print_aff(prn, aff);
1420 isl_printer_free(prn);
1421 fprintf(out, "]");
1422 isl_aff_free(aff);
1426 /* This function is called for each leaf in the clast of the code
1427 * for copying to or from shared/private memory.
1428 * The statement name is {read,write}_{shared,private}_<array>.
1430 * The schedule iterates over the array elements, so we can use
1431 * the domain of copy_sched at the current scheduling position
1432 * as the index of the array.
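/* An illustrative emitted statement (names, bounds and index expressions are
 * hypothetical): for a read it has the form
 *
 *	shared_A[c0 - 32 * b0][c1 - 16 * b1] = A[(c0) * (N1) + c1];
 *
 * and for a write the two sides of the assignment are swapped.
 */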
1434 static void print_copy_statement(struct gpucode_info *code,
1435 struct clast_user_stmt *u)
1437 struct cuda_gen *gen = code->user;
1438 isl_set *domain;
1439 isl_map *sched;
1440 struct cuda_array_ref_group *group = gen->copy_group;
1441 struct cuda_array_bound *bounds = gen->copy_bound;
1442 int i;
1443 unsigned n_in;
1444 unsigned n_out;
1445 isl_space *dim;
1446 isl_set *param;
1447 isl_set *index;
1448 isl_basic_set *aff;
1449 isl_ctx *ctx;
1450 isl_aff **affs;
1451 int read;
1453 read = !strncmp(u->statement->name, "read", 4);
1455 domain = extract_host_domain(u);
1456 assert(domain);
1458 sched = isl_map_copy(gen->copy_sched);
1459 sched = isl_map_reverse(sched);
1460 sched = isl_map_intersect_domain(sched, domain);
1461 n_in = isl_map_dim(sched, isl_dim_in);
1462 n_out = isl_map_dim(sched, isl_dim_out);
1463 dim = isl_map_get_space(sched);
1464 dim = isl_space_drop_dims(dim, isl_dim_in, 0, n_in);
1465 dim = isl_space_drop_dims(dim, isl_dim_out, 0, n_out);
1466 param = parametrization(dim, n_in, 0, n_in, "c");
1467 sched = isl_map_align_params(sched, isl_set_get_space(param));
1468 sched = isl_map_intersect_domain(sched, param);
1469 index = isl_map_range(sched);
1470 domain = isl_set_copy(index);
1471 aff = isl_set_affine_hull(index);
1472 domain = isl_set_project_out(domain, isl_dim_set, 0, n_out);
1474 ctx = isl_basic_set_get_ctx(aff);
1475 affs = isl_alloc_array(ctx, isl_aff *, n_out);
1476 assert(affs);
1478 for (i = 0; i < n_out; ++i) {
1479 isl_constraint *c;
1480 int ok;
1482 ok = isl_basic_set_has_defining_equality(aff,
1483 isl_dim_set, i, &c);
1484 assert(ok);
1485 affs[i] = isl_constraint_get_bound(c, isl_dim_set, i);
1486 isl_constraint_free(c);
1487 affs[i] = isl_aff_project_domain_on_params(affs[i]);
1490 print_indent(code->dst, code->indent);
1491 if (read) {
1492 print_local_index(ctx, code->dst, group, bounds, affs, domain);
1493 fprintf(code->dst, " = ");
1494 print_global_index(ctx, code->dst, group->array, affs);
1495 } else {
1496 print_global_index(ctx, code->dst, group->array, affs);
1497 fprintf(code->dst, " = ");
1498 print_local_index(ctx, code->dst, group, bounds, affs, domain);
1500 fprintf(code->dst, ";\n");
1502 for (i = 0; i < n_out; ++i)
1503 isl_aff_free(affs[i]);
1504 free(affs);
1506 isl_basic_set_free(aff);
1507 isl_set_free(domain);
1510 static void print_shared_access(struct cuda_gen *gen,
1511 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
1512 const char *type, struct cuda_array_ref_group *group)
1514 const char *array_name;
1515 char *name;
1516 isl_ctx *ctx;
1517 isl_union_map *sched;
1518 unsigned nvar = isl_set_dim(access, isl_dim_set);
1519 int n_tile;
1521 ctx = isl_set_get_ctx(access);
1522 array_name = isl_set_get_tuple_name(access);
1523 name = isl_alloc_array(ctx, char,
1524 strlen(type) + sizeof("_shared_") + strlen(array_name) + 20);
1525 if (group->array->n_group > 1)
1526 sprintf(name, "%s_shared_%s_%d", type, array_name, group->nr);
1527 else
1528 sprintf(name, "%s_shared_%s", type, array_name);
1529 access = isl_set_set_tuple_name(access, name);
1530 free(name);
1532 sched = access_schedule(gen, access, group);
1534 n_tile = gen->n_block;
1535 if (n_tile > nvar)
1536 n_tile = nvar;
1538 gen->copy_sched = isl_map_from_union_map(isl_union_map_copy(sched));
1539 gen->copy_group = group;
1540 gen->copy_bound = group->shared_bound;
1542 print_shared_body(gen, shared_domain, sched, nvar + n_tile,
1543 &print_copy_statement, -1);
1545 isl_union_map_free(sched);
1546 isl_map_free(gen->copy_sched);
1549 /* Return the union of all read (read = 1) and/or write (write = 1)
1550 * access relations in the group.
1552 static __isl_give isl_union_map *group_access_relation(
1553 struct cuda_array_ref_group *group, int read, int write)
1555 int i;
1556 isl_union_map *access;
1558 access = isl_union_map_empty(isl_map_get_space(group->access));
1559 for (i = 0; i < group->n_ref; ++i) {
1560 isl_map *map_i;
1562 if (!((read && group->refs[i]->read) ||
1563 (write && group->refs[i]->write)))
1564 continue;
1565 map_i = isl_map_copy(group->refs[i]->access);
1566 access = isl_union_map_union(access,
1567 isl_union_map_from_map(map_i));
1570 return access;
1573 /* Check that none of the shared memory tiles involve any strides.
1575 static int no_strides(struct cuda_array_ref_group *group)
1577 int i;
1578 int n_index = group->array->n_index;
1580 for (i = 0; i < n_index; ++i)
1581 if (group->shared_bound[i].shift)
1582 return 0;
1584 return 1;
1587 /* Return a set containing the values of the given index i
1588 * of the elements in the array tile in global memory that corresponds
1589 * to the shared memory copy.
1590 * In particular, if a is the index, we return a set with constraints
1592 * tile_offset <= a <= tile_offset + tile_size - 1
1594 * and
1596 * 0 <= a <= array_size - 1
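/* For example (hypothetical bounds): with tile_offset = 32 * b0,
 * tile_size = 32 and an array bound that prints as N, the returned set is
 *
 *	{ [a] : 32 * b0 <= a <= 32 * b0 + 31 and 0 <= a <= N - 1 }
 */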
1599 static __isl_give isl_set *group_tile_dim(struct cuda_array_ref_group *group,
1600 int i)
1602 isl_basic_set *tile;
1603 isl_aff *aff;
1604 isl_constraint *c;
1605 isl_local_space *ls;
1606 isl_pw_aff *bound;
1607 isl_set *dom;
1608 isl_set *tile_set;
1610 aff = isl_aff_copy(group->shared_bound[i].lb);
1611 aff = isl_aff_add_dims(aff, isl_dim_in, 1);
1612 ls = isl_aff_get_domain_local_space(aff);
1613 aff = isl_aff_neg(aff);
1614 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
1615 c = isl_inequality_from_aff(isl_aff_copy(aff));
1616 tile = isl_basic_set_from_constraint(c);
1618 aff = isl_aff_neg(aff);
1619 aff = isl_aff_add_constant(aff, group->shared_bound[i].size);
1620 aff = isl_aff_add_constant_si(aff, -1);
1621 c = isl_inequality_from_aff(aff);
1622 tile = isl_basic_set_add_constraint(tile, c);
1624 aff = isl_aff_zero_on_domain(ls);
1625 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
1626 c = isl_inequality_from_aff(aff);
1627 tile = isl_basic_set_add_constraint(tile, c);
1629 bound = isl_pw_aff_copy(group->array->bound[i]);
1630 bound = isl_pw_aff_add_dims(bound, isl_dim_in, 1);
1631 ls = isl_local_space_from_space(isl_pw_aff_get_domain_space(bound));
1632 aff = isl_aff_zero_on_domain(ls);
1633 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
1634 aff = isl_aff_add_constant_si(aff, 1);
1635 dom = isl_pw_aff_domain(isl_pw_aff_copy(bound));
1637 tile_set = isl_pw_aff_ge_set(bound, isl_pw_aff_alloc(dom, aff));
1638 tile_set = isl_set_align_params(tile_set, isl_basic_set_get_space(tile));
1639 tile_set = isl_set_intersect(tile_set, isl_set_from_basic_set(tile));
1641 return tile_set;
1644 /* Return a set containing the elements in the array tile in
1645 * global memory that corresponds to the shared memory copy.
1647 static __isl_give isl_set *group_tile(struct cuda_array_ref_group *group)
1649 int i;
1650 int n_index = group->array->n_index;
1651 isl_set *tile;
1653 tile = group_tile_dim(group, 0);
1654 for (i = 1; i < n_index; ++i) {
1655 isl_set *tile_i;
1657 tile_i = group_tile_dim(group, i);
1658 tile = isl_set_flat_product(tile, tile_i);
1661 tile = isl_set_set_tuple_name(tile, group->array->name);
1663 return tile;
1666 /* Print code for reading the given array reference group into,
1667 * or writing it from, shared memory.
1669 * sched maps the original iteration domains to the shared memory tile loops.
1671 * If we are performing a read from global memory to shared memory,
1672 * if the array involved is not a scalar and if the definition of the
1673 * shared memory tiles does not involve any strides, then we copy
1674 * the entire tile to shared memory. This may result in some extra
1675 * elements getting copied, but it should lead to simpler code
1676 * (which means that fewer registers may be needed) and less divergence.
1678 * Otherwise, we only copy the elements that will be read or have been written
1679 * in the kernel.
1681 * Note that the requirement that there be no strides can easily be lifted.
1682 * We would just need to add constraints of the form
1684 * shift + a = stride * alpha
1686 static int print_group_shared_accesses(struct cuda_gen *gen,
1687 struct cuda_array_ref_group *group, const char *type,
1688 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *sched)
1690 int read;
1691 isl_union_map *access;
1692 isl_union_set *uset;
1693 isl_set *access_set;
1695 if (group->private_bound)
1696 return 0;
1697 if (!group->shared_bound)
1698 return 0;
1700 read = !strcmp(type, "read");
1702 access = group_access_relation(group, read, !read);
1703 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
1704 uset = isl_union_map_range(access);
1706 if (isl_union_set_is_empty(uset)) {
1707 isl_union_set_free(uset);
1708 return 0;
1711 if (read && group->array->n_index > 0 && no_strides(group)) {
1712 isl_union_set_free(uset);
1713 access_set = group_tile(group);
1714 print_shared_access(gen, shared_domain, access_set,
1715 type, group);
1716 return 1;
1719 access_set = isl_set_from_union_set(uset);
1720 access_set = isl_set_coalesce(access_set);
1722 print_shared_access(gen, shared_domain, access_set, type, group);
1724 return 1;
1727 /* Print code for reading into or writing from shared memory at
1728 * the given level (-1 for innermost).
1730 * If we are not printing at the innermost level, then the dimensionality
1731 * of shared_domain may be smaller than gen->shared_len.
1732 * As the rest of the code assumes that the domain of access has
1733 * gen->shared_len dimensions, we therefore may need to embed this domain
1734 * in a higher dimensional space after intersection with shared_domain.
1736 static void print_shared_accesses(struct cuda_gen *gen,
1737 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
1738 const char *type, int level)
1740 int i, j;
1741 isl_space *dim;
1742 isl_map *proj;
1743 isl_set *par;
1744 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
1745 int sync = 0;
1746 isl_union_map *sched;
1748 shared_domain = isl_set_copy(shared_domain);
1749 sched = isl_union_map_copy(gen->tiled_sched);
1750 dim = isl_union_map_get_space(sched);
1751 proj = projection(dim, gen->tiled_len, shared_len);
1752 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
1753 sched = isl_union_map_intersect_range(sched,
1754 isl_union_set_from_set(isl_set_copy(shared_domain)));
1755 if (shared_len != gen->shared_len) {
1756 dim = isl_union_map_get_space(sched);
1757 proj = projection(dim, gen->shared_len, shared_len);
1758 proj = isl_map_reverse(proj);
1759 shared_domain = isl_set_apply(shared_domain,
1760 isl_map_copy(proj));
1761 sched = isl_union_map_apply_range(sched,
1762 isl_union_map_from_map(proj));
1765 dim = isl_union_map_get_space(sched);
1766 par = parametrization(dim, gen->shared_len, 0, gen->shared_len, "g");
1767 sched = isl_union_map_intersect_range(sched,
1768 isl_union_set_from_set(par));
1770 for (i = 0; i < gen->n_array; ++i) {
1771 struct cuda_array_info *array = &gen->array[i];
1773 if (gen->array[i].print_shared_level != level)
1774 continue;
1776 for (j = 0; j < array->n_group; ++j) {
1777 if (print_group_shared_accesses(gen, array->groups[j],
1778 type, shared_domain, sched))
1779 sync = 1;
1783 isl_union_map_free(sched);
1784 isl_set_free(shared_domain);
1786 if (sync) {
1787 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
1788 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
1792 /* This function is called for each access to an array in some statement
1793 * in the original code.
1794 * Replace that access by an access to shared or (linearized) global memory.
1795 * Since the array in shared memory is just
1796 * a shifted copy of part of the original array, we simply need
1797 * to subtract the lower bound, which was computed
1798 * in can_tile_for_shared_memory.
1799 * If any of the indices is strided, then we first add
1800 * shared_bound[i].shift and divide by shared_bound[i].stride.
1802 * If the given array is accessed directly from global memory,
1803 * we don't need to perform any shifting and simply simplify
1804 * the expression in the context of the domain instead.
1806 * If the array space (range of access) has no name, then we are
1807 * accessing an iterator in the original program.
1809 static void print_access(struct cuda_gen *gen, __isl_take isl_map *access,
1810 int group_nr)
1812 int i;
1813 const char *name;
1814 unsigned n_index;
1815 struct cuda_array_info *array = NULL;
1816 isl_printer *prn;
1817 isl_basic_set *aff;
1818 isl_set *data_set;
1819 isl_set *domain;
1820 struct cuda_array_bound *bounds = NULL;
1822 access = isl_map_align_params(access,
1823 isl_set_get_space(gen->stmt_domain));
1825 data_set = isl_set_apply(isl_set_copy(gen->stmt_domain), access);
1827 name = isl_set_get_tuple_name(data_set);
1829 if (!name)
1830 fprintf(gen->cuda.kernel_c, "(");
1831 else {
1832 struct cuda_array_ref_group *group;
1834 for (i = 0; i < gen->n_array; ++i) {
1835 if (strcmp(name, gen->array[i].name))
1836 continue;
1837 array = &gen->array[i];
1839 assert(array);
1840 group = array->groups[group_nr];
1841 bounds = group->private_bound;
1842 if (!bounds)
1843 bounds = group->shared_bound;
1845 if (!bounds && cuda_array_is_scalar(array))
1846 fprintf(gen->cuda.kernel_c, "*");
1847 print_array_name(gen->cuda.kernel_c, group);
1849 if (cuda_array_is_scalar(array)) {
1850 isl_set_free(data_set);
1851 return;
1854 fprintf(gen->cuda.kernel_c, "[");
1858 n_index = isl_set_dim(data_set, isl_dim_set);
1859 aff = isl_set_affine_hull(data_set);
1861 prn = isl_printer_to_file(gen->ctx, gen->cuda.kernel_c);
1862 prn = isl_printer_set_output_format(prn, ISL_FORMAT_C);
1864 if (!bounds)
1865 for (i = 0; i + 1 < n_index; ++i)
1866 prn = isl_printer_print_str(prn, "(");
1868 for (i = 0; i < n_index; ++i) {
1869 isl_constraint *c;
1870 isl_aff *index;
1871 int ok;
1873 ok = isl_basic_set_has_defining_equality(aff,
1874 isl_dim_out, i, &c);
1875 assert(ok);
1876 index = isl_constraint_get_bound(c, isl_dim_out, i);
1877 isl_constraint_free(c);
1878 index = isl_aff_project_domain_on_params(index);
1880 if (!array) {
1881 prn = isl_printer_print_aff(prn, index);
1882 isl_aff_free(index);
1883 continue;
1886 domain = isl_set_copy(gen->stmt_domain);
1887 domain = isl_set_project_out(domain, isl_dim_set, 0,
1888 isl_set_dim(domain, isl_dim_set));
1889 if (!bounds)
1890 index = isl_aff_gist(index, domain);
1891 else
1892 index = shift_index(index, array, &bounds[i], domain);
1894 if (i) {
1895 if (!bounds) {
1896 prn = isl_printer_print_str(prn, ") * (");
1897 prn = isl_printer_print_pw_aff(prn,
1898 array->local_bound[i]);
1899 prn = isl_printer_print_str(prn, ") + ");
1900 } else
1901 prn = isl_printer_print_str(prn, "][");
1903 prn = isl_printer_print_aff(prn, index);
1904 isl_aff_free(index);
1906 if (!name)
1907 prn = isl_printer_print_str(prn, ")");
1908 else
1909 prn = isl_printer_print_str(prn, "]");
1910 isl_printer_free(prn);
1912 isl_basic_set_free(aff);
1915 static struct cuda_stmt_access *print_expr(struct cuda_gen *gen, FILE *out,
1916 struct pet_expr *expr, struct cuda_stmt_access *access, int outer)
1918 int i;
1920 switch (expr->type) {
1921 case pet_expr_double:
1922 fprintf(out, "%g", expr->d);
1923 break;
1924 case pet_expr_access:
1925 print_access(gen, isl_map_copy(access->access), access->group);
1926 access = access->next;
1927 break;
1928 case pet_expr_unary:
1929 if (!outer)
1930 fprintf(out, "(");
1931 fprintf(out, " %s ", pet_op_str(expr->op));
1932 access = print_expr(gen, out, expr->args[pet_un_arg],
1933 access, 0);
1934 if (!outer)
1935 fprintf(out, ")");
1936 break;
1937 case pet_expr_binary:
1938 if (!outer)
1939 fprintf(out, "(");
1940 access = print_expr(gen, out, expr->args[pet_bin_lhs],
1941 access, 0);
1942 fprintf(out, " %s ", pet_op_str(expr->op));
1943 access = print_expr(gen, out, expr->args[pet_bin_rhs],
1944 access, 0);
1945 if (!outer)
1946 fprintf(out, ")");
1947 break;
1948 case pet_expr_ternary:
1949 if (!outer)
1950 fprintf(out, "(");
1951 access = print_expr(gen, out, expr->args[pet_ter_cond],
1952 access, 0);
1953 fprintf(out, " ? ");
1954 access = print_expr(gen, out, expr->args[pet_ter_true],
1955 access, 0);
1956 fprintf(out, " : ");
1957 access = print_expr(gen, out, expr->args[pet_ter_false],
1958 access, 0);
1959 if (!outer)
1960 fprintf(out, ")");
1961 break;
1962 case pet_expr_call:
1963 fprintf(out, "%s(", expr->name);
1964 for (i = 0; i < expr->n_arg; ++i) {
1965 if (i)
1966 fprintf(out, ", ");
1967 access = print_expr(gen, out, expr->args[i],
1968 access, 1);
1970 fprintf(out, ")");
1972 return access;
1975 static void print_stmt_body(struct cuda_gen *gen,
1976 FILE *out, struct cuda_stmt *stmt)
1978 print_expr(gen, out, stmt->body, stmt->accesses, 1);
1979 fprintf(out, ";\n");
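/* As an illustration (hypothetical input), a statement body such as
 *
 *	A[i][j] = B[j] + 0.5;
 *
 * is reprinted by print_expr with each array access replaced through
 * print_access, yielding something like
 *
 *	shared_A[...][...] = (shared_B[...] + 0.5);
 *
 * depending on the memory each reference group was mapped to.
 */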
1982 /* This function is called for each leaf in the innermost clast,
1983 * i.e., for each statement.
1984 * We print the statement body, simplifying the accesses based
1985 * on the schedule.
1987 static void print_statement(struct gpucode_info *code,
1988 struct clast_user_stmt *u)
1990 struct cuda_gen *gen = code->user;
1991 isl_space *dim;
1992 isl_set *par;
1993 isl_set *stmt_domain;
1994 isl_union_map *stmt_sched;
1995 isl_union_set *uset;
1996 int nr;
1997 struct cuda_stmt *stmt;
1999 nr = atoi(u->statement->name + 2);
2000 stmt = &gen->stmts[nr];
2002 stmt_domain = extract_host_domain(u);
2004 stmt_sched = isl_union_map_intersect_range(
2005 isl_union_map_copy(gen->local_sched),
2006 isl_union_set_from_set(extend(stmt_domain,
2007 gen->thread_tiled_len)));
2008 dim = isl_union_map_get_space(stmt_sched);
2009 par = parametrization(dim, gen->thread_tiled_len, 0,
2010 gen->thread_tiled_len, "c");
2011 stmt_sched = isl_union_map_intersect_range(stmt_sched,
2012 isl_union_set_from_set(par));
2014 uset = isl_union_map_domain(stmt_sched);
2015 dim = isl_union_set_get_space(uset);
2016 dim = isl_space_add_dims(dim, isl_dim_set,
2017 isl_set_dim(stmt->domain, isl_dim_set));
2018 dim = isl_space_set_tuple_name(dim, isl_dim_set, u->statement->name);
2019 gen->stmt_domain = isl_union_set_extract_set(uset, dim);
2020 isl_union_set_free(uset);
2022 print_indent(code->dst, code->indent);
2023 print_stmt_body(gen, code->dst, stmt);
2025 isl_set_free(gen->stmt_domain);
2028 static void print_private_access(struct cuda_gen *gen,
2029 __isl_keep isl_set *shared_domain, __isl_take isl_set *access,
2030 const char *type, struct cuda_array_ref_group *group)
2032 const char *array_name;
2033 char *name;
2034 isl_ctx *ctx;
2035 unsigned nvar = isl_set_dim(access, isl_dim_set);
2036 isl_union_map *usched;
2038 if (isl_set_fast_is_empty(access)) {
2039 isl_set_free(access);
2040 return;
2043 ctx = isl_set_get_ctx(access);
2044 array_name = isl_set_get_tuple_name(access);
2045 name = isl_alloc_array(ctx, char,
2046 strlen(type) + sizeof("_private_") + strlen(array_name) + 20);
2047 if (group->array->n_group > 1)
2048 sprintf(name, "%s_private_%s_%d", type, array_name, group->nr);
2049 else
2050 sprintf(name, "%s_private_%s", type, array_name);
2051 access = isl_set_set_tuple_name(access, name);
2052 free(name);
2054 gen->copy_sched = shift_access(access, group);
2055 gen->copy_group = group;
2056 gen->copy_bound = group->private_bound;
2058 usched = isl_union_map_from_map(isl_map_copy(gen->copy_sched));
2059 print_shared_body(gen, shared_domain, usched, nvar,
2060 &print_copy_statement, 1);
2061 isl_union_map_free(usched);
2063 isl_map_free(gen->copy_sched);
2066 /* Print code for reading into or writing from private memory
2067 * the given array reference group.
2069 * sched maps the original iteration domains to the shared memory tile loops.
2071 static void print_group_private_accesses(struct cuda_gen *gen,
2072 struct cuda_array_ref_group *group,
2073 const char *type, __isl_keep isl_set *shared_domain,
2074 unsigned first_shared, int shared_len, __isl_keep isl_union_map *sched)
2076 int read;
2077 isl_union_map *access;
2078 isl_union_set *uset;
2079 isl_set *access_set;
2081 if (!group->private_bound)
2082 return;
2084 read = !strcmp(type, "read");
2086 access = group_access_relation(group, read, !read);
2087 access = isl_union_map_apply_domain(access, isl_union_map_copy(sched));
2088 access = isl_union_map_intersect(access,
2089 isl_union_map_copy(gen->private_access));
2090 uset = isl_union_map_range(access);
2092 if (isl_union_set_is_empty(uset)) {
2093 isl_union_set_free(uset);
2094 return;
2097 access_set = isl_set_from_union_set(uset);
2098 access_set = isl_set_coalesce(access_set);
2099 access_set = isl_set_eliminate(access_set, isl_dim_param,
2100 first_shared + shared_len,
2101 gen->shared_len - shared_len);
2103 print_private_access(gen, shared_domain, access_set, type, group);
2106 /* Print code for reading into or writing from private memory at
2107 * the given level (-1 for innermost).
2109 * If we are not printing at the innermost level, then the dimensionality
2110 * of shared_domain may be smaller than gen->shared_len.
2111 * As the rest of the code assumes that the domain of access has
2112 * gen->shared_len dimensions, we therefore may need to embed this domain
2113 * in a higher dimensional space after intersection with shared_domain.
2115 * This code is very similar to print_shared_accesses.
2116 * The main difference is that we need to take into account gen->private_access.
2118 static void print_private_accesses(struct cuda_gen *gen,
2119 __isl_keep isl_set *shared_domain, __isl_keep isl_union_map *access,
2120 const char *type, int level)
2122 int i, j;
2123 isl_space *dim;
2124 isl_map *proj;
2125 int shared_len = isl_set_dim(shared_domain, isl_dim_set);
2126 unsigned first_shared;
2127 isl_union_map *sched;
2129 shared_domain = isl_set_copy(shared_domain);
2130 sched = isl_union_map_copy(gen->tiled_sched);
2131 dim = isl_union_map_get_space(sched);
2132 first_shared = isl_space_dim(dim, isl_dim_param);
2133 proj = projection(dim, gen->tiled_len, shared_len);
2134 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
2135 sched = isl_union_map_intersect_range(sched,
2136 isl_union_set_from_set(isl_set_copy(shared_domain)));
2137 if (shared_len != gen->shared_len) {
2138 dim = isl_union_map_get_space(sched);
2139 proj = projection(dim, gen->shared_len, shared_len);
2140 proj = isl_map_reverse(proj);
2141 shared_domain = isl_set_apply(shared_domain,
2142 isl_map_copy(proj));
2143 sched = isl_union_map_apply_range(sched,
2144 isl_union_map_from_map(proj));
2147 for (i = 0; i < gen->n_array; ++i) {
2148 struct cuda_array_info *array = &gen->array[i];
2150 if (gen->array[i].print_shared_level != level)
2151 continue;
2153 for (j = 0; j < array->n_group; ++j)
2154 print_group_private_accesses(gen, array->groups[j],
2155 type, shared_domain,
2156 first_shared, shared_len, sched);
2159 isl_union_map_free(sched);
2160 isl_set_free(shared_domain);
2163 /* Set unroll[j] if the input dimension j is involved in
2164 * the index expression represented by bmap.
2166 static int check_unroll(__isl_take isl_basic_map *bmap, void *user)
2168 int i, j;
2169 int n_in = isl_basic_map_dim(bmap, isl_dim_in);
2170 int n_out = isl_basic_map_dim(bmap, isl_dim_out);
2171 int *unroll = user;
2173 for (i = 0; i < n_out; ++i) {
2174 isl_constraint *c;
2175 int ok;
2177 ok = isl_basic_map_has_defining_equality(bmap,
2178 isl_dim_out, i, &c);
2179 assert(ok);
2180 for (j = 0; j < n_in; ++j)
2181 if (isl_constraint_involves_dims(c, isl_dim_in, j, 1))
2182 unroll[j] = 1;
2183 isl_constraint_free(c);
2186 isl_basic_map_free(bmap);
2187 return 0;
2190 /* Given an array pos mapping input dimensions to the corresponding
2191 * output dimension, construct the corresponding map.
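 *
 * For example (with hypothetical values), pos = { 2, 0, 1 } and len = 3
 * yield the map
 *
 *	{ [i0, i1, i2] -> [i1, i2, i0] }
 *
 * since input dimension i is equated to output dimension pos[i].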
2193 static __isl_give isl_map *permutation(__isl_take isl_space *dim,
2194 int *pos, int len)
2196 int i;
2197 isl_constraint *c;
2198 isl_basic_map *bmap;
2199 isl_local_space *ls;
2201 dim = isl_space_add_dims(dim, isl_dim_in, len);
2202 dim = isl_space_add_dims(dim, isl_dim_out, len);
2203 bmap = isl_basic_map_universe(isl_space_copy(dim));
2204 ls = isl_local_space_from_space(dim);
2206 for (i = 0; i < len; ++i) {
2207 c = isl_equality_alloc(isl_local_space_copy(ls));
2208 isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
2209 isl_constraint_set_coefficient_si(c, isl_dim_out, pos[i], 1);
2210 bmap = isl_basic_map_add_constraint(bmap, c);
2212 isl_local_space_free(ls);
2214 return isl_map_from_basic_map(bmap);
2217 /* Find all loops involved in any of the index expressions for any of
2218 * the private accesses, move them innermost and then mark them as
2219 * requiring unrolling by setting gen->first_unroll.
2220 * The loops involved should all be parallel because of the checks
2221 * we performed in check_private_group_access. Moving them innermost
2222 * is therefore a valid transformation.
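 *
 * As an illustration with hypothetical sizes, if thread_tiled_len = 4
 * and unroll = { 0, 1, 0, 1 }, then the loops that are not unrolled are
 * numbered first, giving perm = { 0, 2, 1, 3 }, and gen->first_unroll
 * is set to 1 + 2 = 3, i.e., one more than the number of loops that
 * are not unrolled.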
2224 static __isl_give isl_union_map *interchange_for_unroll(struct cuda_gen *gen,
2225 __isl_take isl_union_map *sched)
2227 int i, j;
2228 int unroll[gen->thread_tiled_len];
2229 int perm[gen->thread_tiled_len];
2230 isl_space *dim;
2231 isl_map *permute;
2232 int len = gen->shared_len + gen->n_parallel + gen->n_block;
2234 gen->first_unroll = -1;
2236 for (i = 0; i < gen->thread_tiled_len; ++i)
2237 unroll[i] = 0;
2238 for (i = 0; i < gen->n_array; ++i) {
2239 struct cuda_array_info *array = &gen->array[i];
2241 for (j = 0; j < array->n_group; ++j) {
2242 isl_union_map *access;
2243 isl_map *acc;
2245 if (!array->groups[j]->private_bound)
2246 continue;
2248 access = group_access_relation(array->groups[j], 1, 1);
2249 access = isl_union_map_apply_domain(access,
2250 isl_union_map_copy(sched));
2252 acc = isl_map_from_union_map(access);
2253 isl_map_foreach_basic_map(acc, &check_unroll, unroll);
2255 isl_map_free(acc);
2259 for (i = 0; i < gen->shared_len; ++i)
2260 if (unroll[i])
2261 return sched;
2263 for (i = gen->shared_len; i < len; ++i)
2264 if (unroll[i])
2265 break;
2267 if (i >= len)
2268 return sched;
2270 for (i = len; i < gen->thread_tiled_len; ++i)
2271 if (unroll[i])
2272 return sched;
2274 j = 0;
2275 for (i = 0; i < gen->thread_tiled_len; ++i)
2276 if (!unroll[i])
2277 perm[i] = j++;
2278 gen->first_unroll = 1 + j;
2279 for (i = 0; i < len; ++i)
2280 if (unroll[i])
2281 perm[i] = j++;
2283 dim = isl_union_map_get_space(sched);
2284 permute = permutation(dim, perm, gen->thread_tiled_len);
2285 sched = isl_union_map_apply_range(sched,
2286 isl_union_map_from_map(permute));
2288 return sched;
2291 /* This function is called for each leaf in the clast of the kernel code.
2292 * We first specialize the schedule to the site of the leaf and
2293 * print code for reading into shared memory, performing the actual
2294 * computations and writing from shared memory, with the required
2295 * synchronizations.
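 *
 * The body of the generated kernel for such a leaf therefore roughly
 * has the following shape (a sketch only; the actual copying code
 * depends on the computed tiles):
 *
 *	... copy read data into shared memory ...
 *	... copy read data into registers ...
 *	... perform the computation for this tile ...
 *	... copy written data back from registers ...
 *	__syncthreads();
 *	... copy written data from shared memory back to global memory ...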
2297 static void print_kernel_user(struct gpucode_info *code,
2298 struct clast_user_stmt *u)
2300 struct cuda_gen *gen = code->user;
2301 isl_set *shared_domain;
2303 shared_domain = extract_entire_host_domain(u);
2305 print_shared_accesses(gen, shared_domain, gen->read, "read", -1);
2307 print_private_accesses(gen, shared_domain, gen->read, "read", -1);
2309 print_shared_body(gen, shared_domain, gen->local_sched,
2310 gen->thread_tiled_len, &print_statement,
2311 gen->first_unroll);
2313 print_private_accesses(gen, shared_domain, gen->write, "write", -1);
2315 print_indent(gen->cuda.kernel_c, gen->kernel_code.indent);
2316 fprintf(gen->cuda.kernel_c, "__syncthreads();\n");
2318 print_shared_accesses(gen, shared_domain, gen->write, "write", -1);
2320 isl_set_free(shared_domain);
2323 /* Check if we need to perform any copying to shared memory at this level
2324 * and if so, print the copying instructions.
2325 * Any array for which we are allowed to print copying instructions at
2326 * this level, but haven't done so already, is printed.
2328 static void print_kernel_for_head(struct gpucode_info *code,
2329 struct clast_for *f)
2331 int i;
2332 struct cuda_gen *gen = code->user;
2333 isl_set *domain;
2334 int level;
2335 int print = 0;
2337 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2338 level = isl_set_dim(domain, isl_dim_set) - 1;
2340 for (i = 0; i < gen->n_array; ++i) {
2341 if (gen->array[i].print_shared_level >= 0)
2342 continue;
2343 if (gen->array[i].last_shared > level)
2344 continue;
2345 gen->array[i].print_shared_level = level;
2346 print = 1;
2349 if (print) {
2350 print_shared_accesses(gen, domain, gen->read, "read", level);
2351 print_private_accesses(gen, domain, gen->read, "read", level);
2354 isl_set_free(domain);
2357 /* Print instructions for copying from shared memory for each array
2358 * for which print_kernel_for_head has added copying instructions
2359 * to shared memory.
2361 static void print_kernel_for_foot(struct gpucode_info *code,
2362 struct clast_for *f)
2364 int i;
2365 struct cuda_gen *gen = code->user;
2366 isl_set *domain;
2367 int level;
2368 int print = 0;
2370 domain = isl_set_from_cloog_domain(cloog_domain_copy(f->domain));
2371 level = isl_set_dim(domain, isl_dim_set) - 1;
2373 for (i = 0; i < gen->n_array; ++i) {
2374 if (gen->array[i].print_shared_level != level)
2375 continue;
2376 print = 1;
2377 break;
2380 if (print) {
2381 print_private_accesses(gen, domain, gen->write, "write", level);
2382 print_shared_accesses(gen, domain, gen->write, "write", level);
2385 isl_set_free(domain);
2388 /* Use CLooG to generate code for the outer gen->shared_len loops
2389 * of the local schedule "sched".
2390 * The pretty printing of this code is handled by gpu_print_host_stmt,
2391 * which calls print_kernel_user for each iteration of the shared tile loops.
2393 static void print_cloog_kernel_body(struct cuda_gen *gen,
2394 __isl_keep isl_set *context, __isl_keep isl_union_map *sched)
2396 int i;
2397 CloogOptions *options;
2398 CloogDomain *cloog_context;
2399 CloogUnionDomain *ud;
2400 CloogInput *input;
2401 struct clast_stmt *stmt;
2402 char name[20];
2404 sched = isl_union_map_copy(sched);
2405 sched = isl_union_map_align_params(sched, isl_set_get_space(context));
2407 options = cloog_options_malloc(gen->state);
2408 options->language = CLOOG_LANGUAGE_C;
2409 options->strides = 1;
2410 options->sh = 1;
2411 options->stop = gen->shared_len;
2412 options->f = gen->tiled_len;
2413 options->l = gen->tiled_len;
2414 options->save_domains = 1;
2415 options->noscalars = 1;
2417 ud = cloog_union_domain_from_isl_union_map(sched);
2418 for (i = 0; i < gen->shared_len; ++i) {
2419 snprintf(name, sizeof(name), "g%d", i);
2420 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
2422 cloog_context = cloog_domain_from_isl_set(isl_set_copy(context));
2423 input = cloog_input_alloc(cloog_context, ud);
2425 stmt = cloog_clast_create_from_input(input, options);
2427 gen->kernel_code.indent = 4;
2428 gen->kernel_code.dst = gen->cuda.kernel_c;
2429 gen->kernel_code.print_user_stmt = NULL;
2430 gen->kernel_code.print_user_stmt_list = &print_kernel_user;
2431 gen->kernel_code.print_for_head = &print_kernel_for_head;
2432 gen->kernel_code.print_for_foot = &print_kernel_for_foot;
2433 gen->kernel_code.user = gen;
2434 gpu_print_host_stmt(&gen->kernel_code, stmt);
2436 cloog_clast_free(stmt);
2437 cloog_options_free(options);
2440 static void print_kernel_iterators(struct cuda_gen *gen)
2442 int i;
2443 const char *block_dims[] = { "blockIdx.x", "blockIdx.y" };
2444 const char *thread_dims[] = { "threadIdx.x", "threadIdx.y",
2445 "threadIdx.z" };
2447 if (gen->n_grid > 0) {
2448 print_indent(gen->cuda.kernel_c, 4);
2449 fprintf(gen->cuda.kernel_c, "int ");
2450 for (i = 0; i < gen->n_grid; ++i) {
2451 if (i)
2452 fprintf(gen->cuda.kernel_c, ", ");
2453 fprintf(gen->cuda.kernel_c, "b%d = %s",
2454 i, block_dims[gen->n_grid - 1 - i]);
2456 fprintf(gen->cuda.kernel_c, ";\n");
2459 if (gen->n_block > 0) {
2460 print_indent(gen->cuda.kernel_c, 4);
2461 fprintf(gen->cuda.kernel_c, "int ");
2462 for (i = 0; i < gen->n_block; ++i) {
2463 if (i)
2464 fprintf(gen->cuda.kernel_c, ", ");
2465 fprintf(gen->cuda.kernel_c, "t%d = %s",
2466 i, thread_dims[gen->n_block - 1 - i]);
2468 fprintf(gen->cuda.kernel_c, ";\n");
2472 static void print_group_shared_array(struct cuda_gen *gen,
2473 struct cuda_array_ref_group *group)
2475 int j;
2476 struct cuda_array_bound *bounds;
2478 bounds = group->private_bound;
2479 if (!bounds)
2480 bounds = group->shared_bound;
2481 if (!bounds)
2482 return;
2484 print_indent(gen->cuda.kernel_c, 4);
2485 fprintf(gen->cuda.kernel_c, "%s%s ",
2486 group->private_bound ? "" : "__shared__ ", group->array->type);
2487 print_array_name(gen->cuda.kernel_c, group);
2488 for (j = 0; j < group->array->n_index; ++j) {
2489 fprintf(gen->cuda.kernel_c, "[");
2490 isl_int_print(gen->cuda.kernel_c, bounds[j].size, 0);
2491 fprintf(gen->cuda.kernel_c, "]");
2493 fprintf(gen->cuda.kernel_c, ";\n");
2496 static void print_shared_arrays(struct cuda_gen *gen)
2498 int i, j;
2500 for (i = 0; i < gen->n_array; ++i) {
2501 struct cuda_array_info *array = &gen->array[i];
2503 for (j = 0; j < array->n_group; ++j)
2504 print_group_shared_array(gen, array->groups[j]);
2508 static void print_kernel_body(struct cuda_gen *gen,
2509 __isl_keep isl_set *host_domain, __isl_keep isl_union_map *sched)
2511 isl_set *context;
2513 context = isl_set_copy(host_domain);
2514 context = parametrize(context, 0, gen->tile_first, "h");
2515 context = isl_set_project_out(context, isl_dim_set, 0, gen->tile_first);
2516 context = add_bounded_parameters(context,
2517 gen->n_grid, gen->grid_dim, "b");
2519 print_kernel_iterators(gen);
2520 print_shared_arrays(gen);
2522 fprintf(gen->cuda.kernel_c, "\n");
2524 print_cloog_kernel_body(gen, context, sched);
2526 isl_set_free(context);
2529 /* Given a constraint
2531 * a(p,i) + j = g f(e)
2533 * or -a(p,i) - j = g f(e) if sign < 0,
2534 * store a(p,i) in bound->shift and g (stride) in bound->stride.
2535 * a(p,i) is assumed to be an expression in only the parameters.
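 *
 * For instance (a hypothetical constraint), from
 *
 *	n + j = 4 e0
 *
 * we would record the shift n and the stride 4.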
2537 static void extract_stride(__isl_keep isl_constraint *c,
2538 struct cuda_array_bound *bound, isl_int stride, int sign)
2540 int i;
2541 isl_int v;
2542 isl_space *dim;
2543 unsigned nparam;
2544 isl_aff *aff;
2546 isl_int_set(bound->stride, stride);
2548 dim = isl_constraint_get_space(c);
2549 dim = isl_space_params(dim);
2551 nparam = isl_space_dim(dim, isl_dim_param);
2553 isl_int_init(v);
2555 isl_constraint_get_constant(c, &v);
2556 if (sign < 0)
2557 isl_int_neg(v, v);
2558 aff = isl_aff_zero_on_domain(isl_local_space_from_space(dim));
2559 aff = isl_aff_set_constant(aff, v);
2561 for (i = 0; i < nparam; ++i) {
2562 isl_constraint_get_coefficient(c, isl_dim_param, i, &v);
2563 if (isl_int_is_zero(v))
2564 continue;
2565 if (sign < 0)
2566 isl_int_neg(v, v);
2567 aff = isl_aff_add_coefficient(aff, isl_dim_param, i, v);
2570 isl_int_clear(v);
2572 bound->shift = aff;
2575 /* Given an equality constraint of a map with a single output dimension j,
2576 * check if the constraint is of the form
2578 * a(p,i) + j = g f(e)
2580 * with a(p,i) an expression in the parameters and input dimensions
2581 * and f(e) an expression in the existentially quantified variables.
2582 * If so, and if g is larger than any such g from a previously considered
2583 * constraint, then call extract_stride to record the stride information
2584 * in bound.
2586 static int check_stride_constraint(__isl_take isl_constraint *c, void *user)
2588 int i;
2589 isl_int v, stride;
2590 unsigned n_div;
2591 struct cuda_array_bound *bound = user;
2593 isl_int_init(v);
2594 isl_int_init(stride);
2596 n_div = isl_constraint_dim(c, isl_dim_div);
2597 isl_constraint_get_coefficient(c, isl_dim_out, 0, &v);
2599 if (n_div && (isl_int_is_one(v) || isl_int_is_negone(v))) {
2600 int s = isl_int_sgn(v);
2601 isl_int_set_si(stride, 0);
2602 for (i = 0; i < n_div; ++i) {
2603 isl_constraint_get_coefficient(c, isl_dim_div, i, &v);
2604 isl_int_gcd(stride, stride, v);
2606 if (!isl_int_is_zero(stride) &&
2607 isl_int_gt(stride, bound->stride))
2608 extract_stride(c, bound, stride, s);
2611 isl_int_clear(stride);
2612 isl_int_clear(v);
2614 isl_constraint_free(c);
2615 return 0;
2618 /* Given constraints on an array index i, check if we can find
2619 * a shift a(p) and a stride g such that
2621 * a(p) + i = 0 mod g
2623 * If so, record the information in bound and apply the mapping
2624 * i -> (i + a(p))/g to the array index in bounds and return
2625 * the new constraints.
2626 * If not, simply return the original constraints.
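 *
 * For example (purely illustrative), if the index i always satisfies
 * i + n = 0 mod 4, then we record the shift n and the stride 4 and
 * apply the mapping
 *
 *	i -> (i + n)/4
 *
 * to the array index in "bounds".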
2628 static __isl_give isl_basic_map *check_stride(struct cuda_gen *gen,
2629 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2631 isl_basic_map *aff;
2632 isl_basic_map *shift;
2633 isl_aff *aff_shift;
2635 isl_int_set_si(bound->stride, -1);
2637 aff = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));
2639 isl_basic_map_foreach_constraint(aff, &check_stride_constraint, bound);
2641 isl_basic_map_free(aff);
2643 if (isl_int_is_neg(bound->stride))
2644 return bounds;
2646 aff_shift = isl_aff_copy(bound->shift);
2647 aff_shift = isl_aff_add_dims(aff_shift, isl_dim_in, 1);
2648 aff_shift = isl_aff_add_coefficient_si(aff_shift, isl_dim_in, 0, 1);
2649 aff_shift = isl_aff_scale_down(aff_shift, bound->stride);
2650 shift = isl_basic_map_from_aff(aff_shift);
2652 bound->shift_map = isl_basic_map_copy(shift);
2653 bounds = isl_basic_map_apply_range(bounds, shift);
2655 return bounds;
2658 struct cuda_size_info {
2659 isl_basic_set *bset;
2660 struct cuda_array_bound *bound;
2661 int pos;
2664 /* Given a constraint from the basic set describing the bounds on
2665 * an array index, check if it is a lower bound, say m i >= b(x), and,
2666 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
2667 * upper bound. If so, and if this bound is smaller than any bound
2668 * derived from earlier constraints, set the size to this bound on
2669 * the expression and the lower bound to ceil(b(x)/m).
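 *
 * For example (hypothetical constraints), the pair
 *
 *	i >= n   and   i <= n + 31
 *
 * results in the lower bound n and the size 32 for this index.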
2671 static int compute_size_in_direction(__isl_take isl_constraint *c, void *user)
2673 struct cuda_size_info *size = user;
2674 unsigned nparam;
2675 unsigned n_div;
2676 isl_int v;
2678 nparam = isl_basic_set_dim(size->bset, isl_dim_param);
2679 n_div = isl_constraint_dim(c, isl_dim_div);
2681 if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div)) {
2682 isl_constraint_free(c);
2683 return 0;
2686 isl_int_init(v);
2688 isl_constraint_get_coefficient(c, isl_dim_set, size->pos, &v);
2690 if (isl_int_is_pos(v)) {
2691 isl_aff *aff;
2692 isl_aff *lb;
2693 enum isl_lp_result res;
2695 aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
2696 aff = isl_aff_ceil(aff);
2698 lb = isl_aff_copy(aff);
2700 aff = isl_aff_neg(aff);
2701 aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);
2703 res = isl_basic_set_max(size->bset, aff, &v);
2704 isl_aff_free(aff);
2706 if (res == isl_lp_ok) {
2707 isl_int_add_ui(v, v, 1);
2708 if (isl_int_is_neg(size->bound->size) ||
2709 isl_int_lt(v, size->bound->size)) {
2710 isl_int_set(size->bound->size, v);
2711 lb = isl_aff_drop_dims(lb, isl_dim_in,
2712 0, size->pos + 1);
2713 isl_aff_free(size->bound->lb);
2714 size->bound->lb = isl_aff_copy(lb);
2717 isl_aff_free(lb);
2720 isl_int_clear(v);
2721 isl_constraint_free(c);
2723 return 0;
2726 /* Given a basic map "bounds" that maps parameters and input dimensions
2727 * to a single output dimension, look for an expression in the parameters
2728 * and input dimensions such that the range of the output dimension shifted
2729 * by this expression is a constant.
2731 * In particular, we currently only consider lower bounds on the output
2732 * dimension as candidate expressions.
2734 static int compute_array_dim_size(struct cuda_gen *gen,
2735 struct cuda_array_bound *bound, __isl_take isl_basic_map *bounds)
2737 struct cuda_size_info size;
2739 bounds = isl_basic_map_detect_equalities(bounds);
2740 bounds = check_stride(gen, bound, bounds);
2742 isl_int_set_si(bound->size, -1);
2743 bound->lb = NULL;
2745 size.bound = bound;
2746 size.pos = isl_basic_map_dim(bounds, isl_dim_in);
2747 size.bset = isl_basic_map_wrap(bounds);
2748 size.bset = isl_basic_set_flatten(size.bset);
2749 size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
2750 isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
2751 &size);
2752 isl_basic_set_free(size.bset);
2754 return isl_int_is_nonneg(bound->size) ? 0 : -1;
2757 /* Check if we can find a shared memory tile for the given array
2758 * based on the given accesses, and if so, put the results
2759 * in array->shared_bound.
2761 * We project the accesses on each index in turn and look for a parametric
2762 * offset such that the size is constant.
2764 static int can_tile_for_shared_memory(struct cuda_gen *gen,
2765 struct cuda_array_info *array, __isl_keep isl_map *access,
2766 struct cuda_array_bound *bounds)
2768 int i;
2770 for (i = 0; i < array->n_index; ++i) {
2771 isl_map *access_i;
2772 isl_basic_map *hull;
2774 access_i = isl_map_copy(access);
2775 access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
2776 access_i = isl_map_project_out(access_i, isl_dim_out,
2777 1, array->n_index - (i + 1));
2778 access_i = isl_map_compute_divs(access_i);
2779 hull = isl_map_simple_hull(access_i);
2780 if (compute_array_dim_size(gen, &bounds[i], hull) < 0)
2781 return 0;
2784 return 1;
2787 /* Construct a map with input the shared tile loops and the loops that
2788 * will be wrapped around the threads. The map relates these latter loops
2789 * to the thread indices and then projects them out.
2791 static __isl_give isl_map *compute_privatization(struct cuda_gen *gen)
2793 isl_map *priv;
2794 isl_map *tiling;
2795 isl_map *proj;
2796 isl_set *par;
2797 isl_space *dim;
2799 dim = isl_union_map_get_space(gen->shared_sched);
2801 if (gen->options->wrap)
2802 tiling = wrap(isl_space_copy(dim), gen->shared_len + gen->n_block,
2803 gen->shared_len, gen->n_block, gen->block_dim);
2804 else
2805 tiling = tile(isl_space_copy(dim), gen->shared_len + gen->n_block,
2806 gen->shared_len, gen->n_block, gen->block_dim);
2808 priv = tiling;
2810 par = parametrization(dim, gen->shared_len + 2 * gen->n_block,
2811 gen->tile_first + gen->tile_len + gen->n_grid + gen->n_block,
2812 gen->n_block, "t");
2814 priv = isl_map_align_params(priv, isl_set_get_space(par));
2815 priv = isl_map_intersect_range(priv, par);
2817 dim = isl_map_get_space(priv);
2818 dim = isl_space_drop_dims(dim, isl_dim_in, 0, isl_space_dim(dim, isl_dim_in));
2819 dim = isl_space_drop_dims(dim, isl_dim_out, 0, isl_space_dim(dim, isl_dim_out));
2820 proj = projection(dim, gen->shared_len + 2 * gen->n_block,
2821 gen->shared_len);
2823 priv = isl_map_apply_range(priv, proj);
2825 return priv;
2828 /* Construct a map from domain_dim to domain_dim that increments
2829 * the dimension at position "pos" and leaves all other dimensions
2830 * constant.
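 *
 * For example, on a 3-dimensional space, next(dim, 1) constructs
 * (up to the exact space)
 *
 *	{ [i0, i1, i2] -> [i0, i1 + 1, i2] }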
2832 static __isl_give isl_map *next(__isl_take isl_space *domain_dim, int pos)
2834 int i;
2835 int len = isl_space_dim(domain_dim, isl_dim_set);
2836 isl_space *dim;
2837 isl_basic_map *next;
2838 isl_local_space *ls;
2840 dim = isl_space_map_from_set(domain_dim);
2841 next = isl_basic_map_universe(isl_space_copy(dim));
2842 ls = isl_local_space_from_space(dim);
2844 for (i = 0; i < len; ++i) {
2845 isl_constraint *c;
2847 c = isl_equality_alloc(isl_local_space_copy(ls));
2848 isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
2849 isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
2850 if (i == pos)
2851 isl_constraint_set_constant_si(c, 1);
2852 next = isl_basic_map_add_constraint(next, c);
2855 isl_local_space_free(ls);
2857 return isl_map_from_basic_map(next);
2860 /* Check if the given access is coalesced.
2861 * That is, check whether incrementing the dimension that will get
2862 * wrapped over the last thread index results in incrementing
2863 * the last array index.
2865 * This function is only called for access relations without reuse.
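 *
 * Informally, and with hypothetical names, an access A[i][t] where t is
 * the dimension wrapped over threadIdx.x is coalesced, since incrementing
 * t moves to the next element of A, while A[t][i] is not.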
2867 static int access_is_coalesced(struct cuda_gen *gen,
2868 __isl_keep isl_union_map *access)
2870 isl_space *dim;
2871 isl_map *access_map;
2872 isl_map *next_thread_x;
2873 isl_map *next_element;
2874 isl_map *map;
2875 int coalesced;
2877 access = isl_union_map_copy(access);
2878 access = isl_union_map_apply_domain(access,
2879 isl_union_map_copy(gen->tiled_sched));
2880 access_map = isl_map_from_union_map(access);
2882 dim = isl_map_get_space(access_map);
2883 dim = isl_space_domain(dim);
2884 next_thread_x = next(dim, gen->shared_len + gen->n_block - 1);
2886 dim = isl_map_get_space(access_map);
2887 dim = isl_space_range(dim);
2888 next_element = next(dim, isl_space_dim(dim, isl_dim_set) - 1);
2890 map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
2891 map = isl_map_apply_range(map, access_map);
2893 coalesced = isl_map_is_subset(map, next_element);
2895 isl_map_free(next_element);
2896 isl_map_free(map);
2898 return coalesced;
2901 /* For the given array reference group, check whether the access is private
2902 * to the thread. That is, check that any given array element
2903 * is only accessed by a single thread.
2904 * We compute an access relation that maps the shared tile loop iterators
2905 * and the shared point loop iterators that will be wrapped over the
2906 * threads to the array elements.
2907 * We actually check that those iterators that will be wrapped
2908 * partition the array space. This check is stricter than necessary
2909 * since several iterations may be mapped onto the same thread
2910 * and then they could be allowed to access the same memory elements,
2911 * but our check does not allow this situation.
2913 * We also check that the index expression only depends on parallel
2914 * loops. That way, we can move those loops innermost and unroll them.
2915 * Again, we use a test that is stricter than necessary.
2916 * We actually check whether the index expression only depends
2917 * on the iterators that are wrapped over the threads.
2918 * These are necessarily parallel, but there may be more parallel loops.
2920 * Combining the injectivity of the first test with the single-valuedness
2921 * of the second test, we simply test for bijectivity.
2923 * If it turns out we can use registers, we compute the private memory
2924 * tile size using can_tile_for_shared_memory, after introducing a dependence
2925 * on the thread indices.
2927 * Before performing any of the above computations, we first check
2928 * if there is any reuse on the reference group. If not, we simply
2929 * return. If, moreover, the access is coalesced then we also remove
2930 * the shared memory tiling since we should just use global memory instead.
2932 static void check_private_group_access(struct cuda_gen *gen,
2933 struct cuda_array_ref_group *group)
2935 isl_map *acc;
2936 isl_union_map *access;
2937 int n_index = group->array->n_index;
2939 access = group_access_relation(group, 1, 1);
2940 if (isl_union_map_is_injective(access)) {
2941 if (group->shared_bound && access_is_coalesced(gen, access)) {
2942 free_bound_list(group->shared_bound, n_index);
2943 group->shared_bound = NULL;
2945 isl_union_map_free(access);
2946 return;
2948 access = isl_union_map_apply_domain(access,
2949 isl_union_map_copy(gen->shared_sched));
2951 acc = isl_map_from_union_map(access);
2953 if (!isl_map_is_bijective(acc)) {
2954 isl_map_free(acc);
2955 return;
2958 group->private_bound = create_bound_list(gen->ctx, n_index);
2959 acc = isl_map_align_params(acc, isl_map_get_space(gen->privatization));
2960 acc = isl_map_apply_domain(acc, isl_map_copy(gen->privatization));
2961 if (!can_tile_for_shared_memory(gen, group->array, acc,
2962 group->private_bound)) {
2963 free_bound_list(group->private_bound, n_index);
2964 group->private_bound = NULL;
2967 isl_map_free(acc);
2970 /* Look for the last shared tile loop that affects the offset of the
2971 * shared or private tile and store the result in array->last_shared.
2973 static void set_last_shared(struct cuda_gen *gen,
2974 struct cuda_array_ref_group *group)
2976 int i, j;
2977 struct cuda_array_bound *bounds;
2978 unsigned first_shared = gen->first_shared;
2979 int n_index = group->array->n_index;
2981 bounds = group->private_bound;
2982 if (!bounds)
2983 bounds = group->shared_bound;
2984 if (!bounds)
2985 return;
2987 for (j = gen->shared_len - 1; j >= 0; --j) {
2988 for (i = 0; i < n_index; ++i) {
2989 isl_aff *lb;
2990 isl_aff *shift;
2992 lb = bounds[i].lb;
2993 if (isl_aff_involves_dims(lb, isl_dim_param,
2994 first_shared + j, 1))
2995 break;
2997 shift = bounds[i].shift;
2998 if (!shift)
2999 continue;
3000 if (isl_aff_involves_dims(shift, isl_dim_param,
3001 first_shared + j, 1))
3002 break;
3004 if (i < n_index)
3005 break;
3007 group->array->last_shared = j;
3010 /* Compute the sizes of all private arrays for the current kernel,
3011 * as well as the offsets of the private pieces in the original arrays.
3012 * If we cannot or don't want to privatize a given array group,
3013 * we use the shared memory tile sizes computed in
3014 * compute_group_shared_bound instead.
3016 * If a given array only has a single reference group and if we have
3017 * been able to find a private or shared tile,
3018 * we also look for the last shared tile loop that affects the offset
3019 * (and therefore the array tile) and store the result in array->last_shared.
3021 * A privatized copy of all access relations from reference groups that
3022 * are mapped to private memory is stored in gen->private_access.
3024 static void compute_private_size(struct cuda_gen *gen)
3026 int i, j;
3027 isl_union_map *private;
3029 if (!gen->options->use_private_memory)
3030 return;
3032 private = isl_union_map_empty(isl_union_map_get_space(gen->shared_sched));
3034 for (i = 0; i < gen->n_array; ++i) {
3035 struct cuda_array_info *array = &gen->array[i];
3037 for (j = 0; j < array->n_group; ++j) {
3038 check_private_group_access(gen, array->groups[j]);
3040 if (!array->groups[j]->private_bound)
3041 continue;
3043 private = isl_union_map_union(private,
3044 group_access_relation(array->groups[j], 1, 1));
3047 array->last_shared = gen->shared_len - 1;
3048 array->print_shared_level = -1;
3050 if (array->n_group != 1)
3051 continue;
3052 set_last_shared(gen, array->groups[0]);
3055 if (isl_union_map_is_empty(private))
3056 isl_union_map_free(private);
3057 else {
3058 isl_union_map *priv;
3060 private = isl_union_map_apply_domain(private,
3061 isl_union_map_copy(gen->shared_sched));
3062 priv = isl_union_map_from_map(isl_map_copy(gen->privatization));
3063 private = isl_union_map_apply_domain(private, priv);
3064 gen->private_access = private;
3068 /* Fill up the groups array with singleton groups, i.e., one group
3069 * per reference, initializing the array, access, write and refs fields.
3070 * In particular the access field is initialized to the scheduled
3071 * access relation of the array reference.
3073 * Return the number of elements initialized, i.e., the number of
3074 * active references in the current kernel.
3076 static int populate_array_references(struct cuda_gen *gen,
3077 struct cuda_array_info *array, __isl_keep isl_union_map *sched,
3078 struct cuda_array_ref_group **groups)
3080 int i;
3081 int n;
3082 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3084 n = 0;
3085 for (i = 0; i < array->n_ref; ++i) {
3086 isl_union_map *umap;
3087 isl_map *map;
3088 struct cuda_array_ref_group *group;
3089 struct cuda_stmt_access *access = array->refs[i];
3091 map = isl_map_copy(access->access);
3092 umap = isl_union_map_from_map(map);
3093 umap = isl_union_map_apply_domain(umap,
3094 isl_union_map_copy(sched));
3096 if (isl_union_map_is_empty(umap)) {
3097 isl_union_map_free(umap);
3098 continue;
3101 map = isl_map_from_union_map(umap);
3103 group = isl_calloc_type(ctx, struct cuda_array_ref_group);
3104 assert(group);
3105 group->array = array;
3106 group->access = map;
3107 group->write = access->write;
3108 group->refs = &array->refs[i];
3110 groups[n++] = group;
3113 return n;
3116 static void free_array_ref_group(struct cuda_array_ref_group *group,
3117 int n_index)
3119 if (!group)
3120 return;
3121 free_bound_list(group->shared_bound, n_index);
3122 free_bound_list(group->private_bound, n_index);
3123 isl_map_free(group->access);
3124 free(group->refs);
3125 free(group);
3128 /* If two groups have overlapping access relations and if one of them
3129 * involves a write, then merge the two groups into one.
3131 * We keep track of the grouping in "leader". leader[j] points to
3132 * an earlier element of the groups array that belongs to the same group,
3133 * or to element j itself if it is the first in its group.
3135 * Return the number of group leaders.
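 *
 * For example (a hypothetical situation), with three references to the
 * same array of which only the second is a write, where the second and
 * third references overlap and no other pair does, the result would be
 *
 *	leader = { 0, 1, 1 }
 *
 * and a return value of 2.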
3137 static int group_overlapping_writes(int n,
3138 struct cuda_array_ref_group **groups, int *leader)
3140 int i, j;
3141 int n_group = n;
3143 for (i = 0; i < n; ++i) {
3144 int l = i;
3145 groups[l]->n_ref = 1;
3146 for (j = i - 1; j >= 0; --j) {
3147 isl_map *map;
3148 int empty;
3150 if (leader[j] != j)
3151 continue;
3152 if (!groups[l]->write && !groups[j]->write)
3153 continue;
3155 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3156 isl_map_copy(groups[j]->access));
3157 empty = isl_map_is_empty(map);
3158 isl_map_free(map);
3160 if (empty)
3161 continue;
3163 groups[j]->access = isl_map_union(groups[j]->access,
3164 groups[l]->access);
3165 groups[j]->write = 1;
3166 groups[l]->access = NULL;
3167 groups[j]->n_ref += groups[l]->n_ref;
3168 l = leader[l] = j;
3169 n_group--;
3171 leader[i] = l;
3174 return n_group;
3177 /* Compute the size of the shared array corresponding to the given array
3178 * reference group, based on the accesses from the current kernel,
3179 * as well as the offset of the shared piece in the original array.
3181 static void compute_group_shared_bound(struct cuda_gen *gen,
3182 struct cuda_array_info *array, struct cuda_array_ref_group *group)
3184 isl_ctx *ctx = isl_space_get_ctx(array->dim);
3186 if (!gen->options->use_shared_memory)
3187 return;
3189 group->shared_bound = create_bound_list(ctx, array->n_index);
3190 if (!can_tile_for_shared_memory(gen, array, group->access,
3191 group->shared_bound)) {
3192 free_bound_list(group->shared_bound, array->n_index);
3193 group->shared_bound = NULL;
3197 /* Given an initial grouping of array references and shared memory tiles
3198 * for each group that allows for a shared memory tile, merge two groups
3199 * if both have a shared memory tile and if the merged group also has
3200 * a shared memory tile.
3202 * Return the number of group leaders after merging.
3204 static int group_common_shared_memory_tile(struct cuda_gen *gen,
3205 struct cuda_array_info *array, int n,
3206 struct cuda_array_ref_group **groups, int *leader, int n_group)
3208 int i, j;
3209 isl_ctx *ctx = isl_space_get_ctx(array->dim);
3211 for (i = 0; n_group > 1 && i < n; ++i) {
3212 int l = i;
3213 if (leader[i] != i)
3214 continue;
3215 if (!groups[i]->shared_bound)
3216 continue;
3217 for (j = i - 1; j >= 0; --j) {
3218 isl_map *map;
3219 int empty;
3220 struct cuda_array_bound *shared_bound;
3222 if (leader[j] != j)
3223 continue;
3224 if (!groups[j]->shared_bound)
3225 continue;
3227 map = isl_map_intersect(isl_map_copy(groups[l]->access),
3228 isl_map_copy(groups[j]->access));
3229 empty = isl_map_is_empty(map);
3230 isl_map_free(map);
3232 if (empty)
3233 continue;
3235 map = isl_map_union(isl_map_copy(groups[l]->access),
3236 isl_map_copy(groups[j]->access));
3237 shared_bound = create_bound_list(ctx, array->n_index);
3238 if (!can_tile_for_shared_memory(gen, array, map,
3239 shared_bound)) {
3240 isl_map_free(map);
3241 free_bound_list(shared_bound, array->n_index);
3242 continue;
3245 free_bound_list(groups[j]->shared_bound,
3246 array->n_index);
3247 groups[j]->shared_bound = shared_bound;
3248 isl_map_free(groups[j]->access);
3249 groups[j]->access = map;
3250 groups[j]->n_ref += groups[l]->n_ref;
3251 l = leader[l] = j;
3252 n_group--;
3256 return n_group;
3259 /* Extract an array of array reference groups from the array of references
3260 * and the grouping information in "leader".
3262 * Store the results in array->n_group and array->groups.
3264 static void extract_array_groups(isl_ctx *ctx, struct cuda_array_info *array,
3265 int n, struct cuda_array_ref_group **groups, int *leader, int n_group)
3267 int i, j;
3269 for (i = 2; i < n; ++i)
3270 leader[i] = leader[leader[i]];
3272 array->n_group = n_group;
3273 array->groups = isl_alloc_array(ctx, struct cuda_array_ref_group *,
3274 n_group);
3275 assert(array->groups);
3277 j = 0;
3278 for (i = 0; i < n; ++i) {
3279 int k, l;
3280 struct cuda_stmt_access **refs;
3282 if (leader[i] != i) {
3283 groups[i]->refs = NULL;
3284 free_array_ref_group(groups[i], array->n_index);
3285 continue;
3288 refs = isl_alloc_array(ctx, struct cuda_stmt_access *,
3289 groups[i]->n_ref);
3290 assert(refs);
3291 l = 0;
3292 for (k = i; k < n; ++k)
3293 if (leader[k] == i) {
3294 refs[l++] = *groups[k]->refs;
3295 (*groups[k]->refs)->group = j;
3298 groups[i]->refs = refs;
3299 groups[i]->nr = j;
3300 array->groups[j++] = groups[i];
3304 /* Group array references that should be considered together when
3305 * deciding whether to access them from private, shared or global memory.
3307 * In particular, if two array references overlap and if one of them
3308 * is a write, then the two references are grouped together.
3309 * Furthermore, if two groups admit a shared memory tile and if the
3310 * combination of the two also admits a shared memory tile, we merge
3311 * the two groups.
3313 * During the construction the group->refs field points to a single
3314 * array reference inside the array of array references, while
3315 * group->n_ref contains the number of elements in leader that
3316 * (directly or indirectly) point to this group, provided the group
3317 * is a leader.
3319 static void group_array_references(struct cuda_gen *gen,
3320 struct cuda_array_info *array, __isl_keep isl_union_map *sched)
3322 int i;
3323 int n, n_group;
3324 isl_ctx *ctx = isl_union_map_get_ctx(sched);
3325 struct cuda_array_ref_group **groups;
3326 int *leader;
3328 groups = isl_calloc_array(ctx, struct cuda_array_ref_group *,
3329 array->n_ref);
3330 assert(groups);
3332 n = populate_array_references(gen, array, sched, groups);
3334 leader = isl_alloc_array(ctx, int, n);
3335 assert(leader);
3337 n_group = group_overlapping_writes(n, groups, leader);
3339 for (i = 0; i < n; ++i)
3340 if (leader[i] == i)
3341 compute_group_shared_bound(gen, array, groups[i]);
3343 n_group = group_common_shared_memory_tile(gen, array, n, groups,
3344 leader, n_group);
3346 extract_array_groups(ctx, array, n, groups, leader, n_group);
3348 free(leader);
3349 free(groups);
3352 /* Take tiled_sched, project it onto the shared tile loops and
3353 * the loops that will be wrapped over the threads,
3354 * parametrize the shared tile loops and store the result in gen->shared_sched.
3355 * The position of the first of these parameters is stored in gen->first_shared.
3356 * Also compute a projection that projects out the loops that will be
3357 * wrapped over the threads and store this projection in gen->shared_proj.
3359 static void compute_shared_sched(struct cuda_gen *gen)
3361 isl_space *dim;
3362 isl_map *proj;
3363 isl_set *par;
3364 isl_union_map *sched;
3366 sched = isl_union_map_copy(gen->tiled_sched);
3368 dim = isl_union_map_get_space(sched);
3369 gen->first_shared = isl_space_dim(dim, isl_dim_param);
3370 proj = projection(dim, gen->tiled_len, gen->shared_len + gen->n_block);
3371 sched = isl_union_map_apply_range(sched, isl_union_map_from_map(proj));
3373 dim = isl_union_map_get_space(sched);
3374 par = parametrization(dim, gen->shared_len + gen->n_block,
3375 0, gen->shared_len, "g");
3376 sched = isl_union_map_intersect_range(sched,
3377 isl_union_set_from_set(par));
3379 dim = isl_union_map_get_space(sched);
3380 proj = projection(dim, gen->shared_len + gen->n_block, gen->shared_len);
3382 gen->shared_sched = sched;
3383 gen->shared_proj = isl_union_map_from_map(proj);
3386 /* Group references of all arrays in the program.
3388 static void group_references(struct cuda_gen *gen)
3390 int i;
3391 isl_union_map *sched;
3393 sched = isl_union_map_apply_range(isl_union_map_copy(gen->shared_sched),
3394 isl_union_map_copy(gen->shared_proj));
3396 for (i = 0; i < gen->n_array; ++i)
3397 group_array_references(gen, &gen->array[i], sched);
3399 isl_union_map_free(sched);
3402 /* Free all array information that is local to the current kernel.
3404 static void free_local_array_info(struct cuda_gen *gen)
3406 int i, j;
3408 for (i = 0; i < gen->n_array; ++i) {
3409 struct cuda_array_info *array = &gen->array[i];
3411 for (j = 0; j < array->n_group; ++j)
3412 free_array_ref_group(array->groups[j], array->n_index);
3413 free(array->groups);
3415 if (array->n_group == 0)
3416 continue;
3417 for (j = 0; j < gen->array[i].n_index; ++j) {
3418 isl_pw_aff_free(gen->array[i].local_bound[j]);
3419 gen->array[i].local_bound[j] = NULL;
3424 static void print_iterator_list(FILE *out, int len, const char *prefix,
3425 int parens)
3427 int i;
3429 fprintf(out, "(");
3430 for (i = 0; i < len; ++i) {
3431 if (i)
3432 fprintf(out, ", ");
3433 if (parens)
3434 fprintf(out, "(%s%d)", prefix, i);
3435 else
3436 fprintf(out, "%s%d", prefix, i);
3438 fprintf(out, ")");
3441 /* The sizes of the arrays on the host that have been computed by
3442 * extract_array_info may depend on the parameters. Use the extra
3443 * constraints on the parameters that are valid at "host_domain"
3444 * to simplify these expressions.
3446 static void localize_bounds(struct cuda_gen *gen,
3447 __isl_keep isl_set *host_domain)
3449 int i, j;
3450 isl_set *context;
3452 context = isl_set_copy(host_domain);
3453 context = isl_set_params(context);
3455 for (i = 0; i < gen->n_array; ++i) {
3456 struct cuda_array_info *array = &gen->array[i];
3458 if (array->n_group == 0)
3459 continue;
3461 for (j = 0; j < array->n_index; ++j) {
3462 isl_pw_aff *pwaff;
3464 pwaff = isl_pw_aff_copy(array->bound[j]);
3465 pwaff = isl_pw_aff_gist(pwaff, isl_set_copy(context));
3466 array->local_bound[j] = pwaff;
3469 isl_set_free(context);
3472 /* Set gen->tile_len and gen->n_parallel to those of the first statement
3473 * in the statement list u.
3474 * Because of the way the schedule is constructed, the other statements
3475 * in the list, if any, should have the same values for these properties.
3477 static void set_tile_len(struct cuda_gen *gen, struct clast_user_stmt *u)
3479 int nr;
3480 struct cuda_stmt *stmt;
3482 nr = atoi(u->statement->name + 2);
3483 stmt = &gen->stmts[nr];
3485 gen->tile_len = stmt->tile_len;
3486 gen->n_parallel = stmt->n_parallel;
3489 /* This function is called for each leaf in the clast of the host code.
3490 * We first specialize the schedule to the site of the leaf, compute
3491 * the size of shared memory and then print the body of host code
3492 * and the associated kernel (through a call to print_kernel_body).
3494 static void print_host_user(struct gpucode_info *code,
3495 struct clast_user_stmt *u)
3497 struct cuda_gen *gen = code->user;
3498 isl_space *dim;
3499 isl_set *par;
3500 isl_set *host_domain;
3501 isl_union_map *access;
3502 isl_union_map *local_sched;
3503 isl_union_set *arrays;
3505 set_tile_len(gen, u);
3506 read_sizes(gen);
3508 host_domain = extract_entire_host_domain(u);
3510 local_sched = isl_union_map_intersect_range(
3511 isl_union_map_copy(gen->sched),
3512 isl_union_set_from_set(extend(isl_set_copy(host_domain),
3513 gen->untiled_len)));
3514 access = isl_union_map_union(isl_union_map_copy(gen->read),
3515 isl_union_map_copy(gen->write));
3516 access = isl_union_map_apply_domain(access,
3517 isl_union_map_copy(local_sched));
3518 arrays = isl_union_map_range(access);
3520 print_indent(code->dst, code->indent);
3521 fprintf(code->dst, "dim3 k%d_dimBlock", gen->kernel_id);
3522 print_reverse_list(code->dst, gen->n_block, gen->block_dim);
3523 fprintf(code->dst, ";\n");
3525 print_indent(code->dst, code->indent);
3526 fprintf(code->dst, "dim3 k%d_dimGrid", gen->kernel_id);
3527 print_reverse_list(code->dst, gen->n_grid, gen->grid_dim);
3528 fprintf(code->dst, ";\n");
3530 gen->tiled_sched = tile_schedule(gen, local_sched);
3531 gen->tiled_sched = parametrize_tiled_schedule(gen, gen->tiled_sched);
3532 gen->tiled_sched = scale_tile_loops(gen, gen->tiled_sched);
3534 gen->local_sched = isl_union_map_copy(gen->tiled_sched);
3536 dim = isl_union_map_get_space(gen->local_sched);
3537 par = parametrization(dim, gen->tiled_len, 0, gen->shared_len, "g");
3538 gen->local_sched = isl_union_map_intersect_range(gen->local_sched,
3539 isl_union_set_from_set(par));
3541 gen->local_sched = thread_tile_schedule(gen, gen->local_sched);
3542 gen->local_sched = scale_thread_tile_loops(gen, gen->local_sched);
3544 gen->private_access = NULL;
3545 compute_shared_sched(gen);
3546 gen->privatization = compute_privatization(gen);
3547 group_references(gen);
3548 compute_private_size(gen);
3549 localize_bounds(gen, host_domain);
3551 gen->local_sched = interchange_for_unroll(gen, gen->local_sched);
3553 print_kernel_launch(gen, arrays);
3555 fprintf(gen->cuda.kernel_c, "{\n");
3557 print_kernel_body(gen, host_domain, gen->tiled_sched);
3559 fprintf(gen->cuda.kernel_c, "}\n");
3561 free_local_array_info(gen);
3562 isl_map_free(gen->privatization);
3563 isl_union_map_free(gen->private_access);
3564 isl_union_map_free(gen->local_sched);
3565 isl_union_map_free(gen->tiled_sched);
3566 isl_union_map_free(gen->shared_sched);
3567 isl_union_map_free(gen->shared_proj);
3568 isl_union_set_free(arrays);
3569 isl_set_free(host_domain);
3571 free(gen->tile_size);
3572 gen->kernel_id++;
3575 /* Use CLooG to generate code for the outer gen->tile_first loops
3576 * of the global schedule in gen->sched.
3577 * The pretty printing of this code is handled by gpu_print_host_stmt,
3578 * which calls print_host_user for each kernel invocation location.
3580 static void print_cloog_host_code(struct cuda_gen *gen)
3582 int i;
3583 isl_set *context;
3584 isl_union_map *sched;
3585 CloogOptions *options;
3586 CloogDomain *cloog_context;
3587 CloogUnionDomain *ud;
3588 CloogInput *input;
3589 struct clast_stmt *stmt;
3590 char name[20];
3592 options = cloog_options_malloc(gen->state);
3593 options->language = CLOOG_LANGUAGE_C;
3594 options->otl = 0;
3595 options->strides = 1;
3596 options->stop = gen->tile_first;
3597 options->f = gen->untiled_len;
3598 options->l = gen->untiled_len;
3599 options->save_domains = 1;
3600 options->noscalars = 1;
3602 sched = isl_union_map_copy(gen->sched);
3603 ud = cloog_union_domain_from_isl_union_map(sched);
3604 for (i = 0; i < options->stop; ++i) {
3605 snprintf(name, sizeof(name), "h%d", i);
3606 ud = cloog_union_domain_set_name(ud, CLOOG_SCAT, i, name);
3608 context = isl_set_copy(gen->context);
3609 cloog_context = cloog_domain_from_isl_set(context);
3610 input = cloog_input_alloc(cloog_context, ud);
3612 stmt = cloog_clast_create_from_input(input, options);
3614 gen->code.indent = 0;
3615 gen->code.dst = gen->cuda.host_c;
3616 gen->code.print_user_stmt = NULL;
3617 gen->code.print_user_stmt_list = &print_host_user;
3618 gen->code.print_for_head = NULL;
3619 gen->code.print_for_foot = NULL;
3620 gen->code.user = gen;
3621 gpu_print_host_stmt(&gen->code, stmt);
3623 cloog_clast_free(stmt);
3624 cloog_options_free(options);
3625 fprintf(gen->cuda.host_c, "\n");
3628 void print_cuda_macros(struct cuda_gen *gen)
3630 const char *macros =
3631 "#define cudaCheckReturn(ret) assert((ret) == cudaSuccess)\n"
3632 "#define cudaCheckKernel()"
3633 " assert(cudaGetLastError() == cudaSuccess)\n\n";
3634 fputs(macros, gen->cuda.host_c);
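	/* In the generated host code, these macros are meant to wrap CUDA
	 * runtime calls and kernel launches, e.g. (illustrative only,
	 * names are made up):
	 *
	 *	cudaCheckReturn(cudaMalloc((void **) &dev_A, size));
	 *	kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_A);
	 *	cudaCheckKernel();
	 */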
3637 void print_host_code(struct cuda_gen *gen)
3639 fprintf(gen->cuda.host_c, "{\n");
3640 print_cloog_macros(gen->cuda.host_c);
3641 print_cloog_macros(gen->cuda.kernel_c);
3643 print_cuda_macros(gen);
3645 declare_device_arrays(gen);
3647 allocate_device_arrays(gen);
3648 copy_arrays_to_device(gen);
3650 gen->kernel_id = 0;
3651 print_cloog_host_code(gen);
3653 copy_arrays_from_device(gen);
3654 free_device_arrays(gen);
3656 fprintf(gen->cuda.host_c, "}\n");
3659 __isl_give isl_set *add_context_from_str(__isl_take isl_set *set,
3660 const char *str)
3662 isl_ctx *ctx;
3663 isl_set *context;
3665 if (!str)
3666 return set;
3668 ctx = isl_set_get_ctx(set);
3669 context = isl_set_read_from_str(ctx, str);
3670 context = isl_set_align_params(context, isl_set_get_space(set));
3671 set = isl_set_intersect(set, context);
3673 return set;
3676 /* Return the union of all iteration domains of the gen->stmts[i].
3678 static __isl_give isl_union_set *extract_domain(struct cuda_gen *gen)
3680 int i;
3681 isl_union_set *domain;
3683 domain = isl_union_set_empty(isl_set_get_space(gen->context));
3684 for (i = 0; i < gen->n_stmts; ++i) {
3685 isl_set *domain_i;
3687 domain_i = isl_set_copy(gen->stmts[i].domain);
3688 domain = isl_union_set_union(domain,
3689 isl_union_set_from_set(domain_i));
3692 return domain;
3695 /* Information about the outermost tilable bands in the forest of bands.
3697 * tile_len and n_parallel are only set on band_info structures
3698 * that correspond to outermost bands. For other bands (in particular,
3699 * ancestors of the outermost bands), n_parallel is set to 0.
3701 * prefix is the (padded) schedule leading up to the outermost tilable bands.
3703 * tile_first is the number of schedule dimensions in prefix.
3705 * suffix is the schedule of the outermost tilable bands and their descendants.
3707 struct band_info {
3708 struct cuda_gen *gen;
3709 int tile_first;
3710 int tile_len;
3711 int n_parallel;
3712 isl_union_map *prefix;
3713 isl_union_map *suffix;
3716 /* Set tile_len and n_parallel of the statement to that of
3717 * their outermost band, recorded in the band_info.
3719 static int set_stmt_tile_len(__isl_take isl_map *map, void *user)
3721 struct band_info *info = user;
3722 int nr;
3723 struct cuda_stmt *stmt;
3725 nr = atoi(isl_map_get_tuple_name(map, isl_dim_in) + 2);
3726 stmt = &info->gen->stmts[nr];
3728 stmt->tile_len = info->tile_len;
3729 stmt->n_parallel = info->n_parallel;
3731 isl_map_free(map);
3733 return 0;
3736 static void list_select_outer_band(struct cuda_gen *gen,
3737 __isl_take isl_band_list *list, int pos, struct band_info *list_info);
3739 /* Check if this band has any parallel loops. If so, take it as
3740 * the outermost tilable band. If not, continue looking for the
3741 * outermost tilable band in the children of the current band.
3743 static void band_select_outer_band(struct cuda_gen *gen,
3744 __isl_take isl_band *band, int pos, struct band_info *info)
3746 int n = isl_band_n_member(band);
3747 int n_parallel;
3749 for (n_parallel = 0; n_parallel < n; ++n_parallel)
3750 if (!isl_band_member_is_zero_distance(band, n_parallel))
3751 break;
3753 info->n_parallel = n_parallel;
3754 if (n_parallel) {
3755 info->gen = gen;
3756 info->tile_first = pos;
3757 info->tile_len = n;
3758 info->prefix = isl_band_get_prefix_schedule(band);
3759 info->suffix = isl_union_map_flat_range_product(
3760 isl_band_get_partial_schedule(band),
3761 isl_band_get_suffix_schedule(band));
3762 isl_union_map_foreach_map(info->prefix,
3763 &set_stmt_tile_len, info);
3764 } else if (isl_band_has_children(band)) {
3765 isl_band_list *children;
3766 children = isl_band_get_children(band);
3767 list_select_outer_band(gen, children, pos + n, info);
3768 } else {
3769 info->gen = gen;
3770 info->tile_first = pos + n;
3771 info->tile_len = 0;
3772 info->prefix = isl_union_map_flat_range_product(
3773 isl_band_get_prefix_schedule(band),
3774 isl_band_get_partial_schedule(band));
3775 info->suffix = isl_band_get_suffix_schedule(band);
3776 isl_union_map_foreach_map(info->prefix,
3777 &set_stmt_tile_len, info);
3780 isl_band_free(band);
3783 /* Comparison function that returns a non-zero value for band_infos
3784 * with different tile_len fields or different n_parallel fields.
3786 static int cmp_band(const void *p1, const void *p2)
3788 const struct band_info *info1 = p1;
3789 const struct band_info *info2 = p2;
3791 if (info1->tile_len != info2->tile_len)
3792 return info1->tile_len - info2->tile_len;
3794 return info1->n_parallel - info2->n_parallel;
3797 /* Extend "umap" with coordinates with fixed value "val"
3798  * to a total length of "dst_len", assuming the original range dimension is "src_len".
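 * For example (assuming "projection" keeps the leading coordinates),
 * extending a range of length 2 to length 4 with value 0 turns a range
 * element [i0, i1] into [i0, i1, 0, 0].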
3800 static __isl_give isl_union_map *extend_range(__isl_take isl_union_map *umap,
3801 int src_len, int dst_len, int val)
3803 isl_space *dim;
3804 isl_map *map;
3805 int i;
3807 dim = isl_union_map_get_space(umap);
3808 map = isl_map_reverse(projection(dim, dst_len, src_len));
3809 for (i = src_len; i < dst_len; ++i)
3810 map = isl_map_fix_si(map, isl_dim_out, i, val);
3812 umap = isl_union_map_apply_range(umap, isl_union_map_from_map(map));
3814 return umap;
3817 /* Group bands with the same values for tile_len and n_parallel.
3818 * The prefix schedule is then extended with a fixed coordinate that
3819 * is different for each such group.
3820 * Note that the actual values for this coordinate are not important.
3821 * The bands have already been effectively separated at a higher level
3822 * or they are independent and may be executed in parallel.
3823  * The list of band_info has been sorted before this function is called.
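 * For example, three sorted bands with (tile_len, n_parallel) equal to
 * (2, 1), (2, 1) and (3, 2) would get the extra coordinate values
 * 0, 0 and 1, respectively.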
3825 static void separate_bands(struct band_info *info, int n)
3827 int i;
3828 int j = 0;
3830 for (i = 0; i < n; ++i) {
3831 int l = info[i].tile_first;
3833 if (i &&
3834 (info[i].tile_len != info[i - 1].tile_len ||
3835 info[i].n_parallel != info[i - 1].n_parallel))
3836 j++;
3838 info[i].prefix = extend_range(info[i].prefix,
3839 l, l + 1, j);
3840 info[i].tile_first = l + 1;
3844 /* Select the outermost bands in the elements of the list, align
3845 * their prefix schedules, separate bands with different values
3846 * for tile_len and/or n_parallel and then combine the resulting
3847 * prefix and suffix schedules into a single pair of prefix and
3848 * suffix schedules for the entire list.
3850 static void list_select_outer_band(struct cuda_gen *gen,
3851 __isl_take isl_band_list *list, int pos, struct band_info *list_info)
3853 isl_band *band;
3854 int i;
3855 int n = isl_band_list_n_band(list);
3856 isl_ctx *ctx = isl_band_list_get_ctx(list);
3857 struct band_info *info;
3858 int max_tile_first;
3859 isl_union_map *prefix;
3860 isl_union_map *suffix;
3862 assert(n >= 1);
3863 info = isl_calloc_array(ctx, struct band_info, n);
3864 assert(info);
3866 max_tile_first = 0;
3867 for (i = 0; i < n; ++i) {
3868 band = isl_band_list_get_band(list, i);
3869 band_select_outer_band(gen, band, pos, &info[i]);
3870 if (info[i].tile_first > max_tile_first)
3871 max_tile_first = info[i].tile_first;
3874 for (i = 0; i < n; ++i) {
3875 if (info[i].tile_first == max_tile_first)
3876 continue;
3877 info[i].prefix = extend_range(info[i].prefix,
3878 info[i].tile_first, max_tile_first, 0);
3879 info[i].tile_first = max_tile_first;
3882 qsort(info, n, sizeof(struct band_info), &cmp_band);
3884 for (i = 0; i < n - 1; ++i)
3885 if (info[i].tile_len != info[i + 1].tile_len ||
3886 info[i].n_parallel != info[i + 1].n_parallel)
3887 break;
3889 if (i < n - 1)
3890 separate_bands(info, n);
3892 prefix = info[0].prefix;
3893 suffix = info[0].suffix;
3895 for (i = 1; i < n; ++i) {
3896 prefix = isl_union_map_union(prefix, info[i].prefix);
3897 suffix = isl_union_map_union(suffix, info[i].suffix);
3900 list_info->tile_first = info[0].tile_first;
3901 list_info->tile_len = -1;
3902 list_info->prefix = prefix;
3903 list_info->suffix = suffix;
3905 isl_band_list_free(list);
3906 free(info);
3909 /* Set max_out to the maximal number of output dimensions over
3910 * all maps.
3912 static int update_max_out(__isl_take isl_map *map, void *user)
3914 int *max_out = user;
3915 int n_out = isl_map_dim(map, isl_dim_out);
3917 if (n_out > *max_out)
3918 *max_out = n_out;
3920 isl_map_free(map);
3921 return 0;
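/* Data used while aligning the ranges of a union map:
 * "max_out" is the maximal number of output dimensions over all maps and
 * "res" collects the maps after their ranges have been extended to "max_out".
 */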
3924 struct align_range_data {
3925 int max_out;
3926 isl_union_map *res;
3929 /* Extend the dimension of the range of the given map to data->max_out and
3930 * then add the result to data->res.
3932 static int map_align_range(__isl_take isl_map *map, void *user)
3934 struct align_range_data *data = user;
3935 int i;
3936 isl_space *dim;
3937 isl_map *proj;
3938 int n_out = isl_map_dim(map, isl_dim_out);
3940 dim = isl_union_map_get_space(data->res);
3941 proj = isl_map_reverse(projection(dim, data->max_out, n_out));
3942 for (i = n_out; i < data->max_out; ++i)
3943 proj = isl_map_fix_si(proj, isl_dim_out, i, 0);
3945 map = isl_map_apply_range(map, proj);
3947 data->res = isl_union_map_add_map(data->res, map);
3949 return 0;
3952 /* Extend the ranges of the maps in the union map such that they all have
3953 * the same dimension.
3955 static __isl_give isl_union_map *align_range(__isl_take isl_union_map *umap)
3957 struct align_range_data data;
3959 data.max_out = 0;
3960 isl_union_map_foreach_map(umap, &update_max_out, &data.max_out);
3962 data.res = isl_union_map_empty(isl_union_map_get_space(umap));
3963 isl_union_map_foreach_map(umap, &map_align_range, &data);
3965 isl_union_map_free(umap);
3966 return data.res;
3969 /* Select the outermost tilable band that (by construction)
3970 * has at least one parallel loop.
3971  * The starting position of the aligned band is stored in
3972  * gen->tile_first.
3973 * The sizes and number of parallel loops may be different in different
3974 * parts of the band forest and are therefore stored in the cuda_stmts.
3976 * Return the complete schedule, with the tilable bands aligned
3977 * at gen->tile_first and padded with zero, if needed.
3979 static __isl_give isl_union_map *select_outer_tilable_band(struct cuda_gen *gen,
3980 __isl_keep isl_schedule *schedule)
3982 isl_band_list *list;
3983 struct band_info info;
3985 gen->n_parallel = 0;
3986 gen->tile_len = -1;
3988 list = isl_schedule_get_band_forest(schedule);
3990 list_select_outer_band(gen, list, 0, &info);
3992 gen->tile_first = info.tile_first;
3993 info.suffix = align_range(info.suffix);
3995 return isl_union_map_flat_range_product(info.prefix, info.suffix);
3998 /* Set gen->untiled_len to the number of scheduling dimensions
3999 * for the schedule of the first domain.
4000 * We assume here that this number is the same for all domains.
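 * We therefore abort the iteration after the first map by returning
 * a negative value.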
4002 static int set_untiled_len(__isl_take isl_map *map, void *user)
4004 unsigned *untiled_len = user;
4006 *untiled_len = isl_map_dim(map, isl_dim_out);
4008 isl_map_free(map);
4009 return -1;
4012 /* Compute an appropriate schedule based on the accesses in
4013 * gen->read and gen->write.
4015 * We first compute dependences and then use those to compute
4016 * a schedule that has a parallel loop in each tilable band.
4017 * Finally, we select the outermost tilable band.
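 * The first flow computation below yields the read-after-write dependences
 * as well as the reads with no corresponding write, which determine
 * gen->copy_in.  The second yields the write-after-write and
 * write-after-read dependences.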
4019 static void compute_schedule(struct cuda_gen *gen,
4020 __isl_take isl_union_map *sched)
4022 isl_ctx *ctx = isl_union_map_get_ctx(sched);
4023 isl_union_set *domain;
4024 isl_union_map *empty;
4025 isl_union_map *dep_raw, *dep2, *dep3, *dep;
4026 isl_union_map *uninitialized;
4027 isl_schedule *schedule;
4029 empty = isl_union_map_empty(isl_union_map_get_space(sched));
4031 isl_union_map_compute_flow(isl_union_map_copy(gen->read),
4032 isl_union_map_copy(gen->write), empty,
4033 isl_union_map_copy(sched),
4034 &dep_raw, NULL, &uninitialized, NULL);
4035 isl_union_map_compute_flow(isl_union_map_copy(gen->write),
4036 isl_union_map_copy(gen->write),
4037 isl_union_map_copy(gen->read),
4038 isl_union_map_copy(sched),
4039 &dep2, &dep3, NULL, NULL);
4040 isl_union_map_free(sched);
4042 gen->copy_in = isl_union_map_range(uninitialized);
4044 dep = isl_union_map_union(dep2, dep3);
4045 dep = isl_union_map_union(dep, dep_raw);
4046 dep = isl_union_map_coalesce(dep);
4048 domain = extract_domain(gen);
4049 schedule = isl_union_set_compute_schedule(isl_union_set_copy(domain),
4050 isl_union_map_copy(dep), dep);
4052 sched = select_outer_tilable_band(gen, schedule);
4054 isl_union_map_foreach_map(sched, &set_untiled_len, &gen->untiled_len);
4055 sched = isl_union_map_intersect_domain(sched, domain);
4056 gen->sched = sched;
4058 isl_schedule_free(schedule);
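/* Extract a single array access from the access expression "expr",
 * append it to the list pointed to by "next_access" and return
 * a pointer to the "next" field of the new element.
 */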
4061 static struct cuda_stmt_access **expr_extract_access(struct pet_expr *expr,
4062 struct cuda_stmt_access **next_access)
4064 struct cuda_stmt_access *access;
4065 isl_ctx *ctx = isl_map_get_ctx(expr->acc.access);
4067 access = isl_alloc_type(ctx, struct cuda_stmt_access);
4068 assert(access);
4069 access->next = NULL;
4070 access->read = expr->acc.read;
4071 access->write = expr->acc.write;
4072 access->access = isl_map_copy(expr->acc.access);
4074 *next_access = access;
4075 next_access = &(*next_access)->next;
4076 return next_access;
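/* Recursively extract all array accesses from "expr" and its arguments,
 * appending them to the list pointed to by "next_access".
 */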
4079 static struct cuda_stmt_access **expr_extract_accesses(struct pet_expr *expr,
4080 struct cuda_stmt_access **next_access)
4082 int i;
4084 for (i = 0; i < expr->n_arg; ++i)
4085 next_access = expr_extract_accesses(expr->args[i],
4086 next_access);
4088 if (expr->type == pet_expr_access)
4089 next_access = expr_extract_access(expr, next_access);
4091 return next_access;
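/* Collect all array accesses in the body of "stmt" into stmt->accesses.
 */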
4094 static void pet_stmt_extract_accesses(struct cuda_stmt *stmt)
4096 struct cuda_stmt_access **next_access = &stmt->accesses;
4098 stmt->accesses = NULL;
4099 expr_extract_accesses(stmt->body, next_access);
4102 /* Return an array of cuda_stmt representing the statements in "scop".
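 * The iteration domain of each statement is intersected with "context"
 * (on the parameters) and the accesses of the statement are collected.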
4104 static struct cuda_stmt *extract_stmts(isl_ctx *ctx, struct pet_scop *scop,
4105 __isl_keep isl_set *context)
4107 int i;
4108 struct cuda_stmt *stmts;
4110 stmts = isl_calloc_array(ctx, struct cuda_stmt, scop->n_stmt);
4111 assert(stmts);
4113 for (i = 0; i < scop->n_stmt; ++i) {
4114 struct cuda_stmt *s = &stmts[i];
4116 s->domain = isl_set_copy(scop->stmts[i]->domain);
4117 s->domain = isl_set_intersect_params(s->domain,
4118 isl_set_copy(context));
4119 s->body = scop->stmts[i]->body;
4120 pet_stmt_extract_accesses(s);
4123 return stmts;
4126 /* Replace the scop in the "input" file by equivalent code
4127 * that uses the GPU. "scop" is assumed to correspond to this scop.
4129 * We first compute a schedule that respects the dependences
4130 * of the original program and select the outermost band
4131 * of tilable dimensions that has at least one parallel loop.
4132 * We then have three blocks of dimensions
4134 * H B G
4136 * The tilable band "B" is first tiled according to "tile.sizes", resulting
4137 * in
4139 * H T P G
4141 * For each iteration of the T loop and for each array, we compute
4142 * the array elements accessed by that iteration, construct a rectangular
4143  * box around these elements and shift it to the origin. The result is used
4144 * as shared memory for the array.
4146 * We then split off at most 2 parallel loops from the T loops and
4147 * at most 3 parallel loops from the P loops
4149 * H T1 T2 P1 P2 G
4151 * The T1/P1 loops are then tiled or "wrapped" over the blocks/threads,
4152 * according to "grid.sizes"/"block.sizes".
4154 * H T1T T1P T2 P1T P1P P2 G
4156 * Finally, the T1P and P1P iterators are equated to the block and
4157 * thread dimensions respectively and so are effectively removed.
4158 * The H loops are run on the host. The T1T, T2, P1T, P2 and G loops
4159 * are run on the GPU.
4161 * Code is generated in three stages. We first generate code for the
4162 * host (the H loops), with iterators h%d. Then, for each leaf node
4163 * of the resulting AST, we generate code for the shared loops (up to
4164 * and including T2), with iterators g%d and after equating the H loops
4165 * to h%d parameters and the T1P loops to the block dimensions.
4166 * Finally, we generate code for the remaining loops in a similar fashion.
4168 int cuda_pet(isl_ctx *ctx, struct pet_scop *scop, struct ppcg_options *options,
4169 const char *input)
4171 isl_union_map *sched;
4172 struct cuda_gen gen;
4174 if (!scop)
4175 return -1;
4177 scop = pet_scop_align_params(scop);
4179 gen.ctx = ctx;
4180 gen.context = isl_set_copy(scop->context);
4181 gen.context = add_context_from_str(gen.context, options->ctx);
4182 gen.n_stmts = scop->n_stmt;
4183 gen.stmts = extract_stmts(ctx, scop, gen.context);
4184 gen.read = pet_scop_collect_reads(scop);
4185 gen.write = pet_scop_collect_writes(scop);
4186 gen.options = options;
4187 gen.state = cloog_isl_state_malloc(gen.ctx);
4188 gen.scop = scop;
4190 cuda_open_files(&gen.cuda, input);
4192 collect_array_info(&gen);
4194 sched = pet_scop_collect_schedule(scop);
4196 compute_schedule(&gen, sched);
4198 print_host_code(&gen);
4200 cloog_state_free(gen.state);
4201 clear_cuda_gen(&gen);
4203 cuda_close_files(&gen.cuda);
4205 return 0;